   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2007 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/slab.h>
   7#include <linux/blkdev.h>
   8#include <linux/writeback.h>
   9#include <linux/sched/mm.h>
  10#include "messages.h"
  11#include "misc.h"
  12#include "ctree.h"
  13#include "transaction.h"
  14#include "btrfs_inode.h"
  15#include "extent_io.h"
  16#include "disk-io.h"
  17#include "compression.h"
  18#include "delalloc-space.h"
  19#include "qgroup.h"
  20#include "subpage.h"
  21#include "file.h"
  22#include "super.h"
  23
  24static struct kmem_cache *btrfs_ordered_extent_cache;
  25
  26static u64 entry_end(struct btrfs_ordered_extent *entry)
  27{
  28	if (entry->file_offset + entry->num_bytes < entry->file_offset)
  29		return (u64)-1;
  30	return entry->file_offset + entry->num_bytes;
  31}
  32
   33/* Returns NULL if the insertion worked, or the already-existing node that
   34 * was found to overlap in the tree.
  35 */
  36static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
  37				   struct rb_node *node)
  38{
  39	struct rb_node **p = &root->rb_node;
  40	struct rb_node *parent = NULL;
  41	struct btrfs_ordered_extent *entry;
  42
  43	while (*p) {
  44		parent = *p;
  45		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);
  46
  47		if (file_offset < entry->file_offset)
  48			p = &(*p)->rb_left;
  49		else if (file_offset >= entry_end(entry))
  50			p = &(*p)->rb_right;
  51		else
  52			return parent;
  53	}
  54
  55	rb_link_node(node, parent, p);
  56	rb_insert_color(node, root);
  57	return NULL;
  58}
  59
  60/*
  61 * look for a given offset in the tree, and if it can't be found return the
  62 * first lesser offset
  63 */
  64static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
  65				     struct rb_node **prev_ret)
  66{
  67	struct rb_node *n = root->rb_node;
  68	struct rb_node *prev = NULL;
  69	struct rb_node *test;
  70	struct btrfs_ordered_extent *entry;
  71	struct btrfs_ordered_extent *prev_entry = NULL;
  72
  73	while (n) {
  74		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
  75		prev = n;
  76		prev_entry = entry;
  77
  78		if (file_offset < entry->file_offset)
  79			n = n->rb_left;
  80		else if (file_offset >= entry_end(entry))
  81			n = n->rb_right;
  82		else
  83			return n;
  84	}
  85	if (!prev_ret)
  86		return NULL;
  87
  88	while (prev && file_offset >= entry_end(prev_entry)) {
  89		test = rb_next(prev);
  90		if (!test)
  91			break;
  92		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
  93				      rb_node);
  94		if (file_offset < entry_end(prev_entry))
  95			break;
  96
  97		prev = test;
  98	}
  99	if (prev)
 100		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
 101				      rb_node);
 102	while (prev && file_offset < entry_end(prev_entry)) {
 103		test = rb_prev(prev);
 104		if (!test)
 105			break;
 106		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
 107				      rb_node);
 108		prev = test;
 109	}
 110	*prev_ret = prev;
 111	return NULL;
 112}
 113
 114static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
 115			  u64 len)
 116{
 117	if (file_offset + len <= entry->file_offset ||
 118	    entry->file_offset + entry->num_bytes <= file_offset)
 119		return 0;
 120	return 1;
 121}
 122
 123/*
  124 * Find the first ordered struct that covers this offset, otherwise the
  125 * first one less than this offset.
 126 */
 127static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
 128					  u64 file_offset)
 129{
 130	struct rb_root *root = &tree->tree;
 131	struct rb_node *prev = NULL;
 132	struct rb_node *ret;
 133	struct btrfs_ordered_extent *entry;
 134
 135	if (tree->last) {
 136		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
 137				 rb_node);
 138		if (in_range(file_offset, entry->file_offset, entry->num_bytes))
 139			return tree->last;
 140	}
 141	ret = __tree_search(root, file_offset, &prev);
 142	if (!ret)
 143		ret = prev;
 144	if (ret)
 145		tree->last = ret;
 146	return ret;
 147}
 148
 149/*
 150 * Add an ordered extent to the per-inode tree.
 151 *
 152 * @inode:           Inode that this extent is for.
 153 * @file_offset:     Logical offset in file where the extent starts.
 154 * @num_bytes:       Logical length of extent in file.
 155 * @ram_bytes:       Full length of unencoded data.
 156 * @disk_bytenr:     Offset of extent on disk.
 157 * @disk_num_bytes:  Size of extent on disk.
 158 * @offset:          Offset into unencoded data where file data starts.
 159 * @flags:           Flags specifying type of extent (1 << BTRFS_ORDERED_*).
 160 * @compress_type:   Compression algorithm used for data.
 161 *
 162 * Most of these parameters correspond to &struct btrfs_file_extent_item. The
 163 * tree is given a single reference on the ordered extent that was inserted.
 164 *
 165 * Return: 0 or -ENOMEM.
 166 */
 167int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
 168			     u64 num_bytes, u64 ram_bytes, u64 disk_bytenr,
 169			     u64 disk_num_bytes, u64 offset, unsigned flags,
 170			     int compress_type)
 171{
 172	struct btrfs_root *root = inode->root;
 173	struct btrfs_fs_info *fs_info = root->fs_info;
 174	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
 175	struct rb_node *node;
 176	struct btrfs_ordered_extent *entry;
 177	int ret;
 178
 179	if (flags &
 180	    ((1 << BTRFS_ORDERED_NOCOW) | (1 << BTRFS_ORDERED_PREALLOC))) {
 181		/* For nocow write, we can release the qgroup rsv right now */
 182		ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes);
 183		if (ret < 0)
 184			return ret;
 185		ret = 0;
 186	} else {
 187		/*
 188		 * The ordered extent has reserved qgroup space, release now
 189		 * and pass the reserved number for qgroup_record to free.
 190		 */
 191		ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes);
 192		if (ret < 0)
 193			return ret;
 194	}
 195	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
 196	if (!entry)
 197		return -ENOMEM;
 198
 199	entry->file_offset = file_offset;
 200	entry->num_bytes = num_bytes;
 201	entry->ram_bytes = ram_bytes;
 202	entry->disk_bytenr = disk_bytenr;
 203	entry->disk_num_bytes = disk_num_bytes;
 204	entry->offset = offset;
 205	entry->bytes_left = num_bytes;
 206	entry->inode = igrab(&inode->vfs_inode);
 207	entry->compress_type = compress_type;
 208	entry->truncated_len = (u64)-1;
 209	entry->qgroup_rsv = ret;
 210	entry->physical = (u64)-1;
 211
 212	ASSERT((flags & ~BTRFS_ORDERED_TYPE_FLAGS) == 0);
 213	entry->flags = flags;
 214
 215	percpu_counter_add_batch(&fs_info->ordered_bytes, num_bytes,
 216				 fs_info->delalloc_batch);
 217
 218	/* one ref for the tree */
 219	refcount_set(&entry->refs, 1);
 220	init_waitqueue_head(&entry->wait);
 221	INIT_LIST_HEAD(&entry->list);
 222	INIT_LIST_HEAD(&entry->log_list);
 223	INIT_LIST_HEAD(&entry->root_extent_list);
 224	INIT_LIST_HEAD(&entry->work_list);
 225	init_completion(&entry->completion);
 226
 227	trace_btrfs_ordered_extent_add(inode, entry);
 228
 229	spin_lock_irq(&tree->lock);
 230	node = tree_insert(&tree->tree, file_offset,
 231			   &entry->rb_node);
 232	if (node)
 233		btrfs_panic(fs_info, -EEXIST,
 234				"inconsistency in ordered tree at offset %llu",
 235				file_offset);
 236	spin_unlock_irq(&tree->lock);
 237
 238	spin_lock(&root->ordered_extent_lock);
 239	list_add_tail(&entry->root_extent_list,
 240		      &root->ordered_extents);
 241	root->nr_ordered_extents++;
 242	if (root->nr_ordered_extents == 1) {
 243		spin_lock(&fs_info->ordered_root_lock);
 244		BUG_ON(!list_empty(&root->ordered_root));
 245		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
 246		spin_unlock(&fs_info->ordered_root_lock);
 247	}
 248	spin_unlock(&root->ordered_extent_lock);
 249
 250	/*
 251	 * We don't need the count_max_extents here, we can assume that all of
 252	 * that work has been done at higher layers, so this is truly the
 253	 * smallest the extent is going to get.
 254	 */
 255	spin_lock(&inode->lock);
 256	btrfs_mod_outstanding_extents(inode, 1);
 257	spin_unlock(&inode->lock);
 258
 259	return 0;
 260}
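
/*
 * Usage sketch (editor's illustration, not part of the kernel file): record
 * an ordered extent for a plain, uncompressed regular write once disk space
 * has been allocated.  The example_* helper name is hypothetical; for a
 * non-compressed write, num_bytes, ram_bytes and disk_num_bytes are all
 * equal and the unencoded data offset is 0.
 */
static int example_record_regular_write(struct btrfs_inode *inode, u64 start,
					u64 len, u64 disk_bytenr)
{
	return btrfs_add_ordered_extent(inode, start, len, len, disk_bytenr,
					len, 0, 1 << BTRFS_ORDERED_REGULAR,
					BTRFS_COMPRESS_NONE);
}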
 261
 262/*
 263 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 264 * when an ordered extent is finished.  If the list covers more than one
 265 * ordered extent, it is split across multiples.
 266 */
 267void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
 268			   struct btrfs_ordered_sum *sum)
 269{
 270	struct btrfs_ordered_inode_tree *tree;
 271
 272	tree = &BTRFS_I(entry->inode)->ordered_tree;
 273	spin_lock_irq(&tree->lock);
 274	list_add_tail(&sum->list, &entry->list);
 275	spin_unlock_irq(&tree->lock);
 276}
 277
 278static void finish_ordered_fn(struct btrfs_work *work)
 279{
 280	struct btrfs_ordered_extent *ordered_extent;
 281
 282	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
 283	btrfs_finish_ordered_io(ordered_extent);
 284}
 285
 286/*
  287 * Mark all ordered extent IO inside the specified range as finished.
 288 *
 289 * @page:	 The involved page for the operation.
 290 *		 For uncompressed buffered IO, the page status also needs to be
 291 *		 updated to indicate whether the pending ordered io is finished.
 292 *		 Can be NULL for direct IO and compressed write.
  293 *		 For these cases, the callers must ensure they never execute
  294 *		 the endio function twice.
 295 *
 296 * This function is called for endio, thus the range must have ordered
 297 * extent(s) covering it.
 298 */
 299void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
 300				    struct page *page, u64 file_offset,
 301				    u64 num_bytes, bool uptodate)
 302{
 303	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
 304	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 305	struct btrfs_workqueue *wq;
 306	struct rb_node *node;
 307	struct btrfs_ordered_extent *entry = NULL;
 308	unsigned long flags;
 309	u64 cur = file_offset;
 310
 311	if (btrfs_is_free_space_inode(inode))
 312		wq = fs_info->endio_freespace_worker;
 313	else
 314		wq = fs_info->endio_write_workers;
 315
 316	if (page)
 317		ASSERT(page->mapping && page_offset(page) <= file_offset &&
 318		       file_offset + num_bytes <= page_offset(page) + PAGE_SIZE);
 319
 320	spin_lock_irqsave(&tree->lock, flags);
 321	while (cur < file_offset + num_bytes) {
 322		u64 entry_end;
 323		u64 end;
 324		u32 len;
 325
 326		node = tree_search(tree, cur);
 327		/* No ordered extents at all */
 328		if (!node)
 329			break;
 330
 331		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 332		entry_end = entry->file_offset + entry->num_bytes;
 333		/*
 334		 * |<-- OE --->|  |
 335		 *		  cur
 336		 * Go to next OE.
 337		 */
 338		if (cur >= entry_end) {
 339			node = rb_next(node);
 340			/* No more ordered extents, exit */
 341			if (!node)
 342				break;
 343			entry = rb_entry(node, struct btrfs_ordered_extent,
 344					 rb_node);
 345
 346			/* Go to next ordered extent and continue */
 347			cur = entry->file_offset;
 348			continue;
 349		}
 350		/*
 351		 * |	|<--- OE --->|
 352		 * cur
 353		 * Go to the start of OE.
 354		 */
 355		if (cur < entry->file_offset) {
 356			cur = entry->file_offset;
 357			continue;
 358		}
 359
 360		/*
 361		 * Now we are definitely inside one ordered extent.
 362		 *
 363		 * |<--- OE --->|
 364		 *	|
 365		 *	cur
 366		 */
 367		end = min(entry->file_offset + entry->num_bytes,
 368			  file_offset + num_bytes) - 1;
 369		ASSERT(end + 1 - cur < U32_MAX);
 370		len = end + 1 - cur;
 371
 372		if (page) {
 373			/*
 374			 * Ordered (Private2) bit indicates whether we still
 375			 * have pending io unfinished for the ordered extent.
 376			 *
 377			 * If there's no such bit, we need to skip to next range.
 378			 */
 379			if (!btrfs_page_test_ordered(fs_info, page, cur, len)) {
 380				cur += len;
 381				continue;
 382			}
 383			btrfs_page_clear_ordered(fs_info, page, cur, len);
 384		}
 385
 386		/* Now we're fine to update the accounting */
 387		if (unlikely(len > entry->bytes_left)) {
 388			WARN_ON(1);
 389			btrfs_crit(fs_info,
 390"bad ordered extent accounting, root=%llu ino=%llu OE offset=%llu OE len=%llu to_dec=%u left=%llu",
 391				   inode->root->root_key.objectid,
 392				   btrfs_ino(inode),
 393				   entry->file_offset,
 394				   entry->num_bytes,
 395				   len, entry->bytes_left);
 396			entry->bytes_left = 0;
 397		} else {
 398			entry->bytes_left -= len;
 399		}
 400
 401		if (!uptodate)
 402			set_bit(BTRFS_ORDERED_IOERR, &entry->flags);
 403
 404		/*
 405		 * All the IO of the ordered extent is finished, we need to queue
 406		 * the finish_func to be executed.
 407		 */
 408		if (entry->bytes_left == 0) {
 409			set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
 410			cond_wake_up(&entry->wait);
 411			refcount_inc(&entry->refs);
 412			trace_btrfs_ordered_extent_mark_finished(inode, entry);
 413			spin_unlock_irqrestore(&tree->lock, flags);
 414			btrfs_init_work(&entry->work, finish_ordered_fn, NULL, NULL);
 415			btrfs_queue_work(wq, &entry->work);
 416			spin_lock_irqsave(&tree->lock, flags);
 417		}
 418		cur += len;
 419	}
 420	spin_unlock_irqrestore(&tree->lock, flags);
 421}
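
/*
 * Usage sketch (editor's illustration, hypothetical helper): a buffered
 * write endio handler would mark one page's worth of the range finished,
 * with @uptodate carrying the bio status.
 */
static void example_page_write_endio(struct btrfs_inode *inode,
				     struct page *page, bool uptodate)
{
	btrfs_mark_ordered_io_finished(inode, page, page_offset(page),
				       PAGE_SIZE, uptodate);
}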
 422
 423/*
 424 * Finish IO for one ordered extent across a given range.  The range can only
 425 * contain one ordered extent.
 426 *
 427 * @cached:	 The cached ordered extent. If not NULL, we can skip the tree
 428 *               search and use the ordered extent directly.
  429 *		 Will also be used to store the finished ordered extent.
 430 * @file_offset: File offset for the finished IO
  431 * @io_size:	 Length of the finished IO range
 432 *
 433 * Return true if the ordered extent is finished in the range, and update
 434 * @cached.
 435 * Return false otherwise.
 436 *
 437 * NOTE: The range can NOT cross multiple ordered extents.
  438 * Thus the caller should ensure the range doesn't cross ordered extents.
 439 */
 440bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
 441				    struct btrfs_ordered_extent **cached,
 442				    u64 file_offset, u64 io_size)
 443{
 444	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
 445	struct rb_node *node;
 446	struct btrfs_ordered_extent *entry = NULL;
 447	unsigned long flags;
 448	bool finished = false;
 449
 450	spin_lock_irqsave(&tree->lock, flags);
 451	if (cached && *cached) {
 452		entry = *cached;
 453		goto have_entry;
 454	}
 455
 456	node = tree_search(tree, file_offset);
 457	if (!node)
 458		goto out;
 459
 460	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 461have_entry:
 462	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
 463		goto out;
 464
 465	if (io_size > entry->bytes_left)
 466		btrfs_crit(inode->root->fs_info,
 467			   "bad ordered accounting left %llu size %llu",
 468		       entry->bytes_left, io_size);
 469
 470	entry->bytes_left -= io_size;
 471
 472	if (entry->bytes_left == 0) {
 473		/*
 474		 * Ensure only one caller can set the flag and finished_ret
 475		 * accordingly
 476		 */
 477		finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
 478		/* test_and_set_bit implies a barrier */
 479		cond_wake_up_nomb(&entry->wait);
 480	}
 481out:
 482	if (finished && cached && entry) {
 483		*cached = entry;
 484		refcount_inc(&entry->refs);
 485		trace_btrfs_ordered_extent_dec_test_pending(inode, entry);
 486	}
 487	spin_unlock_irqrestore(&tree->lock, flags);
 488	return finished;
 489}
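
/*
 * Usage sketch (editor's illustration, hypothetical helper): account
 * @io_size finished bytes against the single ordered extent covering
 * @file_offset; if that completed the extent, the extra reference stored
 * in @ordered must be dropped once the completion work is done.
 */
static void example_account_finished_io(struct btrfs_inode *inode,
					u64 file_offset, u64 io_size)
{
	struct btrfs_ordered_extent *ordered = NULL;

	if (btrfs_dec_test_ordered_pending(inode, &ordered, file_offset,
					   io_size)) {
		/* ... run completion work for @ordered here ... */
		btrfs_put_ordered_extent(ordered);
	}
}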
 490
 491/*
 492 * used to drop a reference on an ordered extent.  This will free
 493 * the extent if the last reference is dropped
 494 */
 495void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
 496{
 497	struct list_head *cur;
 498	struct btrfs_ordered_sum *sum;
 499
 500	trace_btrfs_ordered_extent_put(BTRFS_I(entry->inode), entry);
 501
 502	if (refcount_dec_and_test(&entry->refs)) {
 503		ASSERT(list_empty(&entry->root_extent_list));
 504		ASSERT(list_empty(&entry->log_list));
 505		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
 506		if (entry->inode)
 507			btrfs_add_delayed_iput(BTRFS_I(entry->inode));
 508		while (!list_empty(&entry->list)) {
 509			cur = entry->list.next;
 510			sum = list_entry(cur, struct btrfs_ordered_sum, list);
 511			list_del(&sum->list);
 512			kvfree(sum);
 513		}
 514		kmem_cache_free(btrfs_ordered_extent_cache, entry);
 515	}
 516}
 517
 518/*
  519 * Remove an ordered extent from the tree and wake up any waiters.
  520 * No references are dropped.
 521 */
 522void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
 523				 struct btrfs_ordered_extent *entry)
 524{
 525	struct btrfs_ordered_inode_tree *tree;
 526	struct btrfs_root *root = btrfs_inode->root;
 527	struct btrfs_fs_info *fs_info = root->fs_info;
 528	struct rb_node *node;
 529	bool pending;
 530	bool freespace_inode;
 531
 532	/*
 533	 * If this is a free space inode the thread has not acquired the ordered
 534	 * extents lockdep map.
 535	 */
 536	freespace_inode = btrfs_is_free_space_inode(btrfs_inode);
 537
 538	btrfs_lockdep_acquire(fs_info, btrfs_trans_pending_ordered);
 539	/* This is paired with btrfs_add_ordered_extent. */
 540	spin_lock(&btrfs_inode->lock);
 541	btrfs_mod_outstanding_extents(btrfs_inode, -1);
 542	spin_unlock(&btrfs_inode->lock);
 543	if (root != fs_info->tree_root) {
 544		u64 release;
 545
 546		if (test_bit(BTRFS_ORDERED_ENCODED, &entry->flags))
 547			release = entry->disk_num_bytes;
 548		else
 549			release = entry->num_bytes;
 550		btrfs_delalloc_release_metadata(btrfs_inode, release, false);
 551	}
 552
 553	percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
 554				 fs_info->delalloc_batch);
 555
 556	tree = &btrfs_inode->ordered_tree;
 557	spin_lock_irq(&tree->lock);
 558	node = &entry->rb_node;
 559	rb_erase(node, &tree->tree);
 560	RB_CLEAR_NODE(node);
 561	if (tree->last == node)
 562		tree->last = NULL;
 563	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
 564	pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
 565	spin_unlock_irq(&tree->lock);
 566
 567	/*
 568	 * The current running transaction is waiting on us, we need to let it
 569	 * know that we're complete and wake it up.
 570	 */
 571	if (pending) {
 572		struct btrfs_transaction *trans;
 573
 574		/*
 575		 * The checks for trans are just a formality, it should be set,
 576		 * but if it isn't we don't want to deref/assert under the spin
 577		 * lock, so be nice and check if trans is set, but ASSERT() so
 578		 * if it isn't set a developer will notice.
 579		 */
 580		spin_lock(&fs_info->trans_lock);
 581		trans = fs_info->running_transaction;
 582		if (trans)
 583			refcount_inc(&trans->use_count);
 584		spin_unlock(&fs_info->trans_lock);
 585
 586		ASSERT(trans);
 587		if (trans) {
 588			if (atomic_dec_and_test(&trans->pending_ordered))
 589				wake_up(&trans->pending_wait);
 590			btrfs_put_transaction(trans);
 591		}
 592	}
 593
 594	btrfs_lockdep_release(fs_info, btrfs_trans_pending_ordered);
 595
 596	spin_lock(&root->ordered_extent_lock);
 597	list_del_init(&entry->root_extent_list);
 598	root->nr_ordered_extents--;
 599
 600	trace_btrfs_ordered_extent_remove(btrfs_inode, entry);
 601
 602	if (!root->nr_ordered_extents) {
 603		spin_lock(&fs_info->ordered_root_lock);
 604		BUG_ON(list_empty(&root->ordered_root));
 605		list_del_init(&root->ordered_root);
 606		spin_unlock(&fs_info->ordered_root_lock);
 607	}
 608	spin_unlock(&root->ordered_extent_lock);
 609	wake_up(&entry->wait);
 610	if (!freespace_inode)
 611		btrfs_lockdep_release(fs_info, btrfs_ordered_extent);
 612}
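
/*
 * Usage sketch (editor's illustration, hypothetical helper): the completion
 * path removes the entry from the tree and then drops both the tree's
 * reference and its own.
 */
static void example_complete_and_release(struct btrfs_inode *inode,
					 struct btrfs_ordered_extent *ordered)
{
	btrfs_remove_ordered_extent(inode, ordered);
	btrfs_put_ordered_extent(ordered);	/* the tree's reference */
	btrfs_put_ordered_extent(ordered);	/* the caller's reference */
}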
 613
 614static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
 615{
 616	struct btrfs_ordered_extent *ordered;
 617
 618	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
 619	btrfs_start_ordered_extent(ordered, 1);
 620	complete(&ordered->completion);
 621}
 622
 623/*
 624 * wait for all the ordered extents in a root.  This is done when balancing
 625 * space between drives.
 626 */
 627u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
 628			       const u64 range_start, const u64 range_len)
 629{
 630	struct btrfs_fs_info *fs_info = root->fs_info;
 631	LIST_HEAD(splice);
 632	LIST_HEAD(skipped);
 633	LIST_HEAD(works);
 634	struct btrfs_ordered_extent *ordered, *next;
 635	u64 count = 0;
 636	const u64 range_end = range_start + range_len;
 637
 638	mutex_lock(&root->ordered_extent_mutex);
 639	spin_lock(&root->ordered_extent_lock);
 640	list_splice_init(&root->ordered_extents, &splice);
 641	while (!list_empty(&splice) && nr) {
 642		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
 643					   root_extent_list);
 644
 645		if (range_end <= ordered->disk_bytenr ||
 646		    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
 647			list_move_tail(&ordered->root_extent_list, &skipped);
 648			cond_resched_lock(&root->ordered_extent_lock);
 649			continue;
 650		}
 651
 652		list_move_tail(&ordered->root_extent_list,
 653			       &root->ordered_extents);
 654		refcount_inc(&ordered->refs);
 655		spin_unlock(&root->ordered_extent_lock);
 656
 657		btrfs_init_work(&ordered->flush_work,
 658				btrfs_run_ordered_extent_work, NULL, NULL);
 659		list_add_tail(&ordered->work_list, &works);
 660		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);
 661
 662		cond_resched();
 663		spin_lock(&root->ordered_extent_lock);
 664		if (nr != U64_MAX)
 665			nr--;
 666		count++;
 667	}
 668	list_splice_tail(&skipped, &root->ordered_extents);
 669	list_splice_tail(&splice, &root->ordered_extents);
 670	spin_unlock(&root->ordered_extent_lock);
 671
 672	list_for_each_entry_safe(ordered, next, &works, work_list) {
 673		list_del_init(&ordered->work_list);
 674		wait_for_completion(&ordered->completion);
 675		btrfs_put_ordered_extent(ordered);
 676		cond_resched();
 677	}
 678	mutex_unlock(&root->ordered_extent_mutex);
 679
 680	return count;
 681}
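
/*
 * Usage sketch (editor's illustration, hypothetical helper): flush every
 * ordered extent of a root, with no count limit and a disk byte range that
 * covers everything.
 */
static u64 example_flush_whole_root(struct btrfs_root *root)
{
	return btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
}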
 682
 683void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
 684			     const u64 range_start, const u64 range_len)
 685{
 686	struct btrfs_root *root;
 687	struct list_head splice;
 688	u64 done;
 689
 690	INIT_LIST_HEAD(&splice);
 691
 692	mutex_lock(&fs_info->ordered_operations_mutex);
 693	spin_lock(&fs_info->ordered_root_lock);
 694	list_splice_init(&fs_info->ordered_roots, &splice);
 695	while (!list_empty(&splice) && nr) {
 696		root = list_first_entry(&splice, struct btrfs_root,
 697					ordered_root);
 698		root = btrfs_grab_root(root);
 699		BUG_ON(!root);
 700		list_move_tail(&root->ordered_root,
 701			       &fs_info->ordered_roots);
 702		spin_unlock(&fs_info->ordered_root_lock);
 703
 704		done = btrfs_wait_ordered_extents(root, nr,
 705						  range_start, range_len);
 706		btrfs_put_root(root);
 707
 708		spin_lock(&fs_info->ordered_root_lock);
 709		if (nr != U64_MAX) {
 710			nr -= done;
 711		}
 712	}
 713	list_splice_tail(&splice, &fs_info->ordered_roots);
 714	spin_unlock(&fs_info->ordered_root_lock);
 715	mutex_unlock(&fs_info->ordered_operations_mutex);
 716}
 717
 718/*
 719 * Used to start IO or wait for a given ordered extent to finish.
 720 *
 721 * If wait is one, this effectively waits on page writeback for all the pages
 722 * in the extent, and it waits on the io completion code to insert
  723 * metadata into the btree corresponding to the extent.
 724 */
 725void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry, int wait)
 726{
 727	u64 start = entry->file_offset;
 728	u64 end = start + entry->num_bytes - 1;
 729	struct btrfs_inode *inode = BTRFS_I(entry->inode);
 730	bool freespace_inode;
 731
 732	trace_btrfs_ordered_extent_start(inode, entry);
 733
 734	/*
 735	 * If this is a free space inode do not take the ordered extents lockdep
 736	 * map.
 737	 */
 738	freespace_inode = btrfs_is_free_space_inode(inode);
 739
 740	/*
 741	 * pages in the range can be dirty, clean or writeback.  We
 742	 * start IO on any dirty ones so the wait doesn't stall waiting
 743	 * for the flusher thread to find them
 744	 */
 745	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
 746		filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);
 747	if (wait) {
 748		if (!freespace_inode)
 749			btrfs_might_wait_for_event(inode->root->fs_info, btrfs_ordered_extent);
 750		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
 751						 &entry->flags));
 752	}
 753}
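
/*
 * Usage sketch (editor's illustration, hypothetical helper): start and
 * synchronously wait for one ordered extent, then drop the caller's
 * reference.
 */
static void example_flush_one_ordered(struct btrfs_ordered_extent *ordered)
{
	btrfs_start_ordered_extent(ordered, 1);		/* wait == 1 */
	btrfs_put_ordered_extent(ordered);
}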
 754
 755/*
 756 * Used to wait on ordered extents across a large range of bytes.
 757 */
 758int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
 759{
 760	int ret = 0;
 761	int ret_wb = 0;
 762	u64 end;
 763	u64 orig_end;
 764	struct btrfs_ordered_extent *ordered;
 765
 766	if (start + len < start) {
 767		orig_end = OFFSET_MAX;
 768	} else {
 769		orig_end = start + len - 1;
 770		if (orig_end > OFFSET_MAX)
 771			orig_end = OFFSET_MAX;
 772	}
 773
 774	/* start IO across the range first to instantiate any delalloc
 775	 * extents
 776	 */
 777	ret = btrfs_fdatawrite_range(inode, start, orig_end);
 778	if (ret)
 779		return ret;
 780
 781	/*
 782	 * If we have a writeback error don't return immediately. Wait first
 783	 * for any ordered extents that haven't completed yet. This is to make
 784	 * sure no one can dirty the same page ranges and call writepages()
 785	 * before the ordered extents complete - to avoid failures (-EEXIST)
 786	 * when adding the new ordered extents to the ordered tree.
 787	 */
 788	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);
 789
 790	end = orig_end;
 791	while (1) {
 792		ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end);
 793		if (!ordered)
 794			break;
 795		if (ordered->file_offset > orig_end) {
 796			btrfs_put_ordered_extent(ordered);
 797			break;
 798		}
 799		if (ordered->file_offset + ordered->num_bytes <= start) {
 800			btrfs_put_ordered_extent(ordered);
 801			break;
 802		}
 803		btrfs_start_ordered_extent(ordered, 1);
 804		end = ordered->file_offset;
 805		/*
 806		 * If the ordered extent had an error save the error but don't
 807		 * exit without waiting first for all other ordered extents in
 808		 * the range to complete.
 809		 */
 810		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
 811			ret = -EIO;
 812		btrfs_put_ordered_extent(ordered);
 813		if (end == 0 || end == start)
 814			break;
 815		end--;
 816	}
 817	return ret_wb ? ret_wb : ret;
 818}
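
/*
 * Usage sketch (editor's illustration, hypothetical helper): an fsync-style
 * caller waiting for all ordered IO on the whole file.
 */
static int example_wait_whole_file(struct inode *inode)
{
	return btrfs_wait_ordered_range(inode, 0, (u64)-1);
}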
 819
 820/*
  821 * Find an ordered extent corresponding to @file_offset.  Return NULL if
  822 * nothing is found, otherwise take a reference on the extent and return it.
 823 */
 824struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
 825							 u64 file_offset)
 826{
 827	struct btrfs_ordered_inode_tree *tree;
 828	struct rb_node *node;
 829	struct btrfs_ordered_extent *entry = NULL;
 830	unsigned long flags;
 831
 832	tree = &inode->ordered_tree;
 833	spin_lock_irqsave(&tree->lock, flags);
 834	node = tree_search(tree, file_offset);
 835	if (!node)
 836		goto out;
 837
 838	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 839	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
 840		entry = NULL;
 841	if (entry) {
 842		refcount_inc(&entry->refs);
 843		trace_btrfs_ordered_extent_lookup(inode, entry);
 844	}
 845out:
 846	spin_unlock_irqrestore(&tree->lock, flags);
 847	return entry;
 848}
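
/*
 * Usage sketch (editor's illustration, hypothetical helper): check whether
 * @file_offset is covered by a pending ordered extent.  The reference taken
 * by the lookup must be dropped again.
 */
static bool example_io_pending_at(struct btrfs_inode *inode, u64 file_offset)
{
	struct btrfs_ordered_extent *oe;

	oe = btrfs_lookup_ordered_extent(inode, file_offset);
	if (!oe)
		return false;
	btrfs_put_ordered_extent(oe);
	return true;
}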
 849
 850/* Since the DIO code tries to lock a wide area we need to look for any ordered
 851 * extents that exist in the range, rather than just the start of the range.
 852 */
 853struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
 854		struct btrfs_inode *inode, u64 file_offset, u64 len)
 855{
 856	struct btrfs_ordered_inode_tree *tree;
 857	struct rb_node *node;
 858	struct btrfs_ordered_extent *entry = NULL;
 859
 860	tree = &inode->ordered_tree;
 861	spin_lock_irq(&tree->lock);
 862	node = tree_search(tree, file_offset);
 863	if (!node) {
 864		node = tree_search(tree, file_offset + len);
 865		if (!node)
 866			goto out;
 867	}
 868
 869	while (1) {
 870		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 871		if (range_overlaps(entry, file_offset, len))
 872			break;
 873
 874		if (entry->file_offset >= file_offset + len) {
 875			entry = NULL;
 876			break;
 877		}
 878		entry = NULL;
 879		node = rb_next(node);
 880		if (!node)
 881			break;
 882	}
 883out:
 884	if (entry) {
 885		refcount_inc(&entry->refs);
 886		trace_btrfs_ordered_extent_lookup_range(inode, entry);
 887	}
 888	spin_unlock_irq(&tree->lock);
 889	return entry;
 890}
 891
 892/*
 893 * Adds all ordered extents to the given list. The list ends up sorted by the
 894 * file_offset of the ordered extents.
 895 */
 896void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
 897					   struct list_head *list)
 898{
 899	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
 900	struct rb_node *n;
 901
 902	ASSERT(inode_is_locked(&inode->vfs_inode));
 903
 904	spin_lock_irq(&tree->lock);
 905	for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
 906		struct btrfs_ordered_extent *ordered;
 907
 908		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
 909
 910		if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
 911			continue;
 912
 913		ASSERT(list_empty(&ordered->log_list));
 914		list_add_tail(&ordered->log_list, list);
 915		refcount_inc(&ordered->refs);
 916		trace_btrfs_ordered_extent_lookup_for_logging(inode, ordered);
 917	}
 918	spin_unlock_irq(&tree->lock);
 919}
 920
 921/*
  922 * Look up and return any extent before 'file_offset'.  NULL is returned
  923 * if none is found.
 924 */
 925struct btrfs_ordered_extent *
 926btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
 927{
 928	struct btrfs_ordered_inode_tree *tree;
 929	struct rb_node *node;
 930	struct btrfs_ordered_extent *entry = NULL;
 931
 932	tree = &inode->ordered_tree;
 933	spin_lock_irq(&tree->lock);
 934	node = tree_search(tree, file_offset);
 935	if (!node)
 936		goto out;
 937
 938	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 939	refcount_inc(&entry->refs);
 940	trace_btrfs_ordered_extent_lookup_first(inode, entry);
 941out:
 942	spin_unlock_irq(&tree->lock);
 943	return entry;
 944}
 945
 946/*
 947 * Lookup the first ordered extent that overlaps the range
 948 * [@file_offset, @file_offset + @len).
 949 *
 950 * The difference between this and btrfs_lookup_first_ordered_extent() is
 951 * that this one won't return any ordered extent that does not overlap the range.
 952 * And the difference against btrfs_lookup_ordered_extent() is, this function
 953 * ensures the first ordered extent gets returned.
 954 */
 955struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
 956			struct btrfs_inode *inode, u64 file_offset, u64 len)
 957{
 958	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
 959	struct rb_node *node;
 960	struct rb_node *cur;
 961	struct rb_node *prev;
 962	struct rb_node *next;
 963	struct btrfs_ordered_extent *entry = NULL;
 964
 965	spin_lock_irq(&tree->lock);
 966	node = tree->tree.rb_node;
 967	/*
  968	 * Here we don't want to use tree_search(), which would use tree->last
  969	 * and screw up the search order.
  970	 * And __tree_search() can't return the adjacent ordered extents
  971	 * either, so we do our own search here.
 972	 */
 973	while (node) {
 974		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 975
 976		if (file_offset < entry->file_offset) {
 977			node = node->rb_left;
 978		} else if (file_offset >= entry_end(entry)) {
 979			node = node->rb_right;
 980		} else {
 981			/*
 982			 * Direct hit, got an ordered extent that starts at
 983			 * @file_offset
 984			 */
 985			goto out;
 986		}
 987	}
 988	if (!entry) {
 989		/* Empty tree */
 990		goto out;
 991	}
 992
 993	cur = &entry->rb_node;
 994	/* We got an entry around @file_offset, check adjacent entries */
 995	if (entry->file_offset < file_offset) {
 996		prev = cur;
 997		next = rb_next(cur);
 998	} else {
 999		prev = rb_prev(cur);
1000		next = cur;
1001	}
1002	if (prev) {
1003		entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
1004		if (range_overlaps(entry, file_offset, len))
1005			goto out;
1006	}
1007	if (next) {
1008		entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
1009		if (range_overlaps(entry, file_offset, len))
1010			goto out;
1011	}
1012	/* No ordered extent in the range */
1013	entry = NULL;
1014out:
1015	if (entry) {
1016		refcount_inc(&entry->refs);
1017		trace_btrfs_ordered_extent_lookup_first_range(inode, entry);
1018	}
1019
1020	spin_unlock_irq(&tree->lock);
1021	return entry;
1022}
1023
1024/*
1025 * Lock the passed range and ensures all pending ordered extents in it are run
1026 * to completion.
1027 *
1028 * @inode:        Inode whose ordered tree is to be searched
1029 * @start:        Beginning of range to flush
1030 * @end:          Last byte of range to lock
1031 * @cached_state: If passed, will return the extent state responsible for the
1032 *                locked range. It's the caller's responsibility to free the
1033 *                cached state.
1034 *
 1035 * Always returns with the given range locked, ensuring that after it's
 1036 * called no ordered extent in the range can be pending.
1037 */
1038void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
1039					u64 end,
1040					struct extent_state **cached_state)
1041{
1042	struct btrfs_ordered_extent *ordered;
1043	struct extent_state *cache = NULL;
1044	struct extent_state **cachedp = &cache;
1045
1046	if (cached_state)
1047		cachedp = cached_state;
1048
1049	while (1) {
1050		lock_extent(&inode->io_tree, start, end, cachedp);
1051		ordered = btrfs_lookup_ordered_range(inode, start,
1052						     end - start + 1);
1053		if (!ordered) {
1054			/*
1055			 * If no external cached_state has been passed then
1056			 * decrement the extra ref taken for cachedp since we
1057			 * aren't exposing it outside of this function
1058			 */
1059			if (!cached_state)
1060				refcount_dec(&cache->refs);
1061			break;
1062		}
1063		unlock_extent(&inode->io_tree, start, end, cachedp);
1064		btrfs_start_ordered_extent(ordered, 1);
1065		btrfs_put_ordered_extent(ordered);
1066	}
1067}
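
/*
 * Usage sketch (editor's illustration, hypothetical helper): operate on a
 * range that is locked and guaranteed free of pending ordered extents, then
 * unlock it again.
 */
static void example_with_flushed_range(struct btrfs_inode *inode, u64 start,
				       u64 end)
{
	struct extent_state *cached = NULL;

	btrfs_lock_and_flush_ordered_range(inode, start, end, &cached);
	/* ... work on [start, end] with no ordered IO pending ... */
	unlock_extent(&inode->io_tree, start, end, &cached);
}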
1068
1069/*
1070 * Lock the passed range and ensure all pending ordered extents in it are run
1071 * to completion in nowait mode.
1072 *
 1073 * Return true if the range was locked and no ordered extents were found
 1074 * pending in it, otherwise false.
1075 */
1076bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end,
1077				  struct extent_state **cached_state)
1078{
1079	struct btrfs_ordered_extent *ordered;
1080
1081	if (!try_lock_extent(&inode->io_tree, start, end, cached_state))
1082		return false;
1083
1084	ordered = btrfs_lookup_ordered_range(inode, start, end - start + 1);
1085	if (!ordered)
1086		return true;
1087
1088	btrfs_put_ordered_extent(ordered);
1089	unlock_extent(&inode->io_tree, start, end, cached_state);
1090
1091	return false;
1092}
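
/*
 * Usage sketch (editor's illustration, hypothetical helper): a nowait write
 * path tries the non-blocking variant first and tells the caller to fall
 * back to a blocking context on failure.
 */
static bool example_nowait_lock(struct btrfs_inode *inode, u64 start, u64 end,
				struct extent_state **cached)
{
	if (btrfs_try_lock_ordered_range(inode, start, end, cached))
		return true;	/* locked, no ordered extents pending */
	return false;		/* caller should retry without NOWAIT */
}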
1093
1094
1095static int clone_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pos,
1096				u64 len)
1097{
1098	struct inode *inode = ordered->inode;
1099	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
1100	u64 file_offset = ordered->file_offset + pos;
1101	u64 disk_bytenr = ordered->disk_bytenr + pos;
1102	unsigned long flags = ordered->flags & BTRFS_ORDERED_TYPE_FLAGS;
1103
1104	/*
1105	 * The splitting extent is already counted and will be added again in
1106	 * btrfs_add_ordered_extent_*(). Subtract len to avoid double counting.
1107	 */
1108	percpu_counter_add_batch(&fs_info->ordered_bytes, -len,
1109				 fs_info->delalloc_batch);
1110	WARN_ON_ONCE(flags & (1 << BTRFS_ORDERED_COMPRESSED));
1111	return btrfs_add_ordered_extent(BTRFS_I(inode), file_offset, len, len,
1112					disk_bytenr, len, 0, flags,
1113					ordered->compress_type);
1114}
1115
1116int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pre,
1117				u64 post)
1118{
1119	struct inode *inode = ordered->inode;
1120	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
1121	struct rb_node *node;
1122	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1123	int ret = 0;
1124
1125	trace_btrfs_ordered_extent_split(BTRFS_I(inode), ordered);
1126
1127	spin_lock_irq(&tree->lock);
1128	/* Remove from tree once */
1129	node = &ordered->rb_node;
1130	rb_erase(node, &tree->tree);
1131	RB_CLEAR_NODE(node);
1132	if (tree->last == node)
1133		tree->last = NULL;
1134
1135	ordered->file_offset += pre;
1136	ordered->disk_bytenr += pre;
1137	ordered->num_bytes -= (pre + post);
1138	ordered->disk_num_bytes -= (pre + post);
1139	ordered->bytes_left -= (pre + post);
1140
1141	/* Re-insert the node */
1142	node = tree_insert(&tree->tree, ordered->file_offset, &ordered->rb_node);
1143	if (node)
1144		btrfs_panic(fs_info, -EEXIST,
1145			"zoned: inconsistency in ordered tree at offset %llu",
1146			    ordered->file_offset);
1147
1148	spin_unlock_irq(&tree->lock);
1149
1150	if (pre)
1151		ret = clone_ordered_extent(ordered, 0, pre);
1152	if (ret == 0 && post)
1153		ret = clone_ordered_extent(ordered, pre + ordered->disk_num_bytes,
1154					   post);
1155
1156	return ret;
1157}
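
/*
 * Usage sketch (editor's illustration, hypothetical helper): a zoned write
 * that could only submit the middle of an ordered extent splits off @pre
 * bytes in front of and @post bytes behind the submitted part.
 */
static int example_split_around(struct btrfs_ordered_extent *ordered,
				u64 submitted_start, u64 submitted_len)
{
	u64 pre = submitted_start - ordered->file_offset;
	u64 post = ordered->num_bytes - pre - submitted_len;

	return btrfs_split_ordered_extent(ordered, pre, post);
}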
1158
1159int __init ordered_data_init(void)
1160{
1161	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
1162				     sizeof(struct btrfs_ordered_extent), 0,
1163				     SLAB_MEM_SPREAD,
1164				     NULL);
1165	if (!btrfs_ordered_extent_cache)
1166		return -ENOMEM;
1167
1168	return 0;
1169}
1170
1171void __cold ordered_data_exit(void)
1172{
1173	kmem_cache_destroy(btrfs_ordered_extent_cache);
1174}
v3.15
 
   1/*
   2 * Copyright (C) 2007 Oracle.  All rights reserved.
   3 *
   4 * This program is free software; you can redistribute it and/or
   5 * modify it under the terms of the GNU General Public
   6 * License v2 as published by the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope that it will be useful,
   9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  11 * General Public License for more details.
  12 *
  13 * You should have received a copy of the GNU General Public
  14 * License along with this program; if not, write to the
  15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  16 * Boston, MA 021110-1307, USA.
  17 */
  18
  19#include <linux/slab.h>
  20#include <linux/blkdev.h>
  21#include <linux/writeback.h>
  22#include <linux/pagevec.h>
 
 
  23#include "ctree.h"
  24#include "transaction.h"
  25#include "btrfs_inode.h"
  26#include "extent_io.h"
  27#include "disk-io.h"
 
 
 
 
 
 
  28
  29static struct kmem_cache *btrfs_ordered_extent_cache;
  30
  31static u64 entry_end(struct btrfs_ordered_extent *entry)
  32{
  33	if (entry->file_offset + entry->len < entry->file_offset)
  34		return (u64)-1;
  35	return entry->file_offset + entry->len;
  36}
  37
  38/* returns NULL if the insertion worked, or it returns the node it did find
  39 * in the tree
  40 */
  41static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
  42				   struct rb_node *node)
  43{
  44	struct rb_node **p = &root->rb_node;
  45	struct rb_node *parent = NULL;
  46	struct btrfs_ordered_extent *entry;
  47
  48	while (*p) {
  49		parent = *p;
  50		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);
  51
  52		if (file_offset < entry->file_offset)
  53			p = &(*p)->rb_left;
  54		else if (file_offset >= entry_end(entry))
  55			p = &(*p)->rb_right;
  56		else
  57			return parent;
  58	}
  59
  60	rb_link_node(node, parent, p);
  61	rb_insert_color(node, root);
  62	return NULL;
  63}
  64
  65static void ordered_data_tree_panic(struct inode *inode, int errno,
  66					       u64 offset)
  67{
  68	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
  69	btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset "
  70		    "%llu\n", offset);
  71}
  72
  73/*
  74 * look for a given offset in the tree, and if it can't be found return the
  75 * first lesser offset
  76 */
  77static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
  78				     struct rb_node **prev_ret)
  79{
  80	struct rb_node *n = root->rb_node;
  81	struct rb_node *prev = NULL;
  82	struct rb_node *test;
  83	struct btrfs_ordered_extent *entry;
  84	struct btrfs_ordered_extent *prev_entry = NULL;
  85
  86	while (n) {
  87		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
  88		prev = n;
  89		prev_entry = entry;
  90
  91		if (file_offset < entry->file_offset)
  92			n = n->rb_left;
  93		else if (file_offset >= entry_end(entry))
  94			n = n->rb_right;
  95		else
  96			return n;
  97	}
  98	if (!prev_ret)
  99		return NULL;
 100
 101	while (prev && file_offset >= entry_end(prev_entry)) {
 102		test = rb_next(prev);
 103		if (!test)
 104			break;
 105		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
 106				      rb_node);
 107		if (file_offset < entry_end(prev_entry))
 108			break;
 109
 110		prev = test;
 111	}
 112	if (prev)
 113		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
 114				      rb_node);
 115	while (prev && file_offset < entry_end(prev_entry)) {
 116		test = rb_prev(prev);
 117		if (!test)
 118			break;
 119		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
 120				      rb_node);
 121		prev = test;
 122	}
 123	*prev_ret = prev;
 124	return NULL;
 125}
 126
 127/*
 128 * helper to check if a given offset is inside a given entry
 129 */
 130static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
 131{
 132	if (file_offset < entry->file_offset ||
 133	    entry->file_offset + entry->len <= file_offset)
 134		return 0;
 135	return 1;
 136}
 137
 138static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
 139			  u64 len)
 140{
 141	if (file_offset + len <= entry->file_offset ||
 142	    entry->file_offset + entry->len <= file_offset)
 143		return 0;
 144	return 1;
 145}
 146
 147/*
 148 * look find the first ordered struct that has this offset, otherwise
 149 * the first one less than this offset
 150 */
 151static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
 152					  u64 file_offset)
 153{
 154	struct rb_root *root = &tree->tree;
 155	struct rb_node *prev = NULL;
 156	struct rb_node *ret;
 157	struct btrfs_ordered_extent *entry;
 158
 159	if (tree->last) {
 160		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
 161				 rb_node);
 162		if (offset_in_entry(entry, file_offset))
 163			return tree->last;
 164	}
 165	ret = __tree_search(root, file_offset, &prev);
 166	if (!ret)
 167		ret = prev;
 168	if (ret)
 169		tree->last = ret;
 170	return ret;
 171}
 172
 173/* allocate and add a new ordered_extent into the per-inode tree.
 174 * file_offset is the logical offset in the file
 175 *
 176 * start is the disk block number of an extent already reserved in the
 177 * extent allocation tree
 
 
 
 
 
 
 
 178 *
 179 * len is the length of the extent
 
 180 *
 181 * The tree is given a single reference on the ordered extent that was
 182 * inserted.
 183 */
 184static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
 185				      u64 start, u64 len, u64 disk_len,
 186				      int type, int dio, int compress_type)
 187{
 188	struct btrfs_root *root = BTRFS_I(inode)->root;
 189	struct btrfs_ordered_inode_tree *tree;
 
 
 190	struct rb_node *node;
 191	struct btrfs_ordered_extent *entry;
 
 192
 193	tree = &BTRFS_I(inode)->ordered_tree;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 194	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
 195	if (!entry)
 196		return -ENOMEM;
 197
 198	entry->file_offset = file_offset;
 199	entry->start = start;
 200	entry->len = len;
 201	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) &&
 202	    !(type == BTRFS_ORDERED_NOCOW))
 203		entry->csum_bytes_left = disk_len;
 204	entry->disk_len = disk_len;
 205	entry->bytes_left = len;
 206	entry->inode = igrab(inode);
 207	entry->compress_type = compress_type;
 208	entry->truncated_len = (u64)-1;
 209	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
 210		set_bit(type, &entry->flags);
 211
 212	if (dio)
 213		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);
 
 
 
 214
 215	/* one ref for the tree */
 216	atomic_set(&entry->refs, 1);
 217	init_waitqueue_head(&entry->wait);
 218	INIT_LIST_HEAD(&entry->list);
 
 219	INIT_LIST_HEAD(&entry->root_extent_list);
 220	INIT_LIST_HEAD(&entry->work_list);
 221	init_completion(&entry->completion);
 222	INIT_LIST_HEAD(&entry->log_list);
 223
 224	trace_btrfs_ordered_extent_add(inode, entry);
 225
 226	spin_lock_irq(&tree->lock);
 227	node = tree_insert(&tree->tree, file_offset,
 228			   &entry->rb_node);
 229	if (node)
 230		ordered_data_tree_panic(inode, -EEXIST, file_offset);
 
 
 231	spin_unlock_irq(&tree->lock);
 232
 233	spin_lock(&root->ordered_extent_lock);
 234	list_add_tail(&entry->root_extent_list,
 235		      &root->ordered_extents);
 236	root->nr_ordered_extents++;
 237	if (root->nr_ordered_extents == 1) {
 238		spin_lock(&root->fs_info->ordered_root_lock);
 239		BUG_ON(!list_empty(&root->ordered_root));
 240		list_add_tail(&root->ordered_root,
 241			      &root->fs_info->ordered_roots);
 242		spin_unlock(&root->fs_info->ordered_root_lock);
 243	}
 244	spin_unlock(&root->ordered_extent_lock);
 245
 
 
 
 
 
 
 
 
 
 246	return 0;
 247}
 248
 249int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
 250			     u64 start, u64 len, u64 disk_len, int type)
 251{
 252	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
 253					  disk_len, type, 0,
 254					  BTRFS_COMPRESS_NONE);
 255}
 256
 257int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
 258				 u64 start, u64 len, u64 disk_len, int type)
 259{
 260	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
 261					  disk_len, type, 1,
 262					  BTRFS_COMPRESS_NONE);
 263}
 264
 265int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
 266				      u64 start, u64 len, u64 disk_len,
 267				      int type, int compress_type)
 268{
 269	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
 270					  disk_len, type, 0,
 271					  compress_type);
 272}
 273
 274/*
 275 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 276 * when an ordered extent is finished.  If the list covers more than one
 277 * ordered extent, it is split across multiples.
 278 */
 279void btrfs_add_ordered_sum(struct inode *inode,
 280			   struct btrfs_ordered_extent *entry,
 281			   struct btrfs_ordered_sum *sum)
 282{
 283	struct btrfs_ordered_inode_tree *tree;
 284
 285	tree = &BTRFS_I(inode)->ordered_tree;
 286	spin_lock_irq(&tree->lock);
 287	list_add_tail(&sum->list, &entry->list);
 288	WARN_ON(entry->csum_bytes_left < sum->len);
 289	entry->csum_bytes_left -= sum->len;
 290	if (entry->csum_bytes_left == 0)
 291		wake_up(&entry->wait);
 292	spin_unlock_irq(&tree->lock);
 293}
 294
 
 
 
 
 
 
 
 
 295/*
 296 * this is used to account for finished IO across a given range
 297 * of the file.  The IO may span ordered extents.  If
 298 * a given ordered_extent is completely done, 1 is returned, otherwise
 299 * 0.
 300 *
 301 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 302 * to make sure this function only returns 1 once for a given ordered extent.
 
 
 
 
 303 *
 304 * file_offset is updated to one byte past the range that is recorded as
 305 * complete.  This allows you to walk forward in the file.
 306 */
 307int btrfs_dec_test_first_ordered_pending(struct inode *inode,
 308				   struct btrfs_ordered_extent **cached,
 309				   u64 *file_offset, u64 io_size, int uptodate)
 310{
 311	struct btrfs_ordered_inode_tree *tree;
 
 
 312	struct rb_node *node;
 313	struct btrfs_ordered_extent *entry = NULL;
 314	int ret;
 315	unsigned long flags;
 316	u64 dec_end;
 317	u64 dec_start;
 318	u64 to_dec;
 
 
 
 
 
 
 
 319
 320	tree = &BTRFS_I(inode)->ordered_tree;
 321	spin_lock_irqsave(&tree->lock, flags);
 322	node = tree_search(tree, *file_offset);
 323	if (!node) {
 324		ret = 1;
 325		goto out;
 326	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 327
 328	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 329	if (!offset_in_entry(entry, *file_offset)) {
 330		ret = 1;
 331		goto out;
 332	}
 
 
 
 
 
 
 
 
 
 333
 334	dec_start = max(*file_offset, entry->file_offset);
 335	dec_end = min(*file_offset + io_size, entry->file_offset +
 336		      entry->len);
 337	*file_offset = dec_end;
 338	if (dec_start > dec_end) {
 339		btrfs_crit(BTRFS_I(inode)->root->fs_info,
 340			"bad ordering dec_start %llu end %llu", dec_start, dec_end);
 341	}
 342	to_dec = dec_end - dec_start;
 343	if (to_dec > entry->bytes_left) {
 344		btrfs_crit(BTRFS_I(inode)->root->fs_info,
 345			"bad ordered accounting left %llu size %llu",
 346			entry->bytes_left, to_dec);
 347	}
 348	entry->bytes_left -= to_dec;
 349	if (!uptodate)
 350		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);
 351
 352	if (entry->bytes_left == 0) {
 353		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
 354		if (waitqueue_active(&entry->wait))
 355			wake_up(&entry->wait);
 356	} else {
 357		ret = 1;
 358	}
 359out:
 360	if (!ret && cached && entry) {
 361		*cached = entry;
 362		atomic_inc(&entry->refs);
 
 
 
 
 363	}
 364	spin_unlock_irqrestore(&tree->lock, flags);
 365	return ret == 0;
 366}
 367
 368/*
 369 * this is used to account for finished IO across a given range
 370 * of the file.  The IO should not span ordered extents.  If
 371 * a given ordered_extent is completely done, 1 is returned, otherwise
 372 * 0.
 
 
 
 
 
 
 
 
 373 *
 374 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 375 * to make sure this function only returns 1 once for a given ordered extent.
 376 */
 377int btrfs_dec_test_ordered_pending(struct inode *inode,
 378				   struct btrfs_ordered_extent **cached,
 379				   u64 file_offset, u64 io_size, int uptodate)
 380{
 381	struct btrfs_ordered_inode_tree *tree;
 382	struct rb_node *node;
 383	struct btrfs_ordered_extent *entry = NULL;
 384	unsigned long flags;
 385	int ret;
 386
 387	tree = &BTRFS_I(inode)->ordered_tree;
 388	spin_lock_irqsave(&tree->lock, flags);
 389	if (cached && *cached) {
 390		entry = *cached;
 391		goto have_entry;
 392	}
 393
 394	node = tree_search(tree, file_offset);
 395	if (!node) {
 396		ret = 1;
 397		goto out;
 398	}
 399
 400	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 401have_entry:
 402	if (!offset_in_entry(entry, file_offset)) {
 403		ret = 1;
 404		goto out;
 405	}
 406
 407	if (io_size > entry->bytes_left) {
 408		btrfs_crit(BTRFS_I(inode)->root->fs_info,
 409			   "bad ordered accounting left %llu size %llu",
 410		       entry->bytes_left, io_size);
 411	}
 412	entry->bytes_left -= io_size;
 413	if (!uptodate)
 414		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);
 415
 416	if (entry->bytes_left == 0) {
 417		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
 418		if (waitqueue_active(&entry->wait))
 419			wake_up(&entry->wait);
 420	} else {
 421		ret = 1;
 
 
 422	}
 423out:
 424	if (!ret && cached && entry) {
 425		*cached = entry;
 426		atomic_inc(&entry->refs);
 
 427	}
 428	spin_unlock_irqrestore(&tree->lock, flags);
 429	return ret == 0;
 430}
 431
 432/* Needs to either be called under a log transaction or the log_mutex */
 433void btrfs_get_logged_extents(struct inode *inode,
 434			      struct list_head *logged_list)
 435{
 436	struct btrfs_ordered_inode_tree *tree;
 437	struct btrfs_ordered_extent *ordered;
 438	struct rb_node *n;
 439
 440	tree = &BTRFS_I(inode)->ordered_tree;
 441	spin_lock_irq(&tree->lock);
 442	for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
 443		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
 444		if (!list_empty(&ordered->log_list))
 445			continue;
 446		list_add_tail(&ordered->log_list, logged_list);
 447		atomic_inc(&ordered->refs);
 448	}
 449	spin_unlock_irq(&tree->lock);
 450}
 451
 452void btrfs_put_logged_extents(struct list_head *logged_list)
 453{
 454	struct btrfs_ordered_extent *ordered;
 455
 456	while (!list_empty(logged_list)) {
 457		ordered = list_first_entry(logged_list,
 458					   struct btrfs_ordered_extent,
 459					   log_list);
 460		list_del_init(&ordered->log_list);
 461		btrfs_put_ordered_extent(ordered);
 462	}
 463}
 464
 465void btrfs_submit_logged_extents(struct list_head *logged_list,
 466				 struct btrfs_root *log)
 467{
 468	int index = log->log_transid % 2;
 469
 470	spin_lock_irq(&log->log_extents_lock[index]);
 471	list_splice_tail(logged_list, &log->logged_list[index]);
 472	spin_unlock_irq(&log->log_extents_lock[index]);
 473}
 474
 475void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
 476{
 477	struct btrfs_ordered_extent *ordered;
 478	int index = transid % 2;
 479
 480	spin_lock_irq(&log->log_extents_lock[index]);
 481	while (!list_empty(&log->logged_list[index])) {
 482		ordered = list_first_entry(&log->logged_list[index],
 483					   struct btrfs_ordered_extent,
 484					   log_list);
 485		list_del_init(&ordered->log_list);
 486		spin_unlock_irq(&log->log_extents_lock[index]);
 487		wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
 488						   &ordered->flags));
 489		btrfs_put_ordered_extent(ordered);
 490		spin_lock_irq(&log->log_extents_lock[index]);
 491	}
 492	spin_unlock_irq(&log->log_extents_lock[index]);
 493}
 494
 495void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
 496{
 497	struct btrfs_ordered_extent *ordered;
 498	int index = transid % 2;
 499
 500	spin_lock_irq(&log->log_extents_lock[index]);
 501	while (!list_empty(&log->logged_list[index])) {
 502		ordered = list_first_entry(&log->logged_list[index],
 503					   struct btrfs_ordered_extent,
 504					   log_list);
 505		list_del_init(&ordered->log_list);
 506		spin_unlock_irq(&log->log_extents_lock[index]);
 507		btrfs_put_ordered_extent(ordered);
 508		spin_lock_irq(&log->log_extents_lock[index]);
 509	}
 510	spin_unlock_irq(&log->log_extents_lock[index]);
 511}
 512
 513/*
 514 * used to drop a reference on an ordered extent.  This will free
 515 * the extent if the last reference is dropped
 516 */
 517void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
 518{
 519	struct list_head *cur;
 520	struct btrfs_ordered_sum *sum;
 521
 522	trace_btrfs_ordered_extent_put(entry->inode, entry);
 523
 524	if (atomic_dec_and_test(&entry->refs)) {
 
 
 
 525		if (entry->inode)
 526			btrfs_add_delayed_iput(entry->inode);
 527		while (!list_empty(&entry->list)) {
 528			cur = entry->list.next;
 529			sum = list_entry(cur, struct btrfs_ordered_sum, list);
 530			list_del(&sum->list);
 531			kfree(sum);
 532		}
 533		kmem_cache_free(btrfs_ordered_extent_cache, entry);
 534	}
 535}
 536
 537/*
 538 * remove an ordered extent from the tree.  No references are dropped
 539 * and waiters are woken up.
 540 */
 541void btrfs_remove_ordered_extent(struct inode *inode,
 542				 struct btrfs_ordered_extent *entry)
 543{
 544	struct btrfs_ordered_inode_tree *tree;
 545	struct btrfs_root *root = BTRFS_I(inode)->root;
 
 546	struct rb_node *node;
 
 
 
 
 
 
 
 
 547
 548	tree = &BTRFS_I(inode)->ordered_tree;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 549	spin_lock_irq(&tree->lock);
 550	node = &entry->rb_node;
 551	rb_erase(node, &tree->tree);
 
 552	if (tree->last == node)
 553		tree->last = NULL;
 554	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
 
 555	spin_unlock_irq(&tree->lock);
 556
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 557	spin_lock(&root->ordered_extent_lock);
 558	list_del_init(&entry->root_extent_list);
 559	root->nr_ordered_extents--;
 560
 561	trace_btrfs_ordered_extent_remove(inode, entry);
 562
 563	/*
 564	 * If we have no more ordered extents for this inode and no dirty
 565	 * pages, we can safely remove the inode from the list of ordered
 566	 * operations.
 567	 */
 568	if (RB_EMPTY_ROOT(&tree->tree) &&
 569	    !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
 570		spin_lock(&root->fs_info->ordered_root_lock);
 571		list_del_init(&BTRFS_I(inode)->ordered_operations);
 572		spin_unlock(&root->fs_info->ordered_root_lock);
 573	}
 574
 575	if (!root->nr_ordered_extents) {
 576		spin_lock(&root->fs_info->ordered_root_lock);
 577		BUG_ON(list_empty(&root->ordered_root));
 578		list_del_init(&root->ordered_root);
 579		spin_unlock(&root->fs_info->ordered_root_lock);
 580	}
 581	spin_unlock(&root->ordered_extent_lock);
 582	wake_up(&entry->wait);
 583}
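
/*
 * Sketch (not in the original file; the example_* name is made up): the
 * usual completion sequence once all IO for an ordered extent is done,
 * e.g. at the end of an endio handler.  The final put drops the
 * reference the tree has held since the extent was added.
 */
static void example_complete_sketch(struct inode *inode,
				    struct btrfs_ordered_extent *ordered)
{
	/* unlink from the tree and wake anybody waiting on completion */
	btrfs_remove_ordered_extent(inode, ordered);
	/* drop the tree's reference; frees the entry if it was the last */
	btrfs_put_ordered_extent(ordered);
}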
 584
 585static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
 586{
 587	struct btrfs_ordered_extent *ordered;
 588
 589	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
 590	btrfs_start_ordered_extent(ordered->inode, ordered, 1);
 591	complete(&ordered->completion);
 592}
 593
 594/*
 595 * wait for all the ordered extents in a root.  This is done when balancing
 596 * space between drives.
 597 */
 598int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr)
 599{
 600	struct list_head splice, works;
 601	struct btrfs_ordered_extent *ordered, *next;
 602	int count = 0;
 603
 604	INIT_LIST_HEAD(&splice);
 605	INIT_LIST_HEAD(&works);
 606
 607	mutex_lock(&root->ordered_extent_mutex);
 608	spin_lock(&root->ordered_extent_lock);
 609	list_splice_init(&root->ordered_extents, &splice);
 610	while (!list_empty(&splice) && nr) {
 611		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
 612					   root_extent_list);
 613		list_move_tail(&ordered->root_extent_list,
 614			       &root->ordered_extents);
 615		atomic_inc(&ordered->refs);
 616		spin_unlock(&root->ordered_extent_lock);
 617
 618		btrfs_init_work(&ordered->flush_work,
 619				btrfs_run_ordered_extent_work, NULL, NULL);
 620		list_add_tail(&ordered->work_list, &works);
 621		btrfs_queue_work(root->fs_info->flush_workers,
 622				 &ordered->flush_work);
 623
 624		cond_resched();
 625		spin_lock(&root->ordered_extent_lock);
 626		if (nr != -1)
 627			nr--;
 628		count++;
 629	}
 630	list_splice_tail(&splice, &root->ordered_extents);
 631	spin_unlock(&root->ordered_extent_lock);
 632
 633	list_for_each_entry_safe(ordered, next, &works, work_list) {
 634		list_del_init(&ordered->work_list);
 635		wait_for_completion(&ordered->completion);
 636		btrfs_put_ordered_extent(ordered);
 637		cond_resched();
 638	}
 639	mutex_unlock(&root->ordered_extent_mutex);
 640
 641	return count;
 642}
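
/*
 * Sketch (not in the original file; the example_* name is made up):
 * nr == -1 means "no limit", so the call below queues flush work for
 * every ordered extent currently on the root, waits for all of it, and
 * returns how many extents were processed.
 */
static int example_flush_one_root(struct btrfs_root *root)
{
	return btrfs_wait_ordered_extents(root, -1);
}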
 643
 644void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr)
 645{
 646	struct btrfs_root *root;
 647	struct list_head splice;
 648	int done;
 649
 650	INIT_LIST_HEAD(&splice);
 651
 652	mutex_lock(&fs_info->ordered_operations_mutex);
 653	spin_lock(&fs_info->ordered_root_lock);
 654	list_splice_init(&fs_info->ordered_roots, &splice);
 655	while (!list_empty(&splice) && nr) {
 656		root = list_first_entry(&splice, struct btrfs_root,
 657					ordered_root);
 658		root = btrfs_grab_fs_root(root);
 659		BUG_ON(!root);
 660		list_move_tail(&root->ordered_root,
 661			       &fs_info->ordered_roots);
 662		spin_unlock(&fs_info->ordered_root_lock);
 663
 664		done = btrfs_wait_ordered_extents(root, nr);
 665		btrfs_put_fs_root(root);
 666
 667		spin_lock(&fs_info->ordered_root_lock);
 668		if (nr != -1) {
 669			nr -= done;
 670			WARN_ON(nr < 0);
 671		}
 672	}
 673	list_splice_tail(&splice, &fs_info->ordered_roots);
 674	spin_unlock(&fs_info->ordered_root_lock);
 675	mutex_unlock(&fs_info->ordered_operations_mutex);
 676}
 677
 678/*
 679 * This is used during transaction commit to write all the inodes
 680 * added to the ordered operations list.  These files must be fully on
 681 * disk before the transaction commits.
 682 *
 683 * We have two modes here: one just starts the IO via filemap_flush,
 684 * and the other waits for all of the IO.  When we wait, we have an
 685 * extra check to make sure the ordered operations list really is
 686 * empty before we return.
 687 */
 688int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
 689				 struct btrfs_root *root, int wait)
 690{
 691	struct btrfs_inode *btrfs_inode;
 692	struct inode *inode;
 693	struct btrfs_transaction *cur_trans = trans->transaction;
 694	struct list_head splice;
 695	struct list_head works;
 696	struct btrfs_delalloc_work *work, *next;
 697	int ret = 0;
 698
 699	INIT_LIST_HEAD(&splice);
 700	INIT_LIST_HEAD(&works);
 701
 702	mutex_lock(&root->fs_info->ordered_extent_flush_mutex);
 703	spin_lock(&root->fs_info->ordered_root_lock);
 704	list_splice_init(&cur_trans->ordered_operations, &splice);
 705	while (!list_empty(&splice)) {
 706		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
 707				   ordered_operations);
 708		inode = &btrfs_inode->vfs_inode;
 709
 710		list_del_init(&btrfs_inode->ordered_operations);
 711
 712		/*
 713		 * the inode may be getting freed (in sys_unlink path).
 714		 */
 715		inode = igrab(inode);
 716		if (!inode)
 717			continue;
 718
 719		if (!wait)
 720			list_add_tail(&BTRFS_I(inode)->ordered_operations,
 721				      &cur_trans->ordered_operations);
 722		spin_unlock(&root->fs_info->ordered_root_lock);
 723
 724		work = btrfs_alloc_delalloc_work(inode, wait, 1);
 725		if (!work) {
 726			spin_lock(&root->fs_info->ordered_root_lock);
 727			if (list_empty(&BTRFS_I(inode)->ordered_operations))
 728				list_add_tail(&btrfs_inode->ordered_operations,
 729					      &splice);
 730			list_splice_tail(&splice,
 731					 &cur_trans->ordered_operations);
 732			spin_unlock(&root->fs_info->ordered_root_lock);
 733			ret = -ENOMEM;
 734			goto out;
 735		}
 736		list_add_tail(&work->list, &works);
 737		btrfs_queue_work(root->fs_info->flush_workers,
 738				 &work->work);
 739
 740		cond_resched();
 741		spin_lock(&root->fs_info->ordered_root_lock);
 742	}
 743	spin_unlock(&root->fs_info->ordered_root_lock);
 744out:
 745	list_for_each_entry_safe(work, next, &works, list) {
 746		list_del_init(&work->list);
 747		btrfs_wait_and_free_delalloc_work(work);
 748	}
 749	mutex_unlock(&root->fs_info->ordered_extent_flush_mutex);
 750	return ret;
 751}
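
/*
 * Sketch (not in the original file; the example_* name is made up): how
 * a transaction commit might drive the two modes described above.  The
 * first pass only starts the IO; the second pass blocks until the
 * ordered operations list drains.
 */
static int example_commit_flush_sketch(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root)
{
	int ret;

	ret = btrfs_run_ordered_operations(trans, root, 0);	/* start IO */
	if (ret)
		return ret;
	return btrfs_run_ordered_operations(trans, root, 1);	/* wait */
}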
 752
 753/*
 754 * Used to start IO or wait for a given ordered extent to finish.
 755 *
 756 * If wait is set, this effectively waits on page writeback for all the
 757 * pages in the extent, and on the IO completion code to insert the
 758 * metadata for the extent into the btree.
 759 */
 760void btrfs_start_ordered_extent(struct inode *inode,
 761				       struct btrfs_ordered_extent *entry,
 762				       int wait)
 763{
 764	u64 start = entry->file_offset;
 765	u64 end = start + entry->len - 1;
 766
 767	trace_btrfs_ordered_extent_start(inode, entry);
 768
 769	/*
 770	 * pages in the range can be dirty, clean or writeback.  We
 771	 * start IO on any dirty ones so the wait doesn't stall waiting
 772	 * for the flusher thread to find them
 773	 */
 774	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
 775		filemap_fdatawrite_range(inode->i_mapping, start, end);
 776	if (wait) {
 777		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
 778						 &entry->flags));
 779	}
 780}
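
/*
 * Sketch (not in the original file; the example_* name is made up):
 * because a zero 'wait' only kicks off writeback, a caller can overlap
 * other work with the IO and block later with a second call.
 */
static void example_start_then_wait(struct inode *inode,
				    struct btrfs_ordered_extent *ordered)
{
	btrfs_start_ordered_extent(inode, ordered, 0);	/* start IO only */
	/* ... do unrelated work while the pages are written out ... */
	btrfs_start_ordered_extent(inode, ordered, 1);	/* now block */
}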
 781
 782/*
 783 * Used to wait on ordered extents across a large range of bytes.
 784 */
 785int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
 786{
 787	int ret = 0;
 788	u64 end;
 789	u64 orig_end;
 790	struct btrfs_ordered_extent *ordered;
 791
 792	if (start + len < start) {
 793		orig_end = INT_LIMIT(loff_t);
 794	} else {
 795		orig_end = start + len - 1;
 796		if (orig_end > INT_LIMIT(loff_t))
 797			orig_end = INT_LIMIT(loff_t);
 798	}
 799
 800	/* start IO across the range first to instantiate any delalloc
 801	 * extents
 802	 */
 803	ret = filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
 804	if (ret)
 805		return ret;
 806	/*
 807	 * With compression we will find and lock a dirty page, clear its
 808	 * dirty bit, set up an async extent, and immediately return
 809	 * with the entire range locked but with nobody actually marked with
 810	 * writeback.  So we can't just filemap_write_and_wait_range() and
 811	 * expect it to work since it will just kick off a thread to do the
 812	 * actual work.  So we need to call filemap_fdatawrite_range _again_
 813	 * since it will wait on the page lock, which won't be unlocked until
 814	 * after the pages have been marked as writeback and so we're good to go
 815	 * from there.  We have to do this otherwise we'll miss the ordered
 816	 * extents and that results in badness.  Please Josef, do not think you
 817	 * know better and pull this out at some point in the future, it is
 818	 * right and you are wrong.
 819	 */
 820	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
 821		     &BTRFS_I(inode)->runtime_flags)) {
 822		ret = filemap_fdatawrite_range(inode->i_mapping, start,
 823					       orig_end);
 824		if (ret)
 825			return ret;
 826	}
 827	ret = filemap_fdatawait_range(inode->i_mapping, start, orig_end);
 828	if (ret)
 829		return ret;
 830
 831	end = orig_end;
 832	while (1) {
 833		ordered = btrfs_lookup_first_ordered_extent(inode, end);
 834		if (!ordered)
 835			break;
 836		if (ordered->file_offset > orig_end) {
 837			btrfs_put_ordered_extent(ordered);
 838			break;
 839		}
 840		if (ordered->file_offset + ordered->len <= start) {
 841			btrfs_put_ordered_extent(ordered);
 842			break;
 843		}
 844		btrfs_start_ordered_extent(inode, ordered, 1);
 845		end = ordered->file_offset;
 846		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
 847			ret = -EIO;
 848		btrfs_put_ordered_extent(ordered);
 849		if (ret || end == 0 || end == start)
 850			break;
 851		end--;
 852	}
 853	return ret;
 854}
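
/*
 * Sketch (not in the original file; the example_* name is made up):
 * a length of (u64)-1 drives orig_end past INT_LIMIT(loff_t), so the
 * clamping at the top of the function pins it to the largest supported
 * offset and the whole file is flushed and waited on, as an fsync-style
 * caller would want.
 */
static int example_wait_whole_file(struct inode *inode)
{
	return btrfs_wait_ordered_range(inode, 0, (u64)-1);
}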
 855
 856/*
 857 * find an ordered extent corresponding to file_offset.  return NULL if
 858 * nothing is found, otherwise take a reference on the extent and return it
 859 */
 860struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
 861							 u64 file_offset)
 862{
 863	struct btrfs_ordered_inode_tree *tree;
 864	struct rb_node *node;
 865	struct btrfs_ordered_extent *entry = NULL;
 866
 867	tree = &BTRFS_I(inode)->ordered_tree;
 868	spin_lock_irq(&tree->lock);
 869	node = tree_search(tree, file_offset);
 870	if (!node)
 871		goto out;
 872
 873	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 874	if (!offset_in_entry(entry, file_offset))
 875		entry = NULL;
 876	if (entry)
 877		atomic_inc(&entry->refs);
 878out:
 879	spin_unlock_irq(&tree->lock);
 880	return entry;
 881}
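
/*
 * Sketch (not in the original file; the example_* name is made up): a
 * typical "is this offset still under IO?" probe built on the lookup
 * above.  The lookup takes a reference that must be dropped again.
 */
static bool example_offset_pending(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_extent *ordered;

	ordered = btrfs_lookup_ordered_extent(inode, file_offset);
	if (!ordered)
		return false;
	btrfs_put_ordered_extent(ordered);	/* drop the lookup reference */
	return true;
}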
 882
 883/* Since the DIO code tries to lock a wide area we need to look for any ordered
 884 * extents that exist in the range, rather than just the start of the range.
 885 */
 886struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
 887							u64 file_offset,
 888							u64 len)
 889{
 890	struct btrfs_ordered_inode_tree *tree;
 891	struct rb_node *node;
 892	struct btrfs_ordered_extent *entry = NULL;
 893
 894	tree = &BTRFS_I(inode)->ordered_tree;
 895	spin_lock_irq(&tree->lock);
 896	node = tree_search(tree, file_offset);
 897	if (!node) {
 898		node = tree_search(tree, file_offset + len);
 899		if (!node)
 900			goto out;
 901	}
 902
 903	while (1) {
 904		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 905		if (range_overlaps(entry, file_offset, len))
 906			break;
 907
 908		if (entry->file_offset >= file_offset + len) {
 909			entry = NULL;
 910			break;
 911		}
 912		entry = NULL;
 913		node = rb_next(node);
 914		if (!node)
 915			break;
 916	}
 917out:
 918	if (entry)
 919		atomic_inc(&entry->refs);
 920	spin_unlock_irq(&tree->lock);
 921	return entry;
 922}
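
/*
 * Sketch (not in the original file; the example_* name is made up): the
 * DIO-style pattern the comment above alludes to, draining every
 * ordered extent that overlaps a range before touching it.
 */
static void example_drain_range(struct inode *inode, u64 start, u64 len)
{
	struct btrfs_ordered_extent *ordered;

	while ((ordered = btrfs_lookup_ordered_range(inode, start, len))) {
		btrfs_start_ordered_extent(inode, ordered, 1);	/* wait */
		btrfs_put_ordered_extent(ordered);
	}
}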
 923
 924/*
 925 * Lookup the ordered extent that covers 'file_offset'; failing that,
 926 * the nearest neighbouring extent is returned.  NULL if the tree is empty.
 927 */
 928struct btrfs_ordered_extent *
 929btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
 930{
 931	struct btrfs_ordered_inode_tree *tree;
 932	struct rb_node *node;
 933	struct btrfs_ordered_extent *entry = NULL;
 934
 935	tree = &BTRFS_I(inode)->ordered_tree;
 936	spin_lock_irq(&tree->lock);
 937	node = tree_search(tree, file_offset);
 938	if (!node)
 939		goto out;
 940
 941	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 942	atomic_inc(&entry->refs);
 943out:
 944	spin_unlock_irq(&tree->lock);
 945	return entry;
 946}
 947
 948/*
 949 * After an extent is done, call this to conditionally update the on disk
 950 * i_size.  i_size is updated to cover any fully written part of the file.
 951 */
 952int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
 953				struct btrfs_ordered_extent *ordered)
 954{
 955	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
 956	u64 disk_i_size;
 957	u64 new_i_size;
 958	u64 i_size = i_size_read(inode);
 959	struct rb_node *node;
 960	struct rb_node *prev = NULL;
 961	struct btrfs_ordered_extent *test;
 962	int ret = 1;
 963
 964	spin_lock_irq(&tree->lock);
 965	if (ordered) {
 966		offset = entry_end(ordered);
 967		if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags))
 968			offset = min(offset,
 969				     ordered->file_offset +
 970				     ordered->truncated_len);
 971	} else {
 972		offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
 973	}
 974	disk_i_size = BTRFS_I(inode)->disk_i_size;
 975
 976	/* truncate file */
 977	if (disk_i_size > i_size) {
 978		BTRFS_I(inode)->disk_i_size = i_size;
 979		ret = 0;
 980		goto out;
 981	}
 982
 983	/*
 984	 * if the disk i_size is already at the inode->i_size, we're done;
 985	 * ordered extents inside the disk i_size are handled further down
 986	 */
 987	if (disk_i_size == i_size)
 988		goto out;
 989
 990	/*
 991	 * We still need to update disk_i_size if outstanding_isize is greater
 992	 * than disk_i_size.
 993	 */
 994	if (offset <= disk_i_size &&
 995	    (!ordered || ordered->outstanding_isize <= disk_i_size))
 996		goto out;
 997
 998	/*
 999	 * walk backward from this ordered extent to disk_i_size.
1000	 * if we find an ordered extent then we can't update disk i_size
1001	 * yet
1002	 */
1003	if (ordered) {
1004		node = rb_prev(&ordered->rb_node);
1005	} else {
1006		prev = tree_search(tree, offset);
1007		/*
1008		 * we insert file extents without involving ordered struct,
1009		 * so there should be no ordered struct cover this offset
1010		 */
1011		if (prev) {
1012			test = rb_entry(prev, struct btrfs_ordered_extent,
1013					rb_node);
1014			BUG_ON(offset_in_entry(test, offset));
1015		}
1016		node = prev;
1017	}
1018	for (; node; node = rb_prev(node)) {
1019		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
1020
1021		/* We treat this entry as if it doesn't exist */
1022		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
1023			continue;
1024		if (test->file_offset + test->len <= disk_i_size)
1025			break;
1026		if (test->file_offset >= i_size)
1027			break;
1028		if (entry_end(test) > disk_i_size) {
1029			/*
1030			 * we don't update disk_i_size now, so record this
1031			 * pending i_size; otherwise we would lose track of
1032			 * the real i_size
1033			 */
1034			if (test->outstanding_isize < offset)
1035				test->outstanding_isize = offset;
1036			if (ordered &&
1037			    ordered->outstanding_isize >
1038			    test->outstanding_isize)
1039				test->outstanding_isize =
1040						ordered->outstanding_isize;
1041			goto out;
1042		}
1043	}
1044	new_i_size = min_t(u64, offset, i_size);
1045
1046	/*
1047	 * Some ordered extents may have completed before the current one,
1048	 * in which case the real i_size is held in ->outstanding_isize.
1049	 */
1050	if (ordered && ordered->outstanding_isize > new_i_size)
1051		new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
1052	BTRFS_I(inode)->disk_i_size = new_i_size;
1053	ret = 0;
1054out:
1055	/*
1056	 * We need to do this because we can't remove ordered extents until
1057	 * after the disk_i_size has been updated and then the inode has been
1058	 * updated to reflect the change, so we need to tell anybody who finds
1059	 * this ordered extent that we've already done all the real work, we
1060	 * just haven't completed all the other work.
1061	 */
1062	if (ordered)
1063		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
1064	spin_unlock_irq(&tree->lock);
1065	return ret;
1066}
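
/*
 * Illustrative walk-through of the function above (not in the original
 * file), assuming a 4K sectorsize, disk_i_size == 0 and i_size == 10000:
 *
 *  - if the ordered extent covering [0, 8192) completes first, nothing
 *    pending sits below it, so disk_i_size advances to
 *    min(8192, 10000) = 8192;
 *  - if the extent covering [8192, 16384) completes first instead, the
 *    backward walk finds the still-pending extent below it, records
 *    16384 in that extent's ->outstanding_isize and leaves disk_i_size
 *    alone; when [0, 8192) later completes, disk_i_size jumps straight
 *    to min(16384, 10000) = 10000.
 */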
1067
1068/*
1069 * search the ordered extents for one corresponding to 'offset' and
1070 * try to find a checksum.  This is used because we allow pages to
1071 * be reclaimed before their checksum is actually put into the btree
1072 */
1073int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
1074			   u32 *sum, int len)
1075{
1076	struct btrfs_ordered_sum *ordered_sum;
1077	struct btrfs_ordered_extent *ordered;
1078	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
1079	unsigned long num_sectors;
1080	unsigned long i;
1081	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
1082	int index = 0;
1083
1084	ordered = btrfs_lookup_ordered_extent(inode, offset);
1085	if (!ordered)
1086		return 0;
1087
1088	spin_lock_irq(&tree->lock);
1089	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
1090		if (disk_bytenr >= ordered_sum->bytenr &&
1091		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
1092			i = (disk_bytenr - ordered_sum->bytenr) >>
1093			    inode->i_sb->s_blocksize_bits;
1094			num_sectors = ordered_sum->len >>
1095				      inode->i_sb->s_blocksize_bits;
1096			num_sectors = min_t(int, len - index, num_sectors - i);
1097			memcpy(sum + index, ordered_sum->sums + i,
1098			       num_sectors * sizeof(u32)); /* 4-byte crc32c csums */
1099
1100			index += (int)num_sectors;
1101			if (index == len)
1102				goto out;
1103			disk_bytenr += num_sectors * sectorsize;
1104		}
1105	}
1106out:
1107	spin_unlock_irq(&tree->lock);
1108	btrfs_put_ordered_extent(ordered);
1109	return index;
1110}
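
/*
 * Sketch (not in the original file; the example_* name is made up): a
 * read-side caller that could not find checksums in the csum tree can
 * fall back to the in-flight ordered extent.  'sum' receives up to
 * 'nblocks' 4-byte crc32c values and the return value says how many
 * were actually copied.
 */
static int example_csum_fallback(struct inode *inode, u64 offset,
				 u64 disk_bytenr, u32 *sum, int nblocks)
{
	return btrfs_find_ordered_sum(inode, offset, disk_bytenr, sum,
				      nblocks);
}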
1111
1112
1113/*
1114 * add a given inode to the list of inodes that must be fully on
1115 * disk before a transaction commit finishes.
1116 *
1117 * This basically gives us the ext3 style data=ordered mode, and it is mostly
1118 * used to make sure renamed files are fully on disk.
1119 *
1120 * It is a no-op if the inode is already fully on disk.
1121 *
1122 * If trans is not null, we'll do a friendly check for a transaction that
1123 * is already flushing things and force the IO down ourselves.
1124 */
1125void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
1126				 struct btrfs_root *root, struct inode *inode)
1127{
1128	struct btrfs_transaction *cur_trans = trans->transaction;
1129	u64 last_mod;
1130
1131	last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);
1132
1133	/*
1134	 * if this file hasn't been changed since the last transaction
1135	 * commit, we can safely return without doing anything
1136	 */
1137	if (last_mod <= root->fs_info->last_trans_committed)
1138		return;
1139
1140	spin_lock(&root->fs_info->ordered_root_lock);
1141	if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
1142		list_add_tail(&BTRFS_I(inode)->ordered_operations,
1143			      &cur_trans->ordered_operations);
1144	}
1145	spin_unlock(&root->fs_info->ordered_root_lock);
1146}
1147
1148int __init ordered_data_init(void)
1149{
1150	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
1151				     sizeof(struct btrfs_ordered_extent), 0,
1152				     SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
1153				     NULL);
1154	if (!btrfs_ordered_extent_cache)
1155		return -ENOMEM;
1156
1157	return 0;
1158}
1159
1160void ordered_data_exit(void)
1161{
1162	if (btrfs_ordered_extent_cache)
1163		kmem_cache_destroy(btrfs_ordered_extent_cache);
1164}