   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2007 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/slab.h>
   7#include <linux/blkdev.h>
   8#include <linux/writeback.h>
   9#include <linux/sched/mm.h>
  10#include "messages.h"
  11#include "misc.h"
  12#include "ctree.h"
  13#include "transaction.h"
  14#include "btrfs_inode.h"
  15#include "extent_io.h"
  16#include "disk-io.h"
  17#include "compression.h"
  18#include "delalloc-space.h"
  19#include "qgroup.h"
  20#include "subpage.h"
  21#include "file.h"
  22#include "block-group.h"
  23
  24static struct kmem_cache *btrfs_ordered_extent_cache;
  25
  26static u64 entry_end(struct btrfs_ordered_extent *entry)
  27{
  28	if (entry->file_offset + entry->num_bytes < entry->file_offset)
  29		return (u64)-1;
  30	return entry->file_offset + entry->num_bytes;
  31}
  32
   33/* Returns NULL if the insertion worked, or the existing node that was
   34 * found in the tree.
   35 */
  36static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
  37				   struct rb_node *node)
  38{
  39	struct rb_node **p = &root->rb_node;
  40	struct rb_node *parent = NULL;
  41	struct btrfs_ordered_extent *entry;
  42
  43	while (*p) {
  44		parent = *p;
  45		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);
  46
  47		if (file_offset < entry->file_offset)
  48			p = &(*p)->rb_left;
  49		else if (file_offset >= entry_end(entry))
  50			p = &(*p)->rb_right;
  51		else
  52			return parent;
  53	}
  54
  55	rb_link_node(node, parent, p);
  56	rb_insert_color(node, root);
  57	return NULL;
  58}
  59
   60/*
   61 * Look for a given offset in the tree and, if it can't be found, return
   62 * the first lesser offset.
   63 */
  64static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
  65				     struct rb_node **prev_ret)
  66{
  67	struct rb_node *n = root->rb_node;
  68	struct rb_node *prev = NULL;
  69	struct rb_node *test;
  70	struct btrfs_ordered_extent *entry;
  71	struct btrfs_ordered_extent *prev_entry = NULL;
  72
  73	while (n) {
  74		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
  75		prev = n;
  76		prev_entry = entry;
  77
  78		if (file_offset < entry->file_offset)
  79			n = n->rb_left;
  80		else if (file_offset >= entry_end(entry))
  81			n = n->rb_right;
  82		else
  83			return n;
  84	}
  85	if (!prev_ret)
  86		return NULL;
  87
  88	while (prev && file_offset >= entry_end(prev_entry)) {
  89		test = rb_next(prev);
  90		if (!test)
  91			break;
  92		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
  93				      rb_node);
  94		if (file_offset < entry_end(prev_entry))
  95			break;
  96
  97		prev = test;
  98	}
  99	if (prev)
 100		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
 101				      rb_node);
 102	while (prev && file_offset < entry_end(prev_entry)) {
 103		test = rb_prev(prev);
 104		if (!test)
 105			break;
 106		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
 107				      rb_node);
 108		prev = test;
 109	}
 110	*prev_ret = prev;
 111	return NULL;
 112}
 113
 114static int btrfs_range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
 115				u64 len)
 116{
 117	if (file_offset + len <= entry->file_offset ||
 118	    entry->file_offset + entry->num_bytes <= file_offset)
 119		return 0;
 120	return 1;
 121}
 122
  123/*
  124 * Look for the first ordered struct that has this offset, otherwise
  125 * the first one less than this offset.
  126 */
 127static inline struct rb_node *ordered_tree_search(struct btrfs_inode *inode,
 128						  u64 file_offset)
 129{
 130	struct rb_node *prev = NULL;
 131	struct rb_node *ret;
 132	struct btrfs_ordered_extent *entry;
 133
 134	if (inode->ordered_tree_last) {
 135		entry = rb_entry(inode->ordered_tree_last, struct btrfs_ordered_extent,
 136				 rb_node);
 137		if (in_range(file_offset, entry->file_offset, entry->num_bytes))
 138			return inode->ordered_tree_last;
 139	}
 140	ret = __tree_search(&inode->ordered_tree, file_offset, &prev);
 141	if (!ret)
 142		ret = prev;
 143	if (ret)
 144		inode->ordered_tree_last = ret;
 145	return ret;
 146}
 147
 148static struct btrfs_ordered_extent *alloc_ordered_extent(
 149			struct btrfs_inode *inode, u64 file_offset, u64 num_bytes,
 150			u64 ram_bytes, u64 disk_bytenr, u64 disk_num_bytes,
 151			u64 offset, unsigned long flags, int compress_type)
 152{
 153	struct btrfs_ordered_extent *entry;
 154	int ret;
 155	u64 qgroup_rsv = 0;
 156
 157	if (flags &
 158	    ((1 << BTRFS_ORDERED_NOCOW) | (1 << BTRFS_ORDERED_PREALLOC))) {
 159		/* For nocow write, we can release the qgroup rsv right now */
 160		ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes, &qgroup_rsv);
 161		if (ret < 0)
 162			return ERR_PTR(ret);
 163	} else {
 164		/*
 165		 * The ordered extent has reserved qgroup space, release now
 166		 * and pass the reserved number for qgroup_record to free.
 167		 */
 168		ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes, &qgroup_rsv);
 169		if (ret < 0)
 170			return ERR_PTR(ret);
 171	}
 172	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
 173	if (!entry)
 174		return ERR_PTR(-ENOMEM);
 175
 176	entry->file_offset = file_offset;
 177	entry->num_bytes = num_bytes;
 178	entry->ram_bytes = ram_bytes;
 179	entry->disk_bytenr = disk_bytenr;
 180	entry->disk_num_bytes = disk_num_bytes;
 181	entry->offset = offset;
 182	entry->bytes_left = num_bytes;
 183	entry->inode = BTRFS_I(igrab(&inode->vfs_inode));
 184	entry->compress_type = compress_type;
 185	entry->truncated_len = (u64)-1;
 186	entry->qgroup_rsv = qgroup_rsv;
 187	entry->flags = flags;
 188	refcount_set(&entry->refs, 1);
 189	init_waitqueue_head(&entry->wait);
 190	INIT_LIST_HEAD(&entry->list);
 191	INIT_LIST_HEAD(&entry->log_list);
 192	INIT_LIST_HEAD(&entry->root_extent_list);
 193	INIT_LIST_HEAD(&entry->work_list);
 194	INIT_LIST_HEAD(&entry->bioc_list);
 195	init_completion(&entry->completion);
 196
 197	/*
 198	 * We don't need the count_max_extents here, we can assume that all of
 199	 * that work has been done at higher layers, so this is truly the
 200	 * smallest the extent is going to get.
 201	 */
 202	spin_lock(&inode->lock);
 203	btrfs_mod_outstanding_extents(inode, 1);
 204	spin_unlock(&inode->lock);
 205
 206	return entry;
 207}
 208
 209static void insert_ordered_extent(struct btrfs_ordered_extent *entry)
 210{
 211	struct btrfs_inode *inode = entry->inode;
 212	struct btrfs_root *root = inode->root;
 213	struct btrfs_fs_info *fs_info = root->fs_info;
 214	struct rb_node *node;
 215
 216	trace_btrfs_ordered_extent_add(inode, entry);
 217
 218	percpu_counter_add_batch(&fs_info->ordered_bytes, entry->num_bytes,
 219				 fs_info->delalloc_batch);
 220
 221	/* One ref for the tree. */
 222	refcount_inc(&entry->refs);
 223
 224	spin_lock_irq(&inode->ordered_tree_lock);
 225	node = tree_insert(&inode->ordered_tree, entry->file_offset,
 226			   &entry->rb_node);
 227	if (unlikely(node))
 228		btrfs_panic(fs_info, -EEXIST,
 229				"inconsistency in ordered tree at offset %llu",
 230				entry->file_offset);
 231	spin_unlock_irq(&inode->ordered_tree_lock);
 232
 233	spin_lock(&root->ordered_extent_lock);
 234	list_add_tail(&entry->root_extent_list,
 235		      &root->ordered_extents);
 236	root->nr_ordered_extents++;
 237	if (root->nr_ordered_extents == 1) {
 238		spin_lock(&fs_info->ordered_root_lock);
 239		BUG_ON(!list_empty(&root->ordered_root));
 240		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
 241		spin_unlock(&fs_info->ordered_root_lock);
 242	}
 243	spin_unlock(&root->ordered_extent_lock);
 244}
 245
 246/*
 247 * Add an ordered extent to the per-inode tree.
 248 *
 249 * @inode:           Inode that this extent is for.
 250 * @file_offset:     Logical offset in file where the extent starts.
 251 * @num_bytes:       Logical length of extent in file.
 252 * @ram_bytes:       Full length of unencoded data.
 253 * @disk_bytenr:     Offset of extent on disk.
 254 * @disk_num_bytes:  Size of extent on disk.
 255 * @offset:          Offset into unencoded data where file data starts.
 256 * @flags:           Flags specifying type of extent (1 << BTRFS_ORDERED_*).
 257 * @compress_type:   Compression algorithm used for data.
 258 *
 259 * Most of these parameters correspond to &struct btrfs_file_extent_item. The
 260 * tree is given a single reference on the ordered extent that was inserted, and
 261 * the returned pointer is given a second reference.
 262 *
 263 * Return: the new ordered extent or error pointer.
 264 */
 265struct btrfs_ordered_extent *btrfs_alloc_ordered_extent(
 266			struct btrfs_inode *inode, u64 file_offset,
 267			const struct btrfs_file_extent *file_extent, unsigned long flags)
 268{
 269	struct btrfs_ordered_extent *entry;
 270
 271	ASSERT((flags & ~BTRFS_ORDERED_TYPE_FLAGS) == 0);
 272
 273	/*
 274	 * For regular writes, we just use the members in @file_extent.
 275	 *
  276	 * For NOCOW, we don't really care about the numbers except @file_offset
  277	 * and file_extent->num_bytes, as we won't insert a file extent item at all.
 278	 *
 279	 * For PREALLOC, we do not use ordered extent members, but
 280	 * btrfs_mark_extent_written() handles everything.
 281	 *
 282	 * So here we always pass 0 as offset for NOCOW/PREALLOC ordered extents,
  283	 * otherwise btrfs_split_ordered_extent() cannot handle it correctly.
 284	 */
 285	if (flags & ((1U << BTRFS_ORDERED_NOCOW) | (1U << BTRFS_ORDERED_PREALLOC)))
 286		entry = alloc_ordered_extent(inode, file_offset,
 287					     file_extent->num_bytes,
 288					     file_extent->num_bytes,
 289					     file_extent->disk_bytenr + file_extent->offset,
 290					     file_extent->num_bytes, 0, flags,
 291					     file_extent->compression);
 292	else
 293		entry = alloc_ordered_extent(inode, file_offset,
 294					     file_extent->num_bytes,
 295					     file_extent->ram_bytes,
 296					     file_extent->disk_bytenr,
 297					     file_extent->disk_num_bytes,
 298					     file_extent->offset, flags,
 299					     file_extent->compression);
 300	if (!IS_ERR(entry))
 301		insert_ordered_extent(entry);
 302	return entry;
 303}
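
/*
 * Illustrative sketch, not part of the original file: how a regular COW
 * write path might create an ordered extent once disk space has been
 * reserved.  The helper name example_queue_cow_write() and the way
 * @file_extent is filled in are assumptions for illustration only; see
 * btrfs_alloc_ordered_extent() above for the real contract (one reference
 * held by the tree, a second returned to the caller).
 */
#if 0
static struct btrfs_ordered_extent *example_queue_cow_write(
		struct btrfs_inode *inode, u64 file_offset,
		u64 num_bytes, u64 disk_bytenr)
{
	struct btrfs_file_extent file_extent = {
		.disk_bytenr	= disk_bytenr,
		.disk_num_bytes	= num_bytes,
		.num_bytes	= num_bytes,
		.ram_bytes	= num_bytes,
		.offset		= 0,
		.compression	= BTRFS_COMPRESS_NONE,
	};

	/* Returns the new ordered extent with an extra reference, or an ERR_PTR. */
	return btrfs_alloc_ordered_extent(inode, file_offset, &file_extent,
					  1U << BTRFS_ORDERED_REGULAR);
}
#endif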
 304
 305/*
 306 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 307 * when an ordered extent is finished.  If the list covers more than one
  308 * ordered extent, it is split across multiple ordered extents.
 309 */
 310void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
 311			   struct btrfs_ordered_sum *sum)
 312{
 313	struct btrfs_inode *inode = entry->inode;
 314
 315	spin_lock_irq(&inode->ordered_tree_lock);
 316	list_add_tail(&sum->list, &entry->list);
 317	spin_unlock_irq(&inode->ordered_tree_lock);
 318}
 319
 320void btrfs_mark_ordered_extent_error(struct btrfs_ordered_extent *ordered)
 321{
 322	if (!test_and_set_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
 323		mapping_set_error(ordered->inode->vfs_inode.i_mapping, -EIO);
 324}
 325
 326static void finish_ordered_fn(struct btrfs_work *work)
 327{
 328	struct btrfs_ordered_extent *ordered_extent;
 329
 330	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
 331	btrfs_finish_ordered_io(ordered_extent);
 332}
 333
 334static bool can_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
 335				      struct folio *folio, u64 file_offset,
 336				      u64 len, bool uptodate)
 337{
 338	struct btrfs_inode *inode = ordered->inode;
 339	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 340
 341	lockdep_assert_held(&inode->ordered_tree_lock);
 342
 343	if (folio) {
 344		ASSERT(folio->mapping);
 345		ASSERT(folio_pos(folio) <= file_offset);
 346		ASSERT(file_offset + len <= folio_pos(folio) + folio_size(folio));
 347
  348		/*
  349		 * The ordered flag indicates whether we still have pending
  350		 * IO for the ordered extent.
  351		 *
  352		 * If it's not set, we need to skip to the next range.
  353		 */
 354		if (!btrfs_folio_test_ordered(fs_info, folio, file_offset, len))
 355			return false;
 356		btrfs_folio_clear_ordered(fs_info, folio, file_offset, len);
 357	}
 358
 359	/* Now we're fine to update the accounting. */
 360	if (WARN_ON_ONCE(len > ordered->bytes_left)) {
 361		btrfs_crit(fs_info,
 362"bad ordered extent accounting, root=%llu ino=%llu OE offset=%llu OE len=%llu to_dec=%llu left=%llu",
 363			   btrfs_root_id(inode->root), btrfs_ino(inode),
 364			   ordered->file_offset, ordered->num_bytes,
 365			   len, ordered->bytes_left);
 366		ordered->bytes_left = 0;
 367	} else {
 368		ordered->bytes_left -= len;
 369	}
 370
 371	if (!uptodate)
 372		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
 373
 374	if (ordered->bytes_left)
 375		return false;
 376
 377	/*
  378	 * All the IO of the ordered extent is finished, we need to queue
  379	 * its completion function (finish_ordered_fn) to be executed.
 380	 */
 381	set_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags);
 382	cond_wake_up(&ordered->wait);
 383	refcount_inc(&ordered->refs);
 384	trace_btrfs_ordered_extent_mark_finished(inode, ordered);
 385	return true;
 386}
 387
 388static void btrfs_queue_ordered_fn(struct btrfs_ordered_extent *ordered)
 389{
 390	struct btrfs_inode *inode = ordered->inode;
 391	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 392	struct btrfs_workqueue *wq = btrfs_is_free_space_inode(inode) ?
 393		fs_info->endio_freespace_worker : fs_info->endio_write_workers;
 394
 395	btrfs_init_work(&ordered->work, finish_ordered_fn, NULL);
 396	btrfs_queue_work(wq, &ordered->work);
 397}
 398
 399void btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
 400				 struct folio *folio, u64 file_offset, u64 len,
 401				 bool uptodate)
 402{
 403	struct btrfs_inode *inode = ordered->inode;
 404	unsigned long flags;
 405	bool ret;
 406
 407	trace_btrfs_finish_ordered_extent(inode, file_offset, len, uptodate);
 408
 409	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
 410	ret = can_finish_ordered_extent(ordered, folio, file_offset, len,
 411					uptodate);
 412	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
 413
 414	/*
 415	 * If this is a COW write it means we created new extent maps for the
 416	 * range and they point to unwritten locations if we got an error either
 417	 * before submitting a bio or during IO.
 418	 *
 419	 * We have marked the ordered extent with BTRFS_ORDERED_IOERR, and we
 420	 * are queuing its completion below. During completion, at
 421	 * btrfs_finish_one_ordered(), we will drop the extent maps for the
 422	 * unwritten extents.
 423	 *
 424	 * However because completion runs in a work queue we can end up having
 425	 * a fast fsync running before that. In the case of direct IO, once we
 426	 * unlock the inode the fsync might start, and we queue the completion
 427	 * before unlocking the inode. In the case of buffered IO when writeback
 428	 * finishes (end_bbio_data_write()) we queue the completion, so if the
 429	 * writeback was triggered by a fast fsync, the fsync might start
 430	 * logging before ordered extent completion runs in the work queue.
 431	 *
 432	 * The fast fsync will log file extent items based on the extent maps it
 433	 * finds, so if by the time it collects extent maps the ordered extent
 434	 * completion didn't happen yet, it will log file extent items that
 435	 * point to unwritten extents, resulting in a corruption if a crash
 436	 * happens and the log tree is replayed. Note that a fast fsync does not
 437	 * wait for completion of ordered extents in order to reduce latency.
 438	 *
 439	 * Set a flag in the inode so that the next fast fsync will wait for
 440	 * ordered extents to complete before starting to log.
 441	 */
 442	if (!uptodate && !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
 443		set_bit(BTRFS_INODE_COW_WRITE_ERROR, &inode->runtime_flags);
 444
 445	if (ret)
 446		btrfs_queue_ordered_fn(ordered);
 447}
 448
  449/*
  450 * Mark the IO of all ordered extents inside the specified range as finished.
  451 *
  452 * @folio:	 The involved folio for the operation.
  453 *		 For uncompressed buffered IO, the folio status also needs to be
  454 *		 updated to indicate whether the pending ordered IO is finished.
  455 *		 Can be NULL for direct IO and compressed writes.
  456 *		 For these cases, callers must ensure they won't execute the
  457 *		 endio function twice.
  458 *
  459 * This function is called at endio time, thus the range must be covered by
  460 * ordered extent(s).
  461 */
 462void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
 463				    struct folio *folio, u64 file_offset,
 464				    u64 num_bytes, bool uptodate)
 465{
 466	struct rb_node *node;
 467	struct btrfs_ordered_extent *entry = NULL;
 468	unsigned long flags;
 469	u64 cur = file_offset;
 470
 471	trace_btrfs_writepage_end_io_hook(inode, file_offset,
 472					  file_offset + num_bytes - 1,
 473					  uptodate);
 474
 475	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
 476	while (cur < file_offset + num_bytes) {
 477		u64 entry_end;
 478		u64 end;
 479		u32 len;
 480
 481		node = ordered_tree_search(inode, cur);
 482		/* No ordered extents at all */
 483		if (!node)
 484			break;
 485
 486		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 487		entry_end = entry->file_offset + entry->num_bytes;
 488		/*
 489		 * |<-- OE --->|  |
 490		 *		  cur
 491		 * Go to next OE.
 492		 */
 493		if (cur >= entry_end) {
 494			node = rb_next(node);
 495			/* No more ordered extents, exit */
 496			if (!node)
 497				break;
 498			entry = rb_entry(node, struct btrfs_ordered_extent,
 499					 rb_node);
 500
 501			/* Go to next ordered extent and continue */
 502			cur = entry->file_offset;
 503			continue;
 504		}
 505		/*
 506		 * |	|<--- OE --->|
 507		 * cur
 508		 * Go to the start of OE.
 509		 */
 510		if (cur < entry->file_offset) {
 511			cur = entry->file_offset;
 512			continue;
 513		}
 514
 515		/*
 516		 * Now we are definitely inside one ordered extent.
 517		 *
 518		 * |<--- OE --->|
 519		 *	|
 520		 *	cur
 521		 */
 522		end = min(entry->file_offset + entry->num_bytes,
 523			  file_offset + num_bytes) - 1;
 524		ASSERT(end + 1 - cur < U32_MAX);
 525		len = end + 1 - cur;
 526
 527		if (can_finish_ordered_extent(entry, folio, cur, len, uptodate)) {
 528			spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
 529			btrfs_queue_ordered_fn(entry);
 530			spin_lock_irqsave(&inode->ordered_tree_lock, flags);
 531		}
 532		cur += len;
 533	}
 534	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
 535}
 536
  537/*
  538 * Finish IO for one ordered extent across a given range.  The range can only
  539 * contain one ordered extent.
  540 *
  541 * @cached:	 The cached ordered extent. If not NULL, we can skip the tree
  542 *               search and use the ordered extent directly.
  543 *		 Will also be used to store the finished ordered extent.
  544 * @file_offset: File offset for the finished IO
  545 * @io_size:	 Length of the finished IO range
  546 *
  547 * Return true if the ordered extent is finished in the range, and update
  548 * @cached.
  549 * Return false otherwise.
  550 *
  551 * NOTE: The range can NOT cross multiple ordered extents.
  552 * Thus the caller should ensure the range doesn't cross ordered extents.
  553 */
 554bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
 555				    struct btrfs_ordered_extent **cached,
 556				    u64 file_offset, u64 io_size)
 557{
 558	struct rb_node *node;
 559	struct btrfs_ordered_extent *entry = NULL;
 560	unsigned long flags;
 561	bool finished = false;
 562
 563	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
 564	if (cached && *cached) {
 565		entry = *cached;
 566		goto have_entry;
 567	}
 568
 569	node = ordered_tree_search(inode, file_offset);
 570	if (!node)
 571		goto out;
 572
 573	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 574have_entry:
 575	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
 576		goto out;
 577
 578	if (io_size > entry->bytes_left)
 579		btrfs_crit(inode->root->fs_info,
 580			   "bad ordered accounting left %llu size %llu",
 581		       entry->bytes_left, io_size);
 582
 583	entry->bytes_left -= io_size;
 584
 585	if (entry->bytes_left == 0) {
 586		/*
  587		 * Ensure only one caller can set the flag and @finished
  588		 * accordingly.
 589		 */
 590		finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
 591		/* test_and_set_bit implies a barrier */
 592		cond_wake_up_nomb(&entry->wait);
 593	}
 594out:
 595	if (finished && cached && entry) {
 596		*cached = entry;
 597		refcount_inc(&entry->refs);
 598		trace_btrfs_ordered_extent_dec_test_pending(inode, entry);
 599	}
 600	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
 601	return finished;
 602}
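
/*
 * Illustrative sketch, not part of the original file: a typical caller
 * pattern for btrfs_dec_test_ordered_pending().  The helper name
 * example_complete_io() is an assumption for illustration only.
 */
#if 0
static void example_complete_io(struct btrfs_inode *inode, u64 file_offset,
				u64 io_size)
{
	struct btrfs_ordered_extent *cached = NULL;

	/* Returns true only once, when the whole ordered extent is finished. */
	if (btrfs_dec_test_ordered_pending(inode, &cached, file_offset, io_size)) {
		/* @cached now holds a referenced, fully finished ordered extent. */
		btrfs_put_ordered_extent(cached);
	}
}
#endif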
 603
  604/*
  605 * Drop a reference on an ordered extent.  This will free
  606 * the extent if the last reference is dropped.
  607 */
 608void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
 609{
 610	struct list_head *cur;
 611	struct btrfs_ordered_sum *sum;
 612
 613	trace_btrfs_ordered_extent_put(entry->inode, entry);
 614
 615	if (refcount_dec_and_test(&entry->refs)) {
 616		ASSERT(list_empty(&entry->root_extent_list));
 617		ASSERT(list_empty(&entry->log_list));
 618		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
 619		if (entry->inode)
 620			btrfs_add_delayed_iput(entry->inode);
 621		while (!list_empty(&entry->list)) {
 622			cur = entry->list.next;
 623			sum = list_entry(cur, struct btrfs_ordered_sum, list);
 624			list_del(&sum->list);
 625			kvfree(sum);
 626		}
 627		kmem_cache_free(btrfs_ordered_extent_cache, entry);
 628	}
 629}
 630
  631/*
  632 * Remove an ordered extent from the tree.  No references are dropped,
  633 * but waiters are woken up.
  634 */
 635void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
 636				 struct btrfs_ordered_extent *entry)
 637{
 638	struct btrfs_root *root = btrfs_inode->root;
 639	struct btrfs_fs_info *fs_info = root->fs_info;
 640	struct rb_node *node;
 641	bool pending;
 642	bool freespace_inode;
 643
 644	/*
 645	 * If this is a free space inode the thread has not acquired the ordered
 646	 * extents lockdep map.
 647	 */
 648	freespace_inode = btrfs_is_free_space_inode(btrfs_inode);
 649
 650	btrfs_lockdep_acquire(fs_info, btrfs_trans_pending_ordered);
 651	/* This is paired with alloc_ordered_extent(). */
 652	spin_lock(&btrfs_inode->lock);
 653	btrfs_mod_outstanding_extents(btrfs_inode, -1);
 654	spin_unlock(&btrfs_inode->lock);
 655	if (root != fs_info->tree_root) {
 656		u64 release;
 657
 658		if (test_bit(BTRFS_ORDERED_ENCODED, &entry->flags))
 659			release = entry->disk_num_bytes;
 660		else
 661			release = entry->num_bytes;
 662		btrfs_delalloc_release_metadata(btrfs_inode, release,
 663						test_bit(BTRFS_ORDERED_IOERR,
 664							 &entry->flags));
 665	}
 666
 667	percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
 668				 fs_info->delalloc_batch);
 669
 670	spin_lock_irq(&btrfs_inode->ordered_tree_lock);
 671	node = &entry->rb_node;
 672	rb_erase(node, &btrfs_inode->ordered_tree);
 673	RB_CLEAR_NODE(node);
 674	if (btrfs_inode->ordered_tree_last == node)
 675		btrfs_inode->ordered_tree_last = NULL;
 676	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
 677	pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
 678	spin_unlock_irq(&btrfs_inode->ordered_tree_lock);
 679
 680	/*
 681	 * The current running transaction is waiting on us, we need to let it
 682	 * know that we're complete and wake it up.
 683	 */
 684	if (pending) {
 685		struct btrfs_transaction *trans;
 686
 687		/*
 688		 * The checks for trans are just a formality, it should be set,
 689		 * but if it isn't we don't want to deref/assert under the spin
 690		 * lock, so be nice and check if trans is set, but ASSERT() so
 691		 * if it isn't set a developer will notice.
 692		 */
 693		spin_lock(&fs_info->trans_lock);
 694		trans = fs_info->running_transaction;
 695		if (trans)
 696			refcount_inc(&trans->use_count);
 697		spin_unlock(&fs_info->trans_lock);
 698
 699		ASSERT(trans || BTRFS_FS_ERROR(fs_info));
 700		if (trans) {
 701			if (atomic_dec_and_test(&trans->pending_ordered))
 702				wake_up(&trans->pending_wait);
 703			btrfs_put_transaction(trans);
 704		}
 705	}
 706
 707	btrfs_lockdep_release(fs_info, btrfs_trans_pending_ordered);
 708
 709	spin_lock(&root->ordered_extent_lock);
 710	list_del_init(&entry->root_extent_list);
 711	root->nr_ordered_extents--;
 712
 713	trace_btrfs_ordered_extent_remove(btrfs_inode, entry);
 714
 715	if (!root->nr_ordered_extents) {
 716		spin_lock(&fs_info->ordered_root_lock);
 717		BUG_ON(list_empty(&root->ordered_root));
 718		list_del_init(&root->ordered_root);
 719		spin_unlock(&fs_info->ordered_root_lock);
 720	}
 721	spin_unlock(&root->ordered_extent_lock);
 722	wake_up(&entry->wait);
 723	if (!freespace_inode)
 724		btrfs_lockdep_release(fs_info, btrfs_ordered_extent);
 725}
 726
 727static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
 728{
 729	struct btrfs_ordered_extent *ordered;
 730
 731	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
 732	btrfs_start_ordered_extent(ordered);
 733	complete(&ordered->completion);
 734}
 735
  736/*
  737 * Wait for all the ordered extents in a root.  Use @bg as the range, or the
  738 * whole range if it's NULL.
  739 */
 740u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
 741			       const struct btrfs_block_group *bg)
 742{
 743	struct btrfs_fs_info *fs_info = root->fs_info;
 744	LIST_HEAD(splice);
 745	LIST_HEAD(skipped);
 746	LIST_HEAD(works);
 747	struct btrfs_ordered_extent *ordered, *next;
 748	u64 count = 0;
 749	u64 range_start, range_len;
 750	u64 range_end;
 751
 752	if (bg) {
 753		range_start = bg->start;
 754		range_len = bg->length;
 755	} else {
 756		range_start = 0;
 757		range_len = U64_MAX;
 758	}
 759	range_end = range_start + range_len;
 760
 761	mutex_lock(&root->ordered_extent_mutex);
 762	spin_lock(&root->ordered_extent_lock);
 763	list_splice_init(&root->ordered_extents, &splice);
 764	while (!list_empty(&splice) && nr) {
 765		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
 766					   root_extent_list);
 767
 768		if (range_end <= ordered->disk_bytenr ||
 769		    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
 770			list_move_tail(&ordered->root_extent_list, &skipped);
 771			cond_resched_lock(&root->ordered_extent_lock);
 772			continue;
 773		}
 774
 775		list_move_tail(&ordered->root_extent_list,
 776			       &root->ordered_extents);
 777		refcount_inc(&ordered->refs);
 778		spin_unlock(&root->ordered_extent_lock);
 779
 780		btrfs_init_work(&ordered->flush_work,
 781				btrfs_run_ordered_extent_work, NULL);
 782		list_add_tail(&ordered->work_list, &works);
 783		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);
 784
 785		cond_resched();
 786		if (nr != U64_MAX)
 787			nr--;
 788		count++;
 789		spin_lock(&root->ordered_extent_lock);
 790	}
 791	list_splice_tail(&skipped, &root->ordered_extents);
 792	list_splice_tail(&splice, &root->ordered_extents);
 793	spin_unlock(&root->ordered_extent_lock);
 794
 795	list_for_each_entry_safe(ordered, next, &works, work_list) {
 796		list_del_init(&ordered->work_list);
 797		wait_for_completion(&ordered->completion);
 798		btrfs_put_ordered_extent(ordered);
 799		cond_resched();
 800	}
 801	mutex_unlock(&root->ordered_extent_mutex);
 802
 803	return count;
 804}
 805
 806/*
  807 * Wait for @nr ordered extents that intersect @bg, or the whole range of
 808 * the filesystem if @bg is NULL.
 809 */
 810void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
 811			      const struct btrfs_block_group *bg)
 812{
 813	struct btrfs_root *root;
 814	LIST_HEAD(splice);
 815	u64 done;
 816
 817	mutex_lock(&fs_info->ordered_operations_mutex);
 818	spin_lock(&fs_info->ordered_root_lock);
 819	list_splice_init(&fs_info->ordered_roots, &splice);
 820	while (!list_empty(&splice) && nr) {
 821		root = list_first_entry(&splice, struct btrfs_root,
 822					ordered_root);
 823		root = btrfs_grab_root(root);
 824		BUG_ON(!root);
 825		list_move_tail(&root->ordered_root,
 826			       &fs_info->ordered_roots);
 827		spin_unlock(&fs_info->ordered_root_lock);
 828
 829		done = btrfs_wait_ordered_extents(root, nr, bg);
 830		btrfs_put_root(root);
 831
 832		if (nr != U64_MAX)
 833			nr -= done;
 834
 835		spin_lock(&fs_info->ordered_root_lock);
 836	}
 837	list_splice_tail(&splice, &fs_info->ordered_roots);
 838	spin_unlock(&fs_info->ordered_root_lock);
 839	mutex_unlock(&fs_info->ordered_operations_mutex);
 840}
 841
 842/*
 843 * Start IO and wait for a given ordered extent to finish.
 844 *
 845 * Wait on page writeback for all the pages in the extent and the IO completion
 846 * code to insert metadata into the btree corresponding to the extent.
 847 */
 848void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry)
 849{
 850	u64 start = entry->file_offset;
 851	u64 end = start + entry->num_bytes - 1;
 852	struct btrfs_inode *inode = entry->inode;
 853	bool freespace_inode;
 854
 855	trace_btrfs_ordered_extent_start(inode, entry);
 856
 857	/*
 858	 * If this is a free space inode do not take the ordered extents lockdep
 859	 * map.
 860	 */
 861	freespace_inode = btrfs_is_free_space_inode(inode);
 862
  863	/*
  864	 * Pages in the range can be dirty, clean or writeback.  We
  865	 * start IO on any dirty ones so the wait doesn't stall waiting
  866	 * for the flusher thread to find them.
  867	 */
 868	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
 869		filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);
 870
 871	if (!freespace_inode)
 872		btrfs_might_wait_for_event(inode->root->fs_info, btrfs_ordered_extent);
 873	wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, &entry->flags));
 874}
 875
 876/*
 877 * Used to wait on ordered extents across a large range of bytes.
 878 */
 879int btrfs_wait_ordered_range(struct btrfs_inode *inode, u64 start, u64 len)
 880{
 881	int ret = 0;
 882	int ret_wb = 0;
 883	u64 end;
 884	u64 orig_end;
 885	struct btrfs_ordered_extent *ordered;
 886
 887	if (start + len < start) {
 888		orig_end = OFFSET_MAX;
 889	} else {
 890		orig_end = start + len - 1;
 891		if (orig_end > OFFSET_MAX)
 892			orig_end = OFFSET_MAX;
 893	}
 894
  895	/* Start IO across the range first to instantiate any delalloc
  896	 * extents.
  897	 */
 898	ret = btrfs_fdatawrite_range(inode, start, orig_end);
 899	if (ret)
 900		return ret;
 901
 902	/*
 903	 * If we have a writeback error don't return immediately. Wait first
 904	 * for any ordered extents that haven't completed yet. This is to make
 905	 * sure no one can dirty the same page ranges and call writepages()
 906	 * before the ordered extents complete - to avoid failures (-EEXIST)
 907	 * when adding the new ordered extents to the ordered tree.
 908	 */
 909	ret_wb = filemap_fdatawait_range(inode->vfs_inode.i_mapping, start, orig_end);
 910
 911	end = orig_end;
 912	while (1) {
 913		ordered = btrfs_lookup_first_ordered_extent(inode, end);
 914		if (!ordered)
 915			break;
 916		if (ordered->file_offset > orig_end) {
 917			btrfs_put_ordered_extent(ordered);
 918			break;
 919		}
 920		if (ordered->file_offset + ordered->num_bytes <= start) {
 921			btrfs_put_ordered_extent(ordered);
 922			break;
 923		}
 924		btrfs_start_ordered_extent(ordered);
 925		end = ordered->file_offset;
 926		/*
 927		 * If the ordered extent had an error save the error but don't
 928		 * exit without waiting first for all other ordered extents in
 929		 * the range to complete.
 930		 */
 931		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
 932			ret = -EIO;
 933		btrfs_put_ordered_extent(ordered);
 934		if (end == 0 || end == start)
 935			break;
 936		end--;
 937	}
 938	return ret_wb ? ret_wb : ret;
 939}
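
/*
 * Illustrative sketch, not part of the original file: a truncate-like path
 * would typically flush and wait on the tail of a file before changing its
 * size.  The helper name example_wait_file_tail() and the offset choice are
 * assumptions for illustration only; passing (u64)-1 as @len makes the
 * function above wait to the end of the file.
 */
#if 0
static int example_wait_file_tail(struct btrfs_inode *inode, u64 new_size)
{
	/* Start writeback and wait for every ordered extent past @new_size. */
	return btrfs_wait_ordered_range(inode, new_size, (u64)-1);
}
#endif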
 940
  941/*
  942 * Find an ordered extent corresponding to file_offset.  Return NULL if
  943 * nothing is found, otherwise take a reference on the extent and return it.
  944 */
 945struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
 946							 u64 file_offset)
 947{
 948	struct rb_node *node;
 949	struct btrfs_ordered_extent *entry = NULL;
 950	unsigned long flags;
 951
 952	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
 953	node = ordered_tree_search(inode, file_offset);
 954	if (!node)
 955		goto out;
 956
 957	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 958	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
 959		entry = NULL;
 960	if (entry) {
 961		refcount_inc(&entry->refs);
 962		trace_btrfs_ordered_extent_lookup(inode, entry);
 963	}
 964out:
 965	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
 966	return entry;
 967}
 968
  969/* Since the DIO code tries to lock a wide area, we need to look for any ordered
  970 * extents that exist in the range, rather than just the start of the range.
  971 */
 972struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
 973		struct btrfs_inode *inode, u64 file_offset, u64 len)
 974{
 975	struct rb_node *node;
 976	struct btrfs_ordered_extent *entry = NULL;
 977
 978	spin_lock_irq(&inode->ordered_tree_lock);
 979	node = ordered_tree_search(inode, file_offset);
 980	if (!node) {
 981		node = ordered_tree_search(inode, file_offset + len);
 982		if (!node)
 983			goto out;
 984	}
 985
 986	while (1) {
 987		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 988		if (btrfs_range_overlaps(entry, file_offset, len))
 989			break;
 990
 991		if (entry->file_offset >= file_offset + len) {
 992			entry = NULL;
 993			break;
 994		}
 995		entry = NULL;
 996		node = rb_next(node);
 997		if (!node)
 998			break;
 999	}
1000out:
1001	if (entry) {
1002		refcount_inc(&entry->refs);
1003		trace_btrfs_ordered_extent_lookup_range(inode, entry);
1004	}
1005	spin_unlock_irq(&inode->ordered_tree_lock);
1006	return entry;
1007}
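
/*
 * Illustrative sketch, not part of the original file: checking whether any
 * ordered extent overlaps a range, mirroring how btrfs_try_lock_ordered_range()
 * below consumes the lookup.  The helper name example_range_has_ordered() is
 * an assumption for illustration only.
 */
#if 0
static bool example_range_has_ordered(struct btrfs_inode *inode, u64 start,
				      u64 len)
{
	struct btrfs_ordered_extent *ordered;

	ordered = btrfs_lookup_ordered_range(inode, start, len);
	if (!ordered)
		return false;
	/* The lookup took a reference; drop it once we're done peeking. */
	btrfs_put_ordered_extent(ordered);
	return true;
}
#endif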
1008
1009/*
1010 * Adds all ordered extents to the given list. The list ends up sorted by the
1011 * file_offset of the ordered extents.
1012 */
1013void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
1014					   struct list_head *list)
1015{
1016	struct rb_node *n;
1017
1018	btrfs_assert_inode_locked(inode);
1019
1020	spin_lock_irq(&inode->ordered_tree_lock);
1021	for (n = rb_first(&inode->ordered_tree); n; n = rb_next(n)) {
1022		struct btrfs_ordered_extent *ordered;
1023
1024		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
1025
1026		if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
1027			continue;
1028
1029		ASSERT(list_empty(&ordered->log_list));
1030		list_add_tail(&ordered->log_list, list);
1031		refcount_inc(&ordered->refs);
1032		trace_btrfs_ordered_extent_lookup_for_logging(inode, ordered);
1033	}
1034	spin_unlock_irq(&inode->ordered_tree_lock);
1035}
1036
 1037/*
 1038 * Look up and return any extent before 'file_offset'.  NULL is returned
 1039 * if none is found.
 1040 */
1041struct btrfs_ordered_extent *
1042btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
1043{
1044	struct rb_node *node;
1045	struct btrfs_ordered_extent *entry = NULL;
1046
1047	spin_lock_irq(&inode->ordered_tree_lock);
1048	node = ordered_tree_search(inode, file_offset);
1049	if (!node)
1050		goto out;
1051
1052	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
1053	refcount_inc(&entry->refs);
1054	trace_btrfs_ordered_extent_lookup_first(inode, entry);
1055out:
1056	spin_unlock_irq(&inode->ordered_tree_lock);
1057	return entry;
1058}
1059
1060/*
1061 * Lookup the first ordered extent that overlaps the range
1062 * [@file_offset, @file_offset + @len).
1063 *
1064 * The difference between this and btrfs_lookup_first_ordered_extent() is
1065 * that this one won't return any ordered extent that does not overlap the range.
 1066 * And the difference from btrfs_lookup_ordered_extent() is that this function
 1067 * ensures the first ordered extent gets returned.
1068 */
1069struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
1070			struct btrfs_inode *inode, u64 file_offset, u64 len)
1071{
1072	struct rb_node *node;
1073	struct rb_node *cur;
1074	struct rb_node *prev;
1075	struct rb_node *next;
1076	struct btrfs_ordered_extent *entry = NULL;
1077
1078	spin_lock_irq(&inode->ordered_tree_lock);
1079	node = inode->ordered_tree.rb_node;
 1080	/*
 1081	 * Here we don't want to use ordered_tree_search(), which would use
 1082	 * ordered_tree_last and screw up the search order.
 1083	 * And __tree_search() can't return the adjacent ordered extents
 1084	 * either, thus here we do our own search.
 1085	 */
1086	while (node) {
1087		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
1088
1089		if (file_offset < entry->file_offset) {
1090			node = node->rb_left;
1091		} else if (file_offset >= entry_end(entry)) {
1092			node = node->rb_right;
1093		} else {
1094			/*
 1095			 * Direct hit, got an ordered extent that covers
 1096			 * @file_offset.
1097			 */
1098			goto out;
1099		}
1100	}
1101	if (!entry) {
1102		/* Empty tree */
1103		goto out;
1104	}
1105
1106	cur = &entry->rb_node;
1107	/* We got an entry around @file_offset, check adjacent entries */
1108	if (entry->file_offset < file_offset) {
1109		prev = cur;
1110		next = rb_next(cur);
1111	} else {
1112		prev = rb_prev(cur);
1113		next = cur;
1114	}
1115	if (prev) {
1116		entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
1117		if (btrfs_range_overlaps(entry, file_offset, len))
1118			goto out;
1119	}
1120	if (next) {
1121		entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
1122		if (btrfs_range_overlaps(entry, file_offset, len))
1123			goto out;
1124	}
1125	/* No ordered extent in the range */
1126	entry = NULL;
1127out:
1128	if (entry) {
1129		refcount_inc(&entry->refs);
1130		trace_btrfs_ordered_extent_lookup_first_range(inode, entry);
1131	}
1132
1133	spin_unlock_irq(&inode->ordered_tree_lock);
1134	return entry;
1135}
1136
 1137/*
 1138 * Lock the passed range and ensure all pending ordered extents in it are run
 1139 * to completion.
 1140 *
 1141 * @inode:        Inode whose ordered tree is to be searched
 1142 * @start:        Beginning of range to flush
 1143 * @end:          Last byte of range to lock
 1144 * @cached_state: If passed, will return the extent state responsible for the
 1145 *                locked range. It's the caller's responsibility to free the
 1146 *                cached state.
 1147 *
 1148 * Always return with the given range locked, ensuring that after it's called
 1149 * no ordered extent can be pending.
 1150 */
1151void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
1152					u64 end,
1153					struct extent_state **cached_state)
1154{
1155	struct btrfs_ordered_extent *ordered;
1156	struct extent_state *cache = NULL;
1157	struct extent_state **cachedp = &cache;
1158
1159	if (cached_state)
1160		cachedp = cached_state;
1161
1162	while (1) {
1163		lock_extent(&inode->io_tree, start, end, cachedp);
1164		ordered = btrfs_lookup_ordered_range(inode, start,
1165						     end - start + 1);
1166		if (!ordered) {
1167			/*
1168			 * If no external cached_state has been passed then
1169			 * decrement the extra ref taken for cachedp since we
1170			 * aren't exposing it outside of this function
1171			 */
1172			if (!cached_state)
1173				refcount_dec(&cache->refs);
1174			break;
1175		}
1176		unlock_extent(&inode->io_tree, start, end, cachedp);
1177		btrfs_start_ordered_extent(ordered);
1178		btrfs_put_ordered_extent(ordered);
1179	}
1180}
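
/*
 * Illustrative sketch, not part of the original file: the usual pattern
 * around btrfs_lock_and_flush_ordered_range().  The helper name
 * example_with_range_locked() is an assumption for illustration only.
 */
#if 0
static void example_with_range_locked(struct btrfs_inode *inode, u64 start,
				      u64 end)
{
	struct extent_state *cached = NULL;

	/* Returns with [start, end] locked and no ordered extents pending. */
	btrfs_lock_and_flush_ordered_range(inode, start, end, &cached);

	/* ... operate on the range; no ordered extent can be pending here ... */

	unlock_extent(&inode->io_tree, start, end, &cached);
}
#endif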
1181
1182/*
1183 * Lock the passed range and ensure all pending ordered extents in it are run
1184 * to completion in nowait mode.
1185 *
 1186 * Return true if the range was locked and btrfs_lookup_ordered_range() did
 1187 * not find any ordered extents, otherwise false.
1188 */
1189bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end,
1190				  struct extent_state **cached_state)
1191{
1192	struct btrfs_ordered_extent *ordered;
1193
1194	if (!try_lock_extent(&inode->io_tree, start, end, cached_state))
1195		return false;
1196
1197	ordered = btrfs_lookup_ordered_range(inode, start, end - start + 1);
1198	if (!ordered)
1199		return true;
1200
1201	btrfs_put_ordered_extent(ordered);
1202	unlock_extent(&inode->io_tree, start, end, cached_state);
1203
1204	return false;
1205}
1206
 1207/* Split out a new ordered extent for the first @len bytes of @ordered. */
1208struct btrfs_ordered_extent *btrfs_split_ordered_extent(
1209			struct btrfs_ordered_extent *ordered, u64 len)
1210{
1211	struct btrfs_inode *inode = ordered->inode;
1212	struct btrfs_root *root = inode->root;
1213	struct btrfs_fs_info *fs_info = root->fs_info;
1214	u64 file_offset = ordered->file_offset;
1215	u64 disk_bytenr = ordered->disk_bytenr;
1216	unsigned long flags = ordered->flags;
1217	struct btrfs_ordered_sum *sum, *tmpsum;
1218	struct btrfs_ordered_extent *new;
1219	struct rb_node *node;
1220	u64 offset = 0;
1221
1222	trace_btrfs_ordered_extent_split(inode, ordered);
1223
1224	ASSERT(!(flags & (1U << BTRFS_ORDERED_COMPRESSED)));
1225
1226	/*
1227	 * The entire bio must be covered by the ordered extent, but we can't
1228	 * reduce the original extent to a zero length either.
1229	 */
1230	if (WARN_ON_ONCE(len >= ordered->num_bytes))
1231		return ERR_PTR(-EINVAL);
1232	/*
1233	 * If our ordered extent had an error there's no point in continuing.
1234	 * The error may have come from a transaction abort done either by this
1235	 * task or some other concurrent task, and the transaction abort path
1236	 * iterates over all existing ordered extents and sets the flag
1237	 * BTRFS_ORDERED_IOERR on them.
1238	 */
1239	if (unlikely(flags & (1U << BTRFS_ORDERED_IOERR))) {
1240		const int fs_error = BTRFS_FS_ERROR(fs_info);
1241
1242		return fs_error ? ERR_PTR(fs_error) : ERR_PTR(-EIO);
1243	}
1244	/* We cannot split partially completed ordered extents. */
1245	if (ordered->bytes_left) {
1246		ASSERT(!(flags & ~BTRFS_ORDERED_TYPE_FLAGS));
1247		if (WARN_ON_ONCE(ordered->bytes_left != ordered->disk_num_bytes))
1248			return ERR_PTR(-EINVAL);
1249	}
1250	/* We cannot split a compressed ordered extent. */
1251	if (WARN_ON_ONCE(ordered->disk_num_bytes != ordered->num_bytes))
1252		return ERR_PTR(-EINVAL);
1253
1254	new = alloc_ordered_extent(inode, file_offset, len, len, disk_bytenr,
1255				   len, 0, flags, ordered->compress_type);
1256	if (IS_ERR(new))
1257		return new;
1258
1259	/* One ref for the tree. */
1260	refcount_inc(&new->refs);
1261
1262	/*
1263	 * Take the root's ordered_extent_lock to avoid a race with
1264	 * btrfs_wait_ordered_extents() when updating the disk_bytenr and
1265	 * disk_num_bytes fields of the ordered extent below. And we disable
1266	 * IRQs because the inode's ordered_tree_lock is used in IRQ context
1267	 * elsewhere.
1268	 *
1269	 * There's no concern about a previous caller of
1270	 * btrfs_wait_ordered_extents() getting the trimmed ordered extent
1271	 * before we insert the new one, because even if it gets the ordered
1272	 * extent before it's trimmed and the new one inserted, right before it
1273	 * uses it or during its use, the ordered extent might have been
1274	 * trimmed in the meanwhile, and it missed the new ordered extent.
1275	 * There's no way around this and it's harmless for current use cases,
1276	 * so we take the root's ordered_extent_lock to fix that race during
1277	 * trimming and silence tools like KCSAN.
1278	 */
1279	spin_lock_irq(&root->ordered_extent_lock);
1280	spin_lock(&inode->ordered_tree_lock);
1281
1282	/*
1283	 * We don't have overlapping ordered extents (that would imply double
1284	 * allocation of extents) and we checked above that the split length
1285	 * does not cross the ordered extent's num_bytes field, so there's
1286	 * no need to remove it and re-insert it in the tree.
1287	 */
1288	ordered->file_offset += len;
1289	ordered->disk_bytenr += len;
1290	ordered->num_bytes -= len;
1291	ordered->disk_num_bytes -= len;
1292	ordered->ram_bytes -= len;
1293
1294	if (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags)) {
1295		ASSERT(ordered->bytes_left == 0);
1296		new->bytes_left = 0;
1297	} else {
1298		ordered->bytes_left -= len;
1299	}
1300
1301	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags)) {
1302		if (ordered->truncated_len > len) {
1303			ordered->truncated_len -= len;
1304		} else {
1305			new->truncated_len = ordered->truncated_len;
1306			ordered->truncated_len = 0;
1307		}
1308	}
1309
1310	list_for_each_entry_safe(sum, tmpsum, &ordered->list, list) {
1311		if (offset == len)
1312			break;
1313		list_move_tail(&sum->list, &new->list);
1314		offset += sum->len;
1315	}
1316
1317	node = tree_insert(&inode->ordered_tree, new->file_offset, &new->rb_node);
1318	if (unlikely(node))
1319		btrfs_panic(fs_info, -EEXIST,
1320			"inconsistency in ordered tree at offset %llu after split",
1321			new->file_offset);
1322	spin_unlock(&inode->ordered_tree_lock);
1323
1324	list_add_tail(&new->root_extent_list, &root->ordered_extents);
1325	root->nr_ordered_extents++;
1326	spin_unlock_irq(&root->ordered_extent_lock);
1327	return new;
1328}
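
/*
 * Illustrative sketch, not part of the original file: splitting off the
 * front of an ordered extent, e.g. when a bio covering only the first
 * @split bytes must complete on its own.  The helper name
 * example_split_front() is an assumption for illustration only; error
 * handling is reduced to passing the ERR_PTR through.
 */
#if 0
static struct btrfs_ordered_extent *example_split_front(
		struct btrfs_ordered_extent *ordered, u64 split)
{
	/*
	 * On success @ordered is trimmed to its tail and a new ordered
	 * extent covering the first @split bytes is returned with a
	 * reference held for the caller.
	 */
	return btrfs_split_ordered_extent(ordered, split);
}
#endif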
1329
1330int __init ordered_data_init(void)
1331{
1332	btrfs_ordered_extent_cache = KMEM_CACHE(btrfs_ordered_extent, 0);
1333	if (!btrfs_ordered_extent_cache)
1334		return -ENOMEM;
1335
1336	return 0;
1337}
1338
1339void __cold ordered_data_exit(void)
1340{
1341	kmem_cache_destroy(btrfs_ordered_extent_cache);
1342}
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2007 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/slab.h>
   7#include <linux/blkdev.h>
   8#include <linux/writeback.h>
   9#include <linux/sched/mm.h>
 
  10#include "misc.h"
  11#include "ctree.h"
  12#include "transaction.h"
  13#include "btrfs_inode.h"
  14#include "extent_io.h"
  15#include "disk-io.h"
  16#include "compression.h"
  17#include "delalloc-space.h"
 
 
 
 
  18
  19static struct kmem_cache *btrfs_ordered_extent_cache;
  20
  21static u64 entry_end(struct btrfs_ordered_extent *entry)
  22{
  23	if (entry->file_offset + entry->len < entry->file_offset)
  24		return (u64)-1;
  25	return entry->file_offset + entry->len;
  26}
  27
  28/* returns NULL if the insertion worked, or it returns the node it did find
  29 * in the tree
  30 */
  31static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
  32				   struct rb_node *node)
  33{
  34	struct rb_node **p = &root->rb_node;
  35	struct rb_node *parent = NULL;
  36	struct btrfs_ordered_extent *entry;
  37
  38	while (*p) {
  39		parent = *p;
  40		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);
  41
  42		if (file_offset < entry->file_offset)
  43			p = &(*p)->rb_left;
  44		else if (file_offset >= entry_end(entry))
  45			p = &(*p)->rb_right;
  46		else
  47			return parent;
  48	}
  49
  50	rb_link_node(node, parent, p);
  51	rb_insert_color(node, root);
  52	return NULL;
  53}
  54
  55static void ordered_data_tree_panic(struct inode *inode, int errno,
  56					       u64 offset)
  57{
  58	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
  59	btrfs_panic(fs_info, errno,
  60		    "Inconsistency in ordered tree at offset %llu", offset);
  61}
  62
  63/*
  64 * look for a given offset in the tree, and if it can't be found return the
  65 * first lesser offset
  66 */
  67static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
  68				     struct rb_node **prev_ret)
  69{
  70	struct rb_node *n = root->rb_node;
  71	struct rb_node *prev = NULL;
  72	struct rb_node *test;
  73	struct btrfs_ordered_extent *entry;
  74	struct btrfs_ordered_extent *prev_entry = NULL;
  75
  76	while (n) {
  77		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
  78		prev = n;
  79		prev_entry = entry;
  80
  81		if (file_offset < entry->file_offset)
  82			n = n->rb_left;
  83		else if (file_offset >= entry_end(entry))
  84			n = n->rb_right;
  85		else
  86			return n;
  87	}
  88	if (!prev_ret)
  89		return NULL;
  90
  91	while (prev && file_offset >= entry_end(prev_entry)) {
  92		test = rb_next(prev);
  93		if (!test)
  94			break;
  95		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
  96				      rb_node);
  97		if (file_offset < entry_end(prev_entry))
  98			break;
  99
 100		prev = test;
 101	}
 102	if (prev)
 103		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
 104				      rb_node);
 105	while (prev && file_offset < entry_end(prev_entry)) {
 106		test = rb_prev(prev);
 107		if (!test)
 108			break;
 109		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
 110				      rb_node);
 111		prev = test;
 112	}
 113	*prev_ret = prev;
 114	return NULL;
 115}
 116
 117/*
 118 * helper to check if a given offset is inside a given entry
 119 */
 120static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
 121{
 122	if (file_offset < entry->file_offset ||
 123	    entry->file_offset + entry->len <= file_offset)
 124		return 0;
 125	return 1;
 126}
 127
 128static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
 129			  u64 len)
 130{
 131	if (file_offset + len <= entry->file_offset ||
 132	    entry->file_offset + entry->len <= file_offset)
 133		return 0;
 134	return 1;
 135}
 136
 137/*
 138 * look find the first ordered struct that has this offset, otherwise
 139 * the first one less than this offset
 140 */
 141static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
 142					  u64 file_offset)
 143{
 144	struct rb_root *root = &tree->tree;
 145	struct rb_node *prev = NULL;
 146	struct rb_node *ret;
 147	struct btrfs_ordered_extent *entry;
 148
 149	if (tree->last) {
 150		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
 151				 rb_node);
 152		if (offset_in_entry(entry, file_offset))
 153			return tree->last;
 154	}
 155	ret = __tree_search(root, file_offset, &prev);
 156	if (!ret)
 157		ret = prev;
 158	if (ret)
 159		tree->last = ret;
 160	return ret;
 161}
 162
 163/* allocate and add a new ordered_extent into the per-inode tree.
 164 * file_offset is the logical offset in the file
 165 *
 166 * start is the disk block number of an extent already reserved in the
 167 * extent allocation tree
 168 *
 169 * len is the length of the extent
 170 *
 171 * The tree is given a single reference on the ordered extent that was
 172 * inserted.
 173 */
 174static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
 175				      u64 start, u64 len, u64 disk_len,
 176				      int type, int dio, int compress_type)
 177{
 178	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 179	struct btrfs_root *root = BTRFS_I(inode)->root;
 180	struct btrfs_ordered_inode_tree *tree;
 181	struct rb_node *node;
 182	struct btrfs_ordered_extent *entry;
 
 
 183
 184	tree = &BTRFS_I(inode)->ordered_tree;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 185	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
 186	if (!entry)
 187		return -ENOMEM;
 188
 189	entry->file_offset = file_offset;
 190	entry->start = start;
 191	entry->len = len;
 192	entry->disk_len = disk_len;
 193	entry->bytes_left = len;
 194	entry->inode = igrab(inode);
 
 
 195	entry->compress_type = compress_type;
 196	entry->truncated_len = (u64)-1;
 197	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
 198		set_bit(type, &entry->flags);
 199
 200	if (dio) {
 201		percpu_counter_add_batch(&fs_info->dio_bytes, len,
 202					 fs_info->delalloc_batch);
 203		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);
 204	}
 205
 206	/* one ref for the tree */
 207	refcount_set(&entry->refs, 1);
 208	init_waitqueue_head(&entry->wait);
 209	INIT_LIST_HEAD(&entry->list);
 
 210	INIT_LIST_HEAD(&entry->root_extent_list);
 211	INIT_LIST_HEAD(&entry->work_list);
 
 212	init_completion(&entry->completion);
 213	INIT_LIST_HEAD(&entry->log_list);
 214	INIT_LIST_HEAD(&entry->trans_list);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 215
 216	trace_btrfs_ordered_extent_add(inode, entry);
 217
 218	spin_lock_irq(&tree->lock);
 219	node = tree_insert(&tree->tree, file_offset,
 
 
 
 
 
 
 220			   &entry->rb_node);
 221	if (node)
 222		ordered_data_tree_panic(inode, -EEXIST, file_offset);
 223	spin_unlock_irq(&tree->lock);
 
 
 224
 225	spin_lock(&root->ordered_extent_lock);
 226	list_add_tail(&entry->root_extent_list,
 227		      &root->ordered_extents);
 228	root->nr_ordered_extents++;
 229	if (root->nr_ordered_extents == 1) {
 230		spin_lock(&fs_info->ordered_root_lock);
 231		BUG_ON(!list_empty(&root->ordered_root));
 232		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
 233		spin_unlock(&fs_info->ordered_root_lock);
 234	}
 235	spin_unlock(&root->ordered_extent_lock);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 236
 237	/*
 238	 * We don't need the count_max_extents here, we can assume that all of
 239	 * that work has been done at higher layers, so this is truly the
 240	 * smallest the extent is going to get.
 
 
 
 
 
 
 
 241	 */
 242	spin_lock(&BTRFS_I(inode)->lock);
 243	btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
 244	spin_unlock(&BTRFS_I(inode)->lock);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 245
 246	return 0;
 
 
 
 247}
 248
 249int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
 250			     u64 start, u64 len, u64 disk_len, int type)
 251{
 252	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
 253					  disk_len, type, 0,
 254					  BTRFS_COMPRESS_NONE);
 
 255}
 256
 257int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
 258				 u64 start, u64 len, u64 disk_len, int type)
 
 259{
 260	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
 261					  disk_len, type, 1,
 262					  BTRFS_COMPRESS_NONE);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 263}
 264
 265int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
 266				      u64 start, u64 len, u64 disk_len,
 267				      int type, int compress_type)
 268{
 269	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
 270					  disk_len, type, 0,
 271					  compress_type);
 
 
 
 
 272}
 273
 274/*
 275 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 276 * when an ordered extent is finished.  If the list covers more than one
 277 * ordered extent, it is split across multiples.
 278 */
 279void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
 280			   struct btrfs_ordered_sum *sum)
 281{
 282	struct btrfs_ordered_inode_tree *tree;
 
 
 
 
 
 
 
 
 
 283
 284	tree = &BTRFS_I(entry->inode)->ordered_tree;
 285	spin_lock_irq(&tree->lock);
 286	list_add_tail(&sum->list, &entry->list);
 287	spin_unlock_irq(&tree->lock);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 288}
 289
 290/*
 291 * this is used to account for finished IO across a given range
 292 * of the file.  The IO may span ordered extents.  If
 293 * a given ordered_extent is completely done, 1 is returned, otherwise
 294 * 0.
 295 *
 296 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 297 * to make sure this function only returns 1 once for a given ordered extent.
 
 
 
 
 298 *
 299 * file_offset is updated to one byte past the range that is recorded as
 300 * complete.  This allows you to walk forward in the file.
 301 */
 302int btrfs_dec_test_first_ordered_pending(struct inode *inode,
 303				   struct btrfs_ordered_extent **cached,
 304				   u64 *file_offset, u64 io_size, int uptodate)
 305{
 306	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 307	struct btrfs_ordered_inode_tree *tree;
 308	struct rb_node *node;
 309	struct btrfs_ordered_extent *entry = NULL;
 310	int ret;
 311	unsigned long flags;
 312	u64 dec_end;
 313	u64 dec_start;
 314	u64 to_dec;
 315
 316	tree = &BTRFS_I(inode)->ordered_tree;
 317	spin_lock_irqsave(&tree->lock, flags);
 318	node = tree_search(tree, *file_offset);
 319	if (!node) {
 320		ret = 1;
 321		goto out;
 322	}
 
 
 
 
 
 323
 324	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 325	if (!offset_in_entry(entry, *file_offset)) {
 326		ret = 1;
 327		goto out;
 328	}
 
 
 
 
 
 
 
 
 
 329
 330	dec_start = max(*file_offset, entry->file_offset);
 331	dec_end = min(*file_offset + io_size, entry->file_offset +
 332		      entry->len);
 333	*file_offset = dec_end;
 334	if (dec_start > dec_end) {
 335		btrfs_crit(fs_info, "bad ordering dec_start %llu end %llu",
 336			   dec_start, dec_end);
 337	}
 338	to_dec = dec_end - dec_start;
 339	if (to_dec > entry->bytes_left) {
 340		btrfs_crit(fs_info,
 341			   "bad ordered accounting left %llu size %llu",
 342			   entry->bytes_left, to_dec);
 343	}
 344	entry->bytes_left -= to_dec;
 345	if (!uptodate)
 346		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);
 347
 348	if (entry->bytes_left == 0) {
 349		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
 350		/* test_and_set_bit implies a barrier */
 351		cond_wake_up_nomb(&entry->wait);
 352	} else {
 353		ret = 1;
 354	}
 355out:
 356	if (!ret && cached && entry) {
 357		*cached = entry;
 358		refcount_inc(&entry->refs);
 359	}
 360	spin_unlock_irqrestore(&tree->lock, flags);
 361	return ret == 0;
 362}
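/*
 * Illustrative caller sketch: because *file_offset advances past the bytes
 * just accounted, an endio path can walk a range spanning several ordered
 * extents.  btrfs_finish_ordered_io() is assumed here as the finisher; the
 * no-progress check exits once no ordered extent covers the offset.
 *
 *	u64 cur = start, last;
 *
 *	while (cur < start + len) {
 *		struct btrfs_ordered_extent *cached = NULL;
 *
 *		last = cur;
 *		if (btrfs_dec_test_first_ordered_pending(inode, &cached, &cur,
 *						start + len - cur, uptodate))
 *			btrfs_finish_ordered_io(cached);
 *		if (cur == last)
 *			break;
 *	}
 */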
 363
 364/*
 365 * this is used to account for finished IO across a given range
 366 * of the file.  The IO should not span ordered extents.  If
 367 * a given ordered_extent is completely done, 1 is returned, otherwise
 368 * 0.
 369 *
 370 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 371 * to make sure this function only returns 1 once for a given ordered extent.
 372 */
 373int btrfs_dec_test_ordered_pending(struct inode *inode,
 374				   struct btrfs_ordered_extent **cached,
 375				   u64 file_offset, u64 io_size, int uptodate)
 376{
 377	struct btrfs_ordered_inode_tree *tree;
 378	struct rb_node *node;
 379	struct btrfs_ordered_extent *entry = NULL;
 380	unsigned long flags;
 381	int ret;
 382
 383	tree = &BTRFS_I(inode)->ordered_tree;
 384	spin_lock_irqsave(&tree->lock, flags);
 385	if (cached && *cached) {
 386		entry = *cached;
 387		goto have_entry;
 388	}
 389
 390	node = tree_search(tree, file_offset);
 391	if (!node) {
 392		ret = 1;
 393		goto out;
 394	}
 395
 396	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 397have_entry:
 398	if (!offset_in_entry(entry, file_offset)) {
 399		ret = 1;
 400		goto out;
 401	}
 402
 403	if (io_size > entry->bytes_left) {
 404		btrfs_crit(BTRFS_I(inode)->root->fs_info,
 405			   "bad ordered accounting left %llu size %llu",
 406		       entry->bytes_left, io_size);
 407	}
 408	entry->bytes_left -= io_size;
 409	if (!uptodate)
 410		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);
 411
 412	if (entry->bytes_left == 0) {
 413		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
 414		/* test_and_set_bit implies a barrier */
 415		cond_wake_up_nomb(&entry->wait);
 416	} else {
 417		ret = 1;
 418	}
 419out:
 420	if (!ret && cached && entry) {
 421		*cached = entry;
 422		refcount_inc(&entry->refs);
 423	}
 424	spin_unlock_irqrestore(&tree->lock, flags);
 425	return ret == 0;
 426}
 427
 428/*
 429 * used to drop a reference on an ordered extent.  This will free
 430 * the extent if the last reference is dropped
 431 */
 432void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
 433{
 434	struct list_head *cur;
 435	struct btrfs_ordered_sum *sum;
 436
 437	trace_btrfs_ordered_extent_put(entry->inode, entry);
 438
 439	if (refcount_dec_and_test(&entry->refs)) {
 440		ASSERT(list_empty(&entry->log_list));
 441		ASSERT(list_empty(&entry->trans_list));
 442		ASSERT(list_empty(&entry->root_extent_list));
 443		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
 444		if (entry->inode)
 445			btrfs_add_delayed_iput(entry->inode);
 446		while (!list_empty(&entry->list)) {
 447			cur = entry->list.next;
 448			sum = list_entry(cur, struct btrfs_ordered_sum, list);
 449			list_del(&sum->list);
 450			kvfree(sum);
 451		}
 452		kmem_cache_free(btrfs_ordered_extent_cache, entry);
 453	}
 454}
 455
 456/*
 457 * Remove an ordered extent from the tree; no references are dropped,
 458 * but waiters are woken up.
 459 */
 460void btrfs_remove_ordered_extent(struct inode *inode,
 461				 struct btrfs_ordered_extent *entry)
 462{
 463	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 464	struct btrfs_ordered_inode_tree *tree;
 465	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
 466	struct btrfs_root *root = btrfs_inode->root;
 467	struct rb_node *node;
 468
 469	/* This is paired with btrfs_add_ordered_extent. */
 470	spin_lock(&btrfs_inode->lock);
 471	btrfs_mod_outstanding_extents(btrfs_inode, -1);
 472	spin_unlock(&btrfs_inode->lock);
 473	if (root != fs_info->tree_root)
 474		btrfs_delalloc_release_metadata(btrfs_inode, entry->len, false);
 475
 476	if (test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
 477		percpu_counter_add_batch(&fs_info->dio_bytes, -entry->len,
 478					 fs_info->delalloc_batch);
 479
 480	tree = &btrfs_inode->ordered_tree;
 481	spin_lock_irq(&tree->lock);
 482	node = &entry->rb_node;
 483	rb_erase(node, &tree->tree);
 484	RB_CLEAR_NODE(node);
 485	if (tree->last == node)
 486		tree->last = NULL;
 487	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
 488	spin_unlock_irq(&tree->lock);
 489
 490	spin_lock(&root->ordered_extent_lock);
 491	list_del_init(&entry->root_extent_list);
 492	root->nr_ordered_extents--;
 493
 494	trace_btrfs_ordered_extent_remove(inode, entry);
 495
 496	if (!root->nr_ordered_extents) {
 497		spin_lock(&fs_info->ordered_root_lock);
 498		BUG_ON(list_empty(&root->ordered_root));
 499		list_del_init(&root->ordered_root);
 500		spin_unlock(&fs_info->ordered_root_lock);
 501	}
 502	spin_unlock(&root->ordered_extent_lock);
 503	wake_up(&entry->wait);
 504}
 505
 506static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
 507{
 508	struct btrfs_ordered_extent *ordered;
 509
 510	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
 511	btrfs_start_ordered_extent(ordered->inode, ordered, 1);
 512	complete(&ordered->completion);
 513}
 514
 515/*
 516 * wait for all the ordered extents in a root.  This is done when balancing
 517 * space between drives.
 518 */
 519u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
 520			       const u64 range_start, const u64 range_len)
 521{
 522	struct btrfs_fs_info *fs_info = root->fs_info;
 523	LIST_HEAD(splice);
 524	LIST_HEAD(skipped);
 525	LIST_HEAD(works);
 526	struct btrfs_ordered_extent *ordered, *next;
 527	u64 count = 0;
 528	const u64 range_end = range_start + range_len;
 529
 530	mutex_lock(&root->ordered_extent_mutex);
 531	spin_lock(&root->ordered_extent_lock);
 532	list_splice_init(&root->ordered_extents, &splice);
 533	while (!list_empty(&splice) && nr) {
 534		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
 535					   root_extent_list);
 536
 537		if (range_end <= ordered->start ||
 538		    ordered->start + ordered->disk_len <= range_start) {
 539			list_move_tail(&ordered->root_extent_list, &skipped);
 540			cond_resched_lock(&root->ordered_extent_lock);
 541			continue;
 542		}
 543
 544		list_move_tail(&ordered->root_extent_list,
 545			       &root->ordered_extents);
 546		refcount_inc(&ordered->refs);
 547		spin_unlock(&root->ordered_extent_lock);
 548
 549		btrfs_init_work(&ordered->flush_work,
 550				btrfs_flush_delalloc_helper,
 551				btrfs_run_ordered_extent_work, NULL, NULL);
 552		list_add_tail(&ordered->work_list, &works);
 553		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);
 554
 555		cond_resched();
 556		spin_lock(&root->ordered_extent_lock);
 557		if (nr != U64_MAX)
 558			nr--;
 559		count++;
 560	}
 561	list_splice_tail(&skipped, &root->ordered_extents);
 562	list_splice_tail(&splice, &root->ordered_extents);
 563	spin_unlock(&root->ordered_extent_lock);
 564
 565	list_for_each_entry_safe(ordered, next, &works, work_list) {
 566		list_del_init(&ordered->work_list);
 567		wait_for_completion(&ordered->completion);
 568		btrfs_put_ordered_extent(ordered);
 569		cond_resched();
 570	}
 571	mutex_unlock(&root->ordered_extent_mutex);
 572
 573	return count;
 574}
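/*
 * Example usage (sketch): callers such as snapshot creation pass the widest
 * possible range to wait on every ordered extent in a root:
 *
 *	btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
 */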
 575
 576u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
 577			     const u64 range_start, const u64 range_len)
 578{
 579	struct btrfs_root *root;
 580	struct list_head splice;
 581	u64 total_done = 0;
 582	u64 done;
 583
 584	INIT_LIST_HEAD(&splice);
 585
 586	mutex_lock(&fs_info->ordered_operations_mutex);
 587	spin_lock(&fs_info->ordered_root_lock);
 588	list_splice_init(&fs_info->ordered_roots, &splice);
 589	while (!list_empty(&splice) && nr) {
 590		root = list_first_entry(&splice, struct btrfs_root,
 591					ordered_root);
 592		root = btrfs_grab_fs_root(root);
 593		BUG_ON(!root);
 594		list_move_tail(&root->ordered_root,
 595			       &fs_info->ordered_roots);
 596		spin_unlock(&fs_info->ordered_root_lock);
 597
 598		done = btrfs_wait_ordered_extents(root, nr,
 599						  range_start, range_len);
 600		btrfs_put_fs_root(root);
 601		total_done += done;
 602
 603		spin_lock(&fs_info->ordered_root_lock);
 604		if (nr != U64_MAX) {
 605			nr -= done;
 606		}
 607	}
 608	list_splice_tail(&splice, &fs_info->ordered_roots);
 609	spin_unlock(&fs_info->ordered_root_lock);
 610	mutex_unlock(&fs_info->ordered_operations_mutex);
 611
 612	return total_done;
 613}
 614
 615/*
 616 * Used to start IO or wait for a given ordered extent to finish.
 617 *
 618 * If wait is one, this effectively waits on page writeback for all the pages
 619 * in the extent, and it waits on the io completion code to insert
 620 * metadata into the btree corresponding to the extent.
 621 */
 622void btrfs_start_ordered_extent(struct inode *inode,
 623				       struct btrfs_ordered_extent *entry,
 624				       int wait)
 625{
 626	u64 start = entry->file_offset;
 627	u64 end = start + entry->len - 1;
 628
 629	trace_btrfs_ordered_extent_start(inode, entry);
 630
 631	/*
 632	 * pages in the range can be dirty, clean or writeback.  We
 633	 * start IO on any dirty ones so the wait doesn't stall waiting
 634	 * for the flusher thread to find them
 635	 */
 636	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
 637		filemap_fdatawrite_range(inode->i_mapping, start, end);
 638	if (wait) {
 639		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
 640						 &entry->flags));
 641	}
 642}
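/*
 * Typical pattern around this helper (sketch using only functions from this
 * file): look up the ordered extent covering an offset, start and wait for
 * it, then drop the lookup reference.
 *
 *	ordered = btrfs_lookup_ordered_extent(inode, offset);
 *	if (ordered) {
 *		btrfs_start_ordered_extent(inode, ordered, 1);
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */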
 643
 644/*
 645 * Used to wait on ordered extents across a large range of bytes.
 646 */
 647int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
 648{
 649	int ret = 0;
 650	int ret_wb = 0;
 651	u64 end;
 652	u64 orig_end;
 653	struct btrfs_ordered_extent *ordered;
 654
 655	if (start + len < start) {
 656		orig_end = INT_LIMIT(loff_t);
 657	} else {
 658		orig_end = start + len - 1;
 659		if (orig_end > INT_LIMIT(loff_t))
 660			orig_end = INT_LIMIT(loff_t);
 661	}
 662
 663	/* start IO across the range first to instantiate any delalloc
 664	 * extents
 665	 */
 666	ret = btrfs_fdatawrite_range(inode, start, orig_end);
 667	if (ret)
 668		return ret;
 669
 670	/*
 671	 * If we have a writeback error don't return immediately. Wait first
 672	 * for any ordered extents that haven't completed yet. This is to make
 673	 * sure no one can dirty the same page ranges and call writepages()
 674	 * before the ordered extents complete - to avoid failures (-EEXIST)
 675	 * when adding the new ordered extents to the ordered tree.
 676	 */
 677	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);
 678
 679	end = orig_end;
 680	while (1) {
 681		ordered = btrfs_lookup_first_ordered_extent(inode, end);
 682		if (!ordered)
 683			break;
 684		if (ordered->file_offset > orig_end) {
 685			btrfs_put_ordered_extent(ordered);
 686			break;
 687		}
 688		if (ordered->file_offset + ordered->len <= start) {
 689			btrfs_put_ordered_extent(ordered);
 690			break;
 691		}
 692		btrfs_start_ordered_extent(inode, ordered, 1);
 693		end = ordered->file_offset;
 694		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
 695			ret = -EIO;
 696		btrfs_put_ordered_extent(ordered);
 697		if (ret || end == 0 || end == start)
 698			break;
 699		end--;
 700	}
 701	return ret_wb ? ret_wb : ret;
 702}
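/*
 * Example (sketch): fsync-style callers flush and wait on a byte range
 * before logging, propagating any writeback or ordered-extent error.
 *
 *	ret = btrfs_wait_ordered_range(inode, start, end - start + 1);
 *	if (ret)
 *		return ret;
 */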
 703
 704/*
 705 * find an ordered extent corresponding to file_offset.  return NULL if
 706 * nothing is found, otherwise take a reference on the extent and return it
 707 */
 708struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
 709							 u64 file_offset)
 710{
 711	struct btrfs_ordered_inode_tree *tree;
 712	struct rb_node *node;
 713	struct btrfs_ordered_extent *entry = NULL;
 714
 715	tree = &BTRFS_I(inode)->ordered_tree;
 716	spin_lock_irq(&tree->lock);
 717	node = tree_search(tree, file_offset);
 718	if (!node)
 719		goto out;
 720
 721	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 722	if (!offset_in_entry(entry, file_offset))
 723		entry = NULL;
 724	if (entry)
 725		refcount_inc(&entry->refs);
 726out:
 727	spin_unlock_irq(&tree->lock);
 728	return entry;
 729}
 730
 731/* Since the DIO code tries to lock a wide area, we need to look for any ordered
 732 * extents that exist in the range, rather than just the start of the range.
 733 */
 734struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
 735		struct btrfs_inode *inode, u64 file_offset, u64 len)
 736{
 737	struct btrfs_ordered_inode_tree *tree;
 738	struct rb_node *node;
 739	struct btrfs_ordered_extent *entry = NULL;
 740
 741	tree = &inode->ordered_tree;
 742	spin_lock_irq(&tree->lock);
 743	node = tree_search(tree, file_offset);
 744	if (!node) {
 745		node = tree_search(tree, file_offset + len);
 746		if (!node)
 747			goto out;
 748	}
 749
 750	while (1) {
 751		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 752		if (btrfs_range_overlaps(entry, file_offset, len))
 753			break;
 754
 755		if (entry->file_offset >= file_offset + len) {
 756			entry = NULL;
 757			break;
 758		}
 759		entry = NULL;
 760		node = rb_next(node);
 761		if (!node)
 762			break;
 763	}
 764out:
 765	if (entry)
 766		refcount_inc(&entry->refs);
 767	spin_unlock_irq(&tree->lock);
 768	return entry;
 769}
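/*
 * Sketch of the DIO-style check described above: test whether any ordered
 * extent overlaps a candidate range and back off if so (-EAGAIN is an
 * assumed policy here, not what every caller does).
 *
 *	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), pos, len);
 *	if (ordered) {
 *		btrfs_put_ordered_extent(ordered);
 *		return -EAGAIN;
 *	}
 */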
 770
 771/*
 772 * lookup and return any extent before 'file_offset'.  NULL is returned
 773 * if none is found
 774 */
 775struct btrfs_ordered_extent *
 776btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
 777{
 778	struct btrfs_ordered_inode_tree *tree;
 779	struct rb_node *node;
 780	struct btrfs_ordered_extent *entry = NULL;
 781
 782	tree = &BTRFS_I(inode)->ordered_tree;
 783	spin_lock_irq(&tree->lock);
 784	node = tree_search(tree, file_offset);
 785	if (!node)
 786		goto out;
 787
 788	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 789	refcount_inc(&entry->refs);
 790out:
 791	spin_unlock_irq(&tree->lock);
 792	return entry;
 793}
 794
 795/*
 796 * After an extent is done, call this to conditionally update the on disk
 797 * i_size.  i_size is updated to cover any fully written part of the file.
 798 */
 799int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
 800				struct btrfs_ordered_extent *ordered)
 801{
 802	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
 803	u64 disk_i_size;
 804	u64 new_i_size;
 805	u64 i_size = i_size_read(inode);
 806	struct rb_node *node;
 807	struct rb_node *prev = NULL;
 808	struct btrfs_ordered_extent *test;
 809	int ret = 1;
 810	u64 orig_offset = offset;
 811
 812	spin_lock_irq(&tree->lock);
 813	if (ordered) {
 814		offset = entry_end(ordered);
 815		if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags))
 816			offset = min(offset,
 817				     ordered->file_offset +
 818				     ordered->truncated_len);
 819	} else {
 820		offset = ALIGN(offset, btrfs_inode_sectorsize(inode));
 821	}
 822	disk_i_size = BTRFS_I(inode)->disk_i_size;
 823
 824	/*
 825	 * truncate file.
 826	 * If ordered is not NULL, then this is called from endio and
 827	 * disk_i_size will be updated by either truncate itself or any
 828	 * in-flight IOs which are inside the disk_i_size.
 829	 *
 830	 * Because btrfs_setsize() may set i_size with disk_i_size if truncate
 831	 * fails somehow, we need to make sure we have a precise disk_i_size by
 832	 * updating it as usual.
 833	 *
 834	 */
 835	if (!ordered && disk_i_size > i_size) {
 836		BTRFS_I(inode)->disk_i_size = orig_offset;
 837		ret = 0;
 838		goto out;
 839	}
 840
 841	/*
 842	 * if the disk i_size is already at the inode->i_size, or
 843	 * this ordered extent is inside the disk i_size, we're done
 844	 */
 845	if (disk_i_size == i_size)
 846		goto out;
 847
 848	/*
 849	 * We still need to update disk_i_size if outstanding_isize is greater
 850	 * than disk_i_size.
 851	 */
 852	if (offset <= disk_i_size &&
 853	    (!ordered || ordered->outstanding_isize <= disk_i_size))
 854		goto out;
 855
 856	/*
 857	 * walk backward from this ordered extent to disk_i_size.
 858	 * if we find an ordered extent then we can't update disk i_size
 859	 * yet
 860	 */
 861	if (ordered) {
 862		node = rb_prev(&ordered->rb_node);
 863	} else {
 864		prev = tree_search(tree, offset);
 865		/*
 866		 * we insert file extents without involving ordered struct,
 867		 * so there should be no ordered struct cover this offset
 868		 */
 869		if (prev) {
 870			test = rb_entry(prev, struct btrfs_ordered_extent,
 871					rb_node);
 872			BUG_ON(offset_in_entry(test, offset));
 873		}
 874		node = prev;
 875	}
 876	for (; node; node = rb_prev(node)) {
 877		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 878
 879		/* We treat this entry as if it doesn't exist */
 880		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
 881			continue;
 882
 883		if (entry_end(test) <= disk_i_size)
 884			break;
 885		if (test->file_offset >= i_size)
 886			break;
 887
 888		/*
 889		 * We don't update disk_i_size now, so record this not-yet-handled
 890		 * i_size here; otherwise we would lose track of the real i_size.
 891		 */
 892		if (test->outstanding_isize < offset)
 893			test->outstanding_isize = offset;
 894		if (ordered &&
 895		    ordered->outstanding_isize > test->outstanding_isize)
 896			test->outstanding_isize = ordered->outstanding_isize;
 897		goto out;
 898	}
 899	new_i_size = min_t(u64, offset, i_size);
 900
 901	/*
 902	 * Some ordered extents may have completed before the current one, and
 903	 * we hold the real i_size in ->outstanding_isize.
 904	 */
 905	if (ordered && ordered->outstanding_isize > new_i_size)
 906		new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
 907	BTRFS_I(inode)->disk_i_size = new_i_size;
 908	ret = 0;
 909out:
 910	/*
 911	 * We need to do this because we can't remove ordered extents until
 912	 * after the i_disk_size has been updated and then the inode has been
 913	 * updated to reflect the change, so we need to tell anybody who finds
 914	 * this ordered extent that we've already done all the real work, we
 915	 * just haven't completed all the other work.
 916	 */
 917	if (ordered)
 918		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
 919	spin_unlock_irq(&tree->lock);
 920	return ret;
 921}
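/*
 * Worked example (illustrative): start with disk_i_size == 0 and two 4K
 * ordered extents A = [0, 4K) and B = [4K, 8K).  If B finishes first, the
 * backward walk finds A still pending below it, stashes 8K in
 * A->outstanding_isize and leaves disk_i_size at 0.  When A finishes, the
 * walk finds nothing pending and disk_i_size jumps straight to 8K.
 */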
 922
 923/*
 924 * search the ordered extents for one corresponding to 'offset' and
 925 * try to find a checksum.  This is used because we allow pages to
 926 * be reclaimed before their checksum is actually put into the btree
 927 */
 928int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
 929			   u8 *sum, int len)
 930{
 931	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 932	struct btrfs_ordered_sum *ordered_sum;
 933	struct btrfs_ordered_extent *ordered;
 934	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
 935	unsigned long num_sectors;
 936	unsigned long i;
 937	u32 sectorsize = btrfs_inode_sectorsize(inode);
 938	const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
 939	int index = 0;
 940
 941	ordered = btrfs_lookup_ordered_extent(inode, offset);
 942	if (!ordered)
 943		return 0;
 944
 945	spin_lock_irq(&tree->lock);
 946	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
 947		if (disk_bytenr >= ordered_sum->bytenr &&
 948		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
 949			i = (disk_bytenr - ordered_sum->bytenr) >>
 950			    inode->i_sb->s_blocksize_bits;
 951			num_sectors = ordered_sum->len >>
 952				      inode->i_sb->s_blocksize_bits;
 953			num_sectors = min_t(int, len - index, num_sectors - i);
 954			memcpy(sum + index, ordered_sum->sums + i * csum_size,
 955			       num_sectors * csum_size);
 956
 957			index += (int)num_sectors * csum_size;
 958			if (index == len)
 959				goto out;
 960			disk_bytenr += num_sectors * sectorsize;
 961		}
 962	}
 963out:
 964	spin_unlock_irq(&tree->lock);
 965	btrfs_put_ordered_extent(ordered);
 966	return index;
 967}
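/*
 * Worked example of the index math (illustrative numbers): with 4K sectors
 * (s_blocksize_bits == 12) and a sum entry at bytenr 1M covering 16K, a
 * lookup for disk_bytenr 1M + 8K gives i = (8K >> 12) = 2, so the copy
 * starts at the third per-sector checksum in ->sums and at most
 * num_sectors - 2 checksums are copied out.
 */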
 968
 969/*
 970 * btrfs_lock_and_flush_ordered_range - Lock the passed range and ensure all
 971 * pending ordered extents in it are run to completion.
 972 *
 973 * @tree:         IO tree used for locking out other users of the range
 974 * @inode:        Inode whose ordered tree is to be searched
 975 * @start:        Beginning of range to flush
 976 * @end:          Last byte of range to lock
 977 * @cached_state: If passed, will return the extent state responsible for the
 978 * locked range. It's the caller's responsibility to free the cached state.
 979 *
 980 * This function always returns with the given range locked, ensuring after it's
 981 * called no ordered extent can be pending.
 982 */
 983void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
 984					struct btrfs_inode *inode, u64 start,
 985					u64 end,
 986					struct extent_state **cached_state)
 987{
 988	struct btrfs_ordered_extent *ordered;
 989	struct extent_state *cache = NULL;
 990	struct extent_state **cachedp = &cache;
 991
 992	if (cached_state)
 993		cachedp = cached_state;
 994
 995	while (1) {
 996		lock_extent_bits(tree, start, end, cachedp);
 997		ordered = btrfs_lookup_ordered_range(inode, start,
 998						     end - start + 1);
 999		if (!ordered) {
1000			/*
1001			 * If no external cached_state has been passed then
1002			 * decrement the extra ref taken for cachedp since we
1003			 * aren't exposing it outside of this function
1004			 */
1005			if (!cached_state)
1006				refcount_dec(&cache->refs);
1007			break;
1008		}
1009		unlock_extent_cached(tree, start, end, cachedp);
1010		btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1);
1011		btrfs_put_ordered_extent(ordered);
1012	}
1013}
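/*
 * Caller sketch (assumed shape, mirroring the cached_state handling above):
 * take the lock with all ordered IO drained, operate, then unlock.
 *
 *	struct extent_state *cached = NULL;
 *
 *	btrfs_lock_and_flush_ordered_range(tree, inode, start, end, &cached);
 *	... operate on the locked, ordered-free range ...
 *	unlock_extent_cached(tree, start, end, &cached);
 */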
1014
1015int __init ordered_data_init(void)
1016{
1017	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
1018				     sizeof(struct btrfs_ordered_extent), 0,
1019				     SLAB_MEM_SPREAD,
1020				     NULL);
1021	if (!btrfs_ordered_extent_cache)
1022		return -ENOMEM;
1023
1024	return 0;
1025}
1026
1027void __cold ordered_data_exit(void)
1028{
1029	kmem_cache_destroy(btrfs_ordered_extent_cache);
1030}