fs/btrfs/ordered-data.c (v3.1)

/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}
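/*
 * Worked example (values are hypothetical): with file_offset == (u64)-4 and
 * len == 8, the sum wraps past the top of the u64 range, so entry_end()
 * clamps to (u64)-1 instead of returning a small, bogus end offset.
 */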

/* Returns NULL if the insertion worked, or the node that was already
 * present covering the given offset.
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}
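/*
 * Both predicates above treat an ordered extent as the half-open byte range
 * [file_offset, file_offset + len).  Example (hypothetical values): with
 * file_offset == 4096 and len == 4096, offset 8191 is inside the entry,
 * offset 8192 is not, and a range starting at byte 8192 does not overlap it.
 */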

/*
 * look for the first ordered struct that contains this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kzalloc(sizeof(*entry), GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = inode;
	entry->compress_type = compress_type;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* one ref for the tree */
	atomic_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	BUG_ON(node);
	spin_unlock(&tree->lock);

	spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &BTRFS_I(inode)->root->fs_info->ordered_extents);
	spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);

	BUG_ON(node);
	return 0;
}

int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}
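/*
 * A minimal caller sketch (hypothetical helper and values, not part of the
 * original file): record a plain, uncompressed ordered extent for a COW
 * write.  Passing 0 for the type means no type bit gets set, per the
 * BTRFS_ORDERED_IO_DONE/COMPLETE check in __btrfs_add_ordered_extent();
 * len == disk_len because nothing is compressed.
 */
static int sketch_record_cow_write(struct inode *inode, u64 file_offset,
				   u64 disk_start, u64 num_bytes)
{
	return btrfs_add_ordered_extent(inode, file_offset, disk_start,
					num_bytes, num_bytes, 0);
}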

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiple ordered extents.
 */
int btrfs_add_ordered_sum(struct inode *inode,
			  struct btrfs_ordered_extent *entry,
			  struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock(&tree->lock);
	return 0;
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 *file_offset, u64 io_size)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		printk(KERN_CRIT "bad ordering dec_start %llu end %llu\n",
		       (unsigned long long)dec_start,
		       (unsigned long long)dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       (unsigned long long)entry->bytes_left,
		       (unsigned long long)to_dec);
	}
	entry->bytes_left -= to_dec;
	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock(&tree->lock);
	return ret == 0;
}
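/*
 * Sketch of the intended calling pattern (hypothetical helper): because the
 * function advances *file_offset past the accounted range, an endio path can
 * loop over a bio that spans several ordered extents.  Whenever it returns 1,
 * 'ordered' carries an extra reference that the caller must drop; real
 * callers run their completion work before the put.
 */
static void sketch_account_io_range(struct inode *inode, u64 offset, u64 bytes)
{
	struct btrfs_ordered_extent *ordered;
	u64 end = offset + bytes;

	while (offset < end) {
		u64 last = offset;

		ordered = NULL;
		if (btrfs_dec_test_first_ordered_pending(inode, &ordered,
							 &offset, end - offset))
			btrfs_put_ordered_extent(ordered);
		if (offset == last)	/* no entry found, do not spin */
			break;
	}
}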

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       (unsigned long long)entry->bytes_left,
		       (unsigned long long)io_size);
	}
	entry->bytes_left -= io_size;
	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock(&tree->lock);
	return ret == 0;
}
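/*
 * Sketch (hypothetical helper): the single-extent variant for IO known not
 * to cross ordered extent boundaries.  The test_and_set_bit above guarantees
 * the function returns 1 exactly once per ordered extent, so the completion
 * work below runs exactly once.
 */
static void sketch_finish_one_extent(struct inode *inode, u64 offset, u64 bytes)
{
	struct btrfs_ordered_extent *ordered = NULL;

	if (btrfs_dec_test_ordered_pending(inode, &ordered, offset, bytes)) {
		/* real callers insert csums, update i_size, etc. here */
		btrfs_put_ordered_extent(ordered);
	}
}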

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (atomic_dec_and_test(&entry->refs)) {
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kfree(entry);
	}
	return 0;
}

/*
 * remove an ordered extent from the tree.  No references are dropped
 * and you must wake_up entry->wait.  You must hold the tree lock
 * while you call this function.
 */
static int __btrfs_remove_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct rb_node *node;

	tree = &BTRFS_I(inode)->ordered_tree;
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);

	spin_lock(&root->fs_info->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);

	trace_btrfs_ordered_extent_remove(inode, entry);

	/*
	 * we have no more ordered extents for this inode and
	 * no dirty pages.  We can safely remove it from the
	 * list of ordered extents
	 */
	if (RB_EMPTY_ROOT(&tree->tree) &&
	    !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		list_del_init(&BTRFS_I(inode)->ordered_operations);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);

	return 0;
}

/*
 * remove an ordered extent from the tree.  No references are dropped
 * but any waiters are woken.
 */
int btrfs_remove_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	ret = __btrfs_remove_ordered_extent(inode, entry);
	spin_unlock(&tree->lock);
	wake_up(&entry->wait);

	return ret;
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
int btrfs_wait_ordered_extents(struct btrfs_root *root,
			       int nocow_only, int delay_iput)
{
	struct list_head splice;
	struct list_head *cur;
	struct btrfs_ordered_extent *ordered;
	struct inode *inode;

	INIT_LIST_HEAD(&splice);

	spin_lock(&root->fs_info->ordered_extent_lock);
	list_splice_init(&root->fs_info->ordered_extents, &splice);
	while (!list_empty(&splice)) {
		cur = splice.next;
		ordered = list_entry(cur, struct btrfs_ordered_extent,
				     root_extent_list);
		if (nocow_only &&
		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
			list_move(&ordered->root_extent_list,
				  &root->fs_info->ordered_extents);
			cond_resched_lock(&root->fs_info->ordered_extent_lock);
			continue;
		}

		list_del_init(&ordered->root_extent_list);
		atomic_inc(&ordered->refs);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(ordered->inode);

		spin_unlock(&root->fs_info->ordered_extent_lock);

		if (inode) {
			btrfs_start_ordered_extent(inode, ordered, 1);
			btrfs_put_ordered_extent(ordered);
			if (delay_iput)
				btrfs_add_delayed_iput(inode);
			else
				iput(inode);
		} else {
			btrfs_put_ordered_extent(ordered);
		}

		spin_lock(&root->fs_info->ordered_extent_lock);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);
	return 0;
}

/*
 * this is used during transaction commit to write all the inodes
 * added to the ordered operation list.  These files must be fully on
 * disk before the transaction commits.
 *
 * we have two modes here, one is to just start the IO via filemap_flush
 * and the other is to wait for all the IO.  When we wait, we have an
 * extra check to make sure the ordered operation list really is empty
 * before we return
 */
int btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
{
	struct btrfs_inode *btrfs_inode;
	struct inode *inode;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&root->fs_info->ordered_operations_mutex);
	spin_lock(&root->fs_info->ordered_extent_lock);
again:
	list_splice_init(&root->fs_info->ordered_operations, &splice);

	while (!list_empty(&splice)) {
		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
				   ordered_operations);

		inode = &btrfs_inode->vfs_inode;

		list_del_init(&btrfs_inode->ordered_operations);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(inode);

		if (!wait && inode) {
			list_add_tail(&BTRFS_I(inode)->ordered_operations,
			      &root->fs_info->ordered_operations);
		}
		spin_unlock(&root->fs_info->ordered_extent_lock);

		if (inode) {
			if (wait)
				btrfs_wait_ordered_range(inode, 0, (u64)-1);
			else
				filemap_flush(inode->i_mapping);
			btrfs_add_delayed_iput(inode);
		}

		cond_resched();
		spin_lock(&root->fs_info->ordered_extent_lock);
	}
	if (wait && !list_empty(&root->fs_info->ordered_operations))
		goto again;

	spin_unlock(&root->fs_info->ordered_extent_lock);
	mutex_unlock(&root->fs_info->ordered_operations_mutex);

	return 0;
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				       struct btrfs_ordered_extent *entry,
				       int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for pdflush to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;
	int found;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}
again:
	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

	/* The compression code will leave pages locked but return from
	 * writepage without setting the page writeback.  Starting again
	 * with WB_SYNC_ALL will end up waiting for the IO to actually start.
	 */
	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

	filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	found = 0;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len < start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		found++;
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	if (found || test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
			   EXTENT_DELALLOC, 0, NULL)) {
		schedule_timeout(1);
		goto again;
	}
	return 0;
}
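/*
 * Usage note: callers that need the whole file stable pass the maximal
 * range, as btrfs_add_ordered_operation() below does with
 * btrfs_wait_ordered_range(inode, 0, (u64)-1); the start + len overflow
 * check above clamps that to INT_LIMIT(loff_t).
 */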

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		atomic_inc(&entry->refs);
out:
	spin_unlock(&tree->lock);
	return entry;
}
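/*
 * A minimal lookup sketch (hypothetical helper): a successful lookup takes a
 * reference, so it must always be paired with btrfs_put_ordered_extent().
 */
static int sketch_offset_has_pending_io(struct inode *inode, u64 offset)
{
	struct btrfs_ordered_extent *ordered;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 0;
	btrfs_put_ordered_extent(ordered);
	return 1;
}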

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
							u64 file_offset,
							u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		atomic_inc(&entry->refs);
	spin_unlock(&tree->lock);
	return entry;
}

/*
 * look up and return the ordered extent that contains 'file_offset', or the
 * closest one below it.  NULL is returned if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
	spin_unlock(&tree->lock);
	return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size_test;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;

	if (ordered)
		offset = entry_end(ordered);
	else
		offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);

	spin_lock(&tree->lock);
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/* truncate file */
	if (disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = i_size;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size || offset <= disk_i_size) {
		goto out;
	}

	/*
	 * we can't update the disk_i_size if there are delalloc bytes
	 * between disk_i_size and this ordered extent
	 */
	if (test_range_bit(io_tree, disk_i_size, offset - 1,
			   EXTENT_DELALLOC, 0, NULL)) {
		goto out;
	}
	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving ordered struct,
		 * so there should be no ordered struct covering this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	while (node) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (test->file_offset + test->len <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;
		if (test->file_offset >= disk_i_size)
			goto out;
		node = rb_prev(node);
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * at this point, we know we can safely update i_size to at least
	 * the offset from this ordered extent.  But, we need to
	 * walk forward and see if ios from higher up in the file have
	 * finished.
	 */
	if (ordered) {
		node = rb_next(&ordered->rb_node);
	} else {
		if (prev)
			node = rb_next(prev);
		else
			node = rb_first(&tree->tree);
	}
	i_size_test = 0;
	if (node) {
		/*
		 * do we have an area where IO might have finished
		 * between our ordered extent and the next one.
		 */
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (test->file_offset > offset)
			i_size_test = test->file_offset;
	} else {
		i_size_test = i_size;
	}

	/*
	 * i_size_test is the end of a region after this ordered
	 * extent where there are no ordered extents.  As long as there
	 * are no delalloc bytes in this area, it is safe to update
	 * disk_i_size to the end of the region.
	 */
	if (i_size_test > offset &&
	    !test_range_bit(io_tree, offset, i_size_test - 1,
			    EXTENT_DELALLOC, 0, NULL)) {
		new_i_size = min_t(u64, i_size_test, i_size);
	}
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * we need to remove the ordered extent with the tree lock held
	 * so that other people calling this function don't find our fully
	 * processed ordered entry and skip updating the i_size
	 */
	if (ordered)
		__btrfs_remove_ordered_extent(inode, ordered);
	spin_unlock(&tree->lock);
	if (ordered)
		wake_up(&ordered->wait);
	return ret;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_sector_sum *sector_sums;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
	int ret = 1;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 1;

	spin_lock(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr) {
			num_sectors = ordered_sum->len / sectorsize;
			sector_sums = ordered_sum->sums;
			for (i = 0; i < num_sectors; i++) {
				if (sector_sums[i].bytenr == disk_bytenr) {
					*sum = sector_sums[i].sum;
					ret = 0;
					goto out;
				}
			}
		}
	}
out:
	spin_unlock(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return ret;
}
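/*
 * Return convention: 0 means the checksum for 'disk_bytenr' was found on an
 * in-flight ordered extent and copied to *sum; 1 means the caller must fall
 * back to reading the csum tree.
 */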

/*
 * add a given inode to the list of inodes that must be fully on
 * disk before a transaction commit finishes.
 *
 * This basically gives us the ext3 style data=ordered mode, and it is mostly
 * used to make sure renamed files are fully on disk.
 *
 * It is a noop if the inode is already fully on disk.
 *
 * If trans is not null, we'll do a friendly check for a transaction that
 * is already flushing things and force the IO down ourselves.
 */
int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct inode *inode)
{
	u64 last_mod;

	last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);

	/*
	 * if this file hasn't been changed since the last transaction
	 * commit, we can safely return without doing anything
	 */
	if (last_mod < root->fs_info->last_trans_committed)
		return 0;

	/*
	 * the transaction is already committing.  Just start the IO and
	 * don't bother with all of this list nonsense
	 */
	if (trans && root->fs_info->running_transaction->blocked) {
		btrfs_wait_ordered_range(inode, 0, (u64)-1);
		return 0;
	}

	spin_lock(&root->fs_info->ordered_extent_lock);
	if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
		list_add_tail(&BTRFS_I(inode)->ordered_operations,
			      &root->fs_info->ordered_operations);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);

	return 0;
}

fs/btrfs/ordered-data.c (v6.9.4)

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "messages.h"
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"
#include "qgroup.h"
#include "subpage.h"
#include "file.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->num_bytes < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->num_bytes;
}

/* Returns NULL if the insertion worked, or the node that was already
 * present covering the given offset.
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->num_bytes <= file_offset)
		return 0;
	return 1;
}

/*
 * look for the first ordered struct that contains this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *ordered_tree_search(struct btrfs_inode *inode,
						  u64 file_offset)
{
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (inode->ordered_tree_last) {
		entry = rb_entry(inode->ordered_tree_last, struct btrfs_ordered_extent,
				 rb_node);
		if (in_range(file_offset, entry->file_offset, entry->num_bytes))
			return inode->ordered_tree_last;
	}
	ret = __tree_search(&inode->ordered_tree, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		inode->ordered_tree_last = ret;
	return ret;
}

static struct btrfs_ordered_extent *alloc_ordered_extent(
			struct btrfs_inode *inode, u64 file_offset, u64 num_bytes,
			u64 ram_bytes, u64 disk_bytenr, u64 disk_num_bytes,
			u64 offset, unsigned long flags, int compress_type)
{
	struct btrfs_ordered_extent *entry;
	int ret;
	u64 qgroup_rsv = 0;

	if (flags &
	    ((1 << BTRFS_ORDERED_NOCOW) | (1 << BTRFS_ORDERED_PREALLOC))) {
		/* For nocow write, we can release the qgroup rsv right now */
		ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes, &qgroup_rsv);
		if (ret < 0)
			return ERR_PTR(ret);
	} else {
		/*
		 * The ordered extent has reserved qgroup space, release now
		 * and pass the reserved number for qgroup_record to free.
		 */
		ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes, &qgroup_rsv);
		if (ret < 0)
			return ERR_PTR(ret);
	}
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	entry->file_offset = file_offset;
	entry->num_bytes = num_bytes;
	entry->ram_bytes = ram_bytes;
	entry->disk_bytenr = disk_bytenr;
	entry->disk_num_bytes = disk_num_bytes;
	entry->offset = offset;
	entry->bytes_left = num_bytes;
	entry->inode = igrab(&inode->vfs_inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	entry->qgroup_rsv = qgroup_rsv;
	entry->flags = flags;
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	INIT_LIST_HEAD(&entry->bioc_list);
	init_completion(&entry->completion);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, 1);
	spin_unlock(&inode->lock);

	return entry;
}

static void insert_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct btrfs_inode *inode = BTRFS_I(entry->inode);
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;

	trace_btrfs_ordered_extent_add(inode, entry);

	percpu_counter_add_batch(&fs_info->ordered_bytes, entry->num_bytes,
				 fs_info->delalloc_batch);

	/* One ref for the tree. */
	refcount_inc(&entry->refs);

	spin_lock_irq(&inode->ordered_tree_lock);
	node = tree_insert(&inode->ordered_tree, entry->file_offset,
			   &entry->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
				"inconsistency in ordered tree at offset %llu",
				entry->file_offset);
	spin_unlock_irq(&inode->ordered_tree_lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
}

/*
 * Add an ordered extent to the per-inode tree.
 *
 * @inode:           Inode that this extent is for.
 * @file_offset:     Logical offset in file where the extent starts.
 * @num_bytes:       Logical length of extent in file.
 * @ram_bytes:       Full length of unencoded data.
 * @disk_bytenr:     Offset of extent on disk.
 * @disk_num_bytes:  Size of extent on disk.
 * @offset:          Offset into unencoded data where file data starts.
 * @flags:           Flags specifying type of extent (1 << BTRFS_ORDERED_*).
 * @compress_type:   Compression algorithm used for data.
 *
 * Most of these parameters correspond to &struct btrfs_file_extent_item. The
 * tree is given a single reference on the ordered extent that was inserted, and
 * the returned pointer is given a second reference.
 *
 * Return: the new ordered extent or error pointer.
 */
struct btrfs_ordered_extent *btrfs_alloc_ordered_extent(
			struct btrfs_inode *inode, u64 file_offset,
			u64 num_bytes, u64 ram_bytes, u64 disk_bytenr,
			u64 disk_num_bytes, u64 offset, unsigned long flags,
			int compress_type)
{
	struct btrfs_ordered_extent *entry;

	ASSERT((flags & ~BTRFS_ORDERED_TYPE_FLAGS) == 0);

	entry = alloc_ordered_extent(inode, file_offset, num_bytes, ram_bytes,
				     disk_bytenr, disk_num_bytes, offset, flags,
				     compress_type);
	if (!IS_ERR(entry))
		insert_ordered_extent(entry);
	return entry;
}
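/*
 * A minimal caller sketch (hypothetical helper and values): a plain,
 * uncompressed COW write, where the unencoded data equals the on-disk data,
 * so all three lengths match and the unencoded offset is 0.  The returned
 * extent carries its own reference and must eventually be dropped with
 * btrfs_put_ordered_extent().
 */
static struct btrfs_ordered_extent *sketch_alloc_regular_ordered(
		struct btrfs_inode *inode, u64 file_offset, u64 disk_bytenr,
		u64 nbytes)
{
	return btrfs_alloc_ordered_extent(inode, file_offset, nbytes, nbytes,
					  disk_bytenr, nbytes, 0,
					  1 << BTRFS_ORDERED_REGULAR,
					  BTRFS_COMPRESS_NONE);
}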

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiple ordered extents.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_inode *inode = BTRFS_I(entry->inode);

	spin_lock_irq(&inode->ordered_tree_lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&inode->ordered_tree_lock);
}

static void finish_ordered_fn(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered_extent;

	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
	btrfs_finish_ordered_io(ordered_extent);
}

static bool can_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
				      struct page *page, u64 file_offset,
				      u64 len, bool uptodate)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	lockdep_assert_held(&inode->ordered_tree_lock);

	if (page) {
		ASSERT(page->mapping);
		ASSERT(page_offset(page) <= file_offset);
		ASSERT(file_offset + len <= page_offset(page) + PAGE_SIZE);

		/*
		 * Ordered (Private2) bit indicates whether we still have
		 * pending io unfinished for the ordered extent.
		 *
		 * If there's no such bit, we need to skip to next range.
		 */
		if (!btrfs_folio_test_ordered(fs_info, page_folio(page),
					      file_offset, len))
			return false;
		btrfs_folio_clear_ordered(fs_info, page_folio(page), file_offset, len);
	}

	/* Now we're fine to update the accounting. */
	if (WARN_ON_ONCE(len > ordered->bytes_left)) {
		btrfs_crit(fs_info,
"bad ordered extent accounting, root=%llu ino=%llu OE offset=%llu OE len=%llu to_dec=%llu left=%llu",
			   inode->root->root_key.objectid, btrfs_ino(inode),
			   ordered->file_offset, ordered->num_bytes,
			   len, ordered->bytes_left);
		ordered->bytes_left = 0;
	} else {
		ordered->bytes_left -= len;
	}

	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);

	if (ordered->bytes_left)
		return false;

	/*
	 * All the IO of the ordered extent is finished, we need to queue
	 * the finish_func to be executed.
	 */
	set_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags);
	cond_wake_up(&ordered->wait);
	refcount_inc(&ordered->refs);
	trace_btrfs_ordered_extent_mark_finished(inode, ordered);
	return true;
}

static void btrfs_queue_ordered_fn(struct btrfs_ordered_extent *ordered)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_workqueue *wq = btrfs_is_free_space_inode(inode) ?
		fs_info->endio_freespace_worker : fs_info->endio_write_workers;

	btrfs_init_work(&ordered->work, finish_ordered_fn, NULL);
	btrfs_queue_work(wq, &ordered->work);
}

bool btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
				 struct page *page, u64 file_offset, u64 len,
				 bool uptodate)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	unsigned long flags;
	bool ret;

	trace_btrfs_finish_ordered_extent(inode, file_offset, len, uptodate);

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	ret = can_finish_ordered_extent(ordered, page, file_offset, len, uptodate);
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);

	if (ret)
		btrfs_queue_ordered_fn(ordered);
	return ret;
}

/*
 * Mark all ordered extents io inside the specified range finished.
 *
 * @page:	 The involved page for the operation.
 *		 For uncompressed buffered IO, the page status also needs to be
 *		 updated to indicate whether the pending ordered io is finished.
 *		 Can be NULL for direct IO and compressed write.
 *		 For these cases, callers are ensured they won't execute the
 *		 endio function twice.
 *
 * This function is called for endio, thus the range must have ordered
 * extent(s) covering it.
 */
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
				    struct page *page, u64 file_offset,
				    u64 num_bytes, bool uptodate)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	u64 cur = file_offset;

	trace_btrfs_writepage_end_io_hook(inode, file_offset,
					  file_offset + num_bytes - 1,
					  uptodate);

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	while (cur < file_offset + num_bytes) {
		u64 entry_end;
		u64 end;
		u32 len;

		node = ordered_tree_search(inode, cur);
		/* No ordered extents at all */
		if (!node)
			break;

		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		entry_end = entry->file_offset + entry->num_bytes;
		/*
		 * |<-- OE --->|  |
		 *		  cur
		 * Go to next OE.
		 */
		if (cur >= entry_end) {
			node = rb_next(node);
			/* No more ordered extents, exit */
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_ordered_extent,
					 rb_node);

			/* Go to next ordered extent and continue */
			cur = entry->file_offset;
			continue;
		}
		/*
		 * |	|<--- OE --->|
		 * cur
		 * Go to the start of OE.
		 */
		if (cur < entry->file_offset) {
			cur = entry->file_offset;
			continue;
		}

		/*
		 * Now we are definitely inside one ordered extent.
		 *
		 * |<--- OE --->|
		 *	|
		 *	cur
		 */
		end = min(entry->file_offset + entry->num_bytes,
			  file_offset + num_bytes) - 1;
		ASSERT(end + 1 - cur < U32_MAX);
		len = end + 1 - cur;

		if (can_finish_ordered_extent(entry, page, cur, len, uptodate)) {
			spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
			btrfs_queue_ordered_fn(entry);
			spin_lock_irqsave(&inode->ordered_tree_lock, flags);
		}
		cur += len;
	}
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
}

/*
 * Finish IO for one ordered extent across a given range.  The range can only
 * contain one ordered extent.
 *
 * @cached:	 The cached ordered extent. If not NULL, we can skip the tree
 *		 search and use the ordered extent directly.
 *		 Will also be used to store the finished ordered extent.
 * @file_offset: File offset for the finished IO
 * @io_size:	 Length of the finished IO range
 *
 * Return true if the ordered extent is finished in the range, and update
 * @cached.
 * Return false otherwise.
 *
 * NOTE: The range can NOT cross multiple ordered extents.
 * Thus caller should ensure the range doesn't cross ordered extents.
 */
bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
				    struct btrfs_ordered_extent **cached,
				    u64 file_offset, u64 io_size)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	bool finished = false;

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = ordered_tree_search(inode, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		goto out;

	if (io_size > entry->bytes_left)
		btrfs_crit(inode->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
		       entry->bytes_left, io_size);

	entry->bytes_left -= io_size;

	if (entry->bytes_left == 0) {
		/*
		 * Ensure only one caller can set the flag and 'finished'
		 * accordingly
		 */
		finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	}
out:
	if (finished && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_dec_test_pending(inode, entry);
	}
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
	return finished;
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(BTRFS_I(entry->inode), entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(list_empty(&entry->log_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(BTRFS_I(entry->inode));
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kvfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}
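/*
 * Reference pairing: alloc_ordered_extent() hands the caller one reference,
 * insert_ordered_extent() takes a second for the tree, and every lookup takes
 * its own; each owner drops its reference here.  The ASSERTs above catch an
 * extent being freed while it is still linked into a tree or list.
 */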

/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_root *root = btrfs_inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;
	bool pending;
	bool freespace_inode;

	/*
	 * If this is a free space inode the thread has not acquired the ordered
	 * extents lockdep map.
	 */
	freespace_inode = btrfs_is_free_space_inode(btrfs_inode);

	btrfs_lockdep_acquire(fs_info, btrfs_trans_pending_ordered);
	/* This is paired with btrfs_alloc_ordered_extent. */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root) {
		u64 release;

		if (test_bit(BTRFS_ORDERED_ENCODED, &entry->flags))
			release = entry->disk_num_bytes;
		else
			release = entry->num_bytes;
		btrfs_delalloc_release_metadata(btrfs_inode, release,
						test_bit(BTRFS_ORDERED_IOERR,
							 &entry->flags));
	}

	percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
				 fs_info->delalloc_batch);

	spin_lock_irq(&btrfs_inode->ordered_tree_lock);
	node = &entry->rb_node;
	rb_erase(node, &btrfs_inode->ordered_tree);
	RB_CLEAR_NODE(node);
	if (btrfs_inode->ordered_tree_last == node)
		btrfs_inode->ordered_tree_last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
	spin_unlock_irq(&btrfs_inode->ordered_tree_lock);

	/*
	 * The current running transaction is waiting on us, we need to let it
	 * know that we're complete and wake it up.
	 */
	if (pending) {
		struct btrfs_transaction *trans;

		/*
		 * The checks for trans are just a formality, it should be set,
		 * but if it isn't we don't want to deref/assert under the spin
		 * lock, so be nice and check if trans is set, but ASSERT() so
		 * if it isn't set a developer will notice.
		 */
		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			refcount_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ASSERT(trans || BTRFS_FS_ERROR(fs_info));
		if (trans) {
			if (atomic_dec_and_test(&trans->pending_ordered))
				wake_up(&trans->pending_wait);
			btrfs_put_transaction(trans);
		}
	}

	btrfs_lockdep_release(fs_info, btrfs_trans_pending_ordered);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(btrfs_inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
	if (!freespace_inode)
		btrfs_lockdep_release(fs_info, btrfs_ordered_extent);
}

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered);
	complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->disk_bytenr ||
		    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_run_ordered_extent_work, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != U64_MAX)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}

void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			     const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	LIST_HEAD(splice);
	u64 done;

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX) {
			nr -= done;
		}
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}
 769
 770/*
 771 * Start IO and wait for a given ordered extent to finish.
 772 *
 773 * Wait on page writeback for all the pages in the extent and the IO completion
 774 * code to insert metadata into the btree corresponding to the extent.
 775 */
 776void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry)
 
 
 
 777{
 778	u64 start = entry->file_offset;
 779	u64 end = start + entry->num_bytes - 1;
 780	struct btrfs_inode *inode = BTRFS_I(entry->inode);
 781	bool freespace_inode;
 782
 783	trace_btrfs_ordered_extent_start(inode, entry);
 784
 785	/*
 786	 * If this is a free space inode do not take the ordered extents lockdep
 787	 * map.
 788	 */
 789	freespace_inode = btrfs_is_free_space_inode(inode);
 790
 791	/*
 792	 * Pages in the range can be dirty, clean or under writeback.  We
 793	 * start IO on any dirty ones so the wait doesn't stall waiting
 794	 * for the flusher thread to find them.
 795	 */
 796	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
 797		filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);
 798
 799	if (!freespace_inode)
 800		btrfs_might_wait_for_event(inode->root->fs_info, btrfs_ordered_extent);
 801	wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, &entry->flags));
 802}
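
/*
 * Editorial sketch (not in the original file): the usual calling pattern,
 * visible in btrfs_wait_ordered_range() below, is lookup, start, then drop
 * the reference:
 *
 *	ordered = btrfs_lookup_ordered_extent(inode, file_offset);
 *	if (ordered) {
 *		btrfs_start_ordered_extent(ordered);
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */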
 803
 804/*
 805 * Used to wait on ordered extents across a large range of bytes.
 806 */
 807int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
 808{
 809	int ret = 0;
 810	int ret_wb = 0;
 811	u64 end;
 812	u64 orig_end;
 813	struct btrfs_ordered_extent *ordered;
 814
 815	if (start + len < start) {
 816		orig_end = OFFSET_MAX;
 817	} else {
 818		orig_end = start + len - 1;
 819		if (orig_end > OFFSET_MAX)
 820			orig_end = OFFSET_MAX;
 821	}
 822
 823	/*
 824	 * Start IO across the range first to instantiate any delalloc extents.
 825	 */
 826	ret = btrfs_fdatawrite_range(inode, start, orig_end);
 827	if (ret)
 828		return ret;
 829
 830	/*
 831	 * If we have a writeback error don't return immediately. Wait first
 832	 * for any ordered extents that haven't completed yet. This is to make
 833	 * sure no one can dirty the same page ranges and call writepages()
 834	 * before the ordered extents complete - to avoid failures (-EEXIST)
 835	 * when adding the new ordered extents to the ordered tree.
 836	 */
 837	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);
 838
 839	end = orig_end;
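	/*
	 * Editorial note: the loop below walks the ordered extents backwards,
	 * from @orig_end towards @start, restarting the lookup one byte before
	 * each extent found until the whole range has been covered.
	 */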
 840	while (1) {
 841		ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end);
 842		if (!ordered)
 843			break;
 844		if (ordered->file_offset > orig_end) {
 845			btrfs_put_ordered_extent(ordered);
 846			break;
 847		}
 848		if (ordered->file_offset + ordered->num_bytes <= start) {
 849			btrfs_put_ordered_extent(ordered);
 850			break;
 851		}
 852		btrfs_start_ordered_extent(ordered);
 853		end = ordered->file_offset;
 854		/*
 855		 * If the ordered extent had an error save the error but don't
 856		 * exit without waiting first for all other ordered extents in
 857		 * the range to complete.
 858		 */
 859		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
 860			ret = -EIO;
 861		btrfs_put_ordered_extent(ordered);
 862		if (end == 0 || end == start)
 863			break;
 864		end--;
 865	}
 866	return ret_wb ? ret_wb : ret;
 867}
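
/*
 * Editorial sketch (not in the original file): waiting on all ordered
 * extents of an inode, as fsync-style paths do, amounts to:
 *
 *	int ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
 *
 * A (u64)-1 length is clamped to OFFSET_MAX by the range checks at the top
 * of the function.
 */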
 868
 869/*
 870 * find an ordered extent corresponding to file_offset.  return NULL if
 871 * nothing is found, otherwise take a reference on the extent and return it
 872 */
 873struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
 874							 u64 file_offset)
 875{
 876	struct rb_node *node;
 877	struct btrfs_ordered_extent *entry = NULL;
 878	unsigned long flags;
 879
 880	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
 881	node = ordered_tree_search(inode, file_offset);
 882	if (!node)
 883		goto out;
 884
 885	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 886	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
 887		entry = NULL;
 888	if (entry) {
 889		refcount_inc(&entry->refs);
 890		trace_btrfs_ordered_extent_lookup(inode, entry);
 891	}
 892out:
 893	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
 894	return entry;
 895}
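
/*
 * Editorial note (assumption): unlike the helpers below, which use
 * spin_lock_irq(), this lookup uses spin_lock_irqsave(), presumably because
 * it may be reached from contexts where interrupts are already disabled.
 */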
 896
 897/* Since the DIO code tries to lock a wide area we need to look for any ordered
 898 * extents that exist in the range, rather than just the start of the range.
 899 */
 900struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
 901		struct btrfs_inode *inode, u64 file_offset, u64 len)
 902{
 903	struct rb_node *node;
 904	struct btrfs_ordered_extent *entry = NULL;
 905
 906	spin_lock_irq(&inode->ordered_tree_lock);
 907	node = ordered_tree_search(inode, file_offset);
 908	if (!node) {
 909		node = ordered_tree_search(inode, file_offset + len);
 910		if (!node)
 911			goto out;
 912	}
 913
 914	while (1) {
 915		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 916		if (range_overlaps(entry, file_offset, len))
 917			break;
 918
 919		if (entry->file_offset >= file_offset + len) {
 920			entry = NULL;
 921			break;
 922		}
 923		entry = NULL;
 924		node = rb_next(node);
 925		if (!node)
 926			break;
 927	}
 928out:
 929	if (entry) {
 930		refcount_inc(&entry->refs);
 931		trace_btrfs_ordered_extent_lookup_range(inode, entry);
 932	}
 933	spin_unlock_irq(&inode->ordered_tree_lock);
 934	return entry;
 935}
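
/*
 * Editorial example (illustrative values, not from the original file): with
 * one ordered extent covering [0, 64K), btrfs_lookup_ordered_extent(inode,
 * 128K) returns NULL, while btrfs_lookup_ordered_range(inode, 32K, 96K)
 * still finds it, because the queried range [32K, 128K) overlaps the extent.
 */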
 936
 937/*
 938 * Adds all ordered extents to the given list. The list ends up sorted by the
 939 * file_offset of the ordered extents.
 940 */
 941void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
 942					   struct list_head *list)
 943{
 944	struct rb_node *n;
 945
 946	ASSERT(inode_is_locked(&inode->vfs_inode));
 947
 948	spin_lock_irq(&inode->ordered_tree_lock);
 949	for (n = rb_first(&inode->ordered_tree); n; n = rb_next(n)) {
 950		struct btrfs_ordered_extent *ordered;
 951
 952		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
 953
 954		if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
 955			continue;
 956
 957		ASSERT(list_empty(&ordered->log_list));
 958		list_add_tail(&ordered->log_list, list);
 959		refcount_inc(&ordered->refs);
 960		trace_btrfs_ordered_extent_lookup_for_logging(inode, ordered);
 961	}
 962	spin_unlock_irq(&inode->ordered_tree_lock);
 963}
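
/*
 * Editorial sketch (not in the original file): every entry added above holds
 * an extra reference, so a consumer is expected to drop it, e.g.:
 *
 *	struct btrfs_ordered_extent *ordered, *tmp;
 *
 *	list_for_each_entry_safe(ordered, tmp, &list, log_list) {
 *		list_del_init(&ordered->log_list);
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */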
 964
 965/*
 966 * Look up and return any extent before 'file_offset'.  NULL is returned
 967 * if none is found.
 968 */
 969struct btrfs_ordered_extent *
 970btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
 971{
 972	struct rb_node *node;
 973	struct btrfs_ordered_extent *entry = NULL;
 974
 975	spin_lock_irq(&inode->ordered_tree_lock);
 976	node = ordered_tree_search(inode, file_offset);
 977	if (!node)
 978		goto out;
 979
 980	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 981	refcount_inc(&entry->refs);
 982	trace_btrfs_ordered_extent_lookup_first(inode, entry);
 983out:
 984	spin_unlock_irq(&inode->ordered_tree_lock);
 985	return entry;
 986}
 987
 988/*
 989 * Lookup the first ordered extent that overlaps the range
 990 * [@file_offset, @file_offset + @len).
 991 *
 992 * The difference between this and btrfs_lookup_first_ordered_extent() is
 993 * that this one won't return any ordered extent that does not overlap the range.
 994 * And the difference against btrfs_lookup_ordered_extent() is, this function
 995 * ensures the first ordered extent gets returned.
 996 */
 997struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
 998			struct btrfs_inode *inode, u64 file_offset, u64 len)
 999{
1000	struct rb_node *node;
1001	struct rb_node *cur;
1002	struct rb_node *prev;
1003	struct rb_node *next;
1004	struct btrfs_ordered_extent *entry = NULL;
1005
1006	spin_lock_irq(&inode->ordered_tree_lock);
1007	node = inode->ordered_tree.rb_node;
1008	/*
1009	 * Here we don't want to use tree_search() which will use tree->last
1010	 * and screw up the search order.
1011	 * And __tree_search() can't return the adjacent ordered extents
1012	 * either, thus here we do our own search.
1013	 */
1014	while (node) {
1015		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
1016
1017		if (file_offset < entry->file_offset) {
1018			node = node->rb_left;
1019		} else if (file_offset >= entry_end(entry)) {
1020			node = node->rb_right;
1021		} else {
1022			/*
1023			 * Direct hit, got an ordered extent that covers
1024			 * @file_offset.
1025			 */
1026			goto out;
1027		}
1028	}
1029	if (!entry) {
1030		/* Empty tree */
1031		goto out;
1032	}
1033
1034	cur = &entry->rb_node;
1035	/* We got an entry around @file_offset, check adjacent entries */
1036	if (entry->file_offset < file_offset) {
1037		prev = cur;
1038		next = rb_next(cur);
1039	} else {
1040		prev = rb_prev(cur);
1041		next = cur;
1042	}
1043	if (prev) {
1044		entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
1045		if (range_overlaps(entry, file_offset, len))
1046			goto out;
1047	}
1048	if (next) {
1049		entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
1050		if (range_overlaps(entry, file_offset, len))
1051			goto out;
1052	}
1053	/* No ordered extent in the range */
1054	entry = NULL;
1055out:
1056	if (entry) {
1057		refcount_inc(&entry->refs);
1058		trace_btrfs_ordered_extent_lookup_first_range(inode, entry);
1059	}
1060
1061	spin_unlock_irq(&inode->ordered_tree_lock);
1062	return entry;
1063}
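
/*
 * Editorial example (illustrative values): with ordered extents [0, 4K) and
 * [8K, 12K) in the tree, a query for (file_offset = 2K, len = 8K) returns
 * the [0, 4K) extent (the first overlapping one), while (4K, 4K) returns
 * NULL since the gap [4K, 8K) overlaps neither entry.
 */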
1064
1065/*
1066 * Lock the passed range and ensure all pending ordered extents in it are run
1067 * to completion.
1068 *
1069 * @inode:        Inode whose ordered tree is to be searched
1070 * @start:        Beginning of range to lock and flush
1071 * @end:          Last byte of range to lock and flush
1072 * @cached_state: If passed, will return the extent state responsible for the
1073 *                locked range. It's the caller's responsibility to free the
1074 *                cached state.
1075 *
1076 * Always return with the given range locked, ensuring that after it's called
1077 * no ordered extent in it can be pending.
1078 */
1079void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
1080					u64 end,
1081					struct extent_state **cached_state)
1082{
1083	struct btrfs_ordered_extent *ordered;
1084	struct extent_state *cache = NULL;
1085	struct extent_state **cachedp = &cache;
1086
1087	if (cached_state)
1088		cachedp = cached_state;
1089
1090	while (1) {
1091		lock_extent(&inode->io_tree, start, end, cachedp);
1092		ordered = btrfs_lookup_ordered_range(inode, start,
1093						     end - start + 1);
1094		if (!ordered) {
1095			/*
1096			 * If no external cached_state has been passed then
1097			 * decrement the extra ref taken for cachedp since we
1098			 * aren't exposing it outside of this function
1099			 */
1100			if (!cached_state)
1101				refcount_dec(&cache->refs);
1102			break;
1103		}
1104		unlock_extent(&inode->io_tree, start, end, cachedp);
1105		btrfs_start_ordered_extent(ordered);
1106		btrfs_put_ordered_extent(ordered);
1107	}
1108}
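
/*
 * Editorial sketch (not in the original file): a caller passing its own
 * cached state is responsible for unlocking and freeing it, e.g.:
 *
 *	struct extent_state *cached = NULL;
 *
 *	btrfs_lock_and_flush_ordered_range(inode, start, end, &cached);
 *	... operate on the locked range ...
 *	unlock_extent(&inode->io_tree, start, end, &cached);
 */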
1109
1110/*
1111 * Lock the passed range and ensure all pending ordered extents in it are run
1112 * to completion in nowait mode.
1113 *
1114 * Return true if the range was locked and no pending ordered extents were
1115 * found in it, otherwise false.
1116 */
1117bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end,
1118				  struct extent_state **cached_state)
1119{
1120	struct btrfs_ordered_extent *ordered;
1121
1122	if (!try_lock_extent(&inode->io_tree, start, end, cached_state))
1123		return false;
1124
1125	ordered = btrfs_lookup_ordered_range(inode, start, end - start + 1);
1126	if (!ordered)
1127		return true;
1128
1129	btrfs_put_ordered_extent(ordered);
1130	unlock_extent(&inode->io_tree, start, end, cached_state);
1131
1132	return false;
1133}
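
/*
 * Editorial sketch (not in the original file): a NOWAIT write path would
 * bail out rather than block when the range cannot be locked cleanly:
 *
 *	if (!btrfs_try_lock_ordered_range(inode, start, end, &cached_state))
 *		return -EAGAIN;
 */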
1134
1135/* Split out a new ordered extent for the first @len bytes of @ordered. */
1136struct btrfs_ordered_extent *btrfs_split_ordered_extent(
1137			struct btrfs_ordered_extent *ordered, u64 len)
1138{
1139	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
1140	struct btrfs_root *root = inode->root;
1141	struct btrfs_fs_info *fs_info = root->fs_info;
1142	u64 file_offset = ordered->file_offset;
1143	u64 disk_bytenr = ordered->disk_bytenr;
1144	unsigned long flags = ordered->flags;
1145	struct btrfs_ordered_sum *sum, *tmpsum;
1146	struct btrfs_ordered_extent *new;
1147	struct rb_node *node;
1148	u64 offset = 0;
1149
1150	trace_btrfs_ordered_extent_split(inode, ordered);
1151
1152	ASSERT(!(flags & (1U << BTRFS_ORDERED_COMPRESSED)));
1153
1154	/*
1155	 * The entire bio must be covered by the ordered extent, but we can't
1156	 * reduce the original extent to a zero length either.
1157	 */
1158	if (WARN_ON_ONCE(len >= ordered->num_bytes))
1159		return ERR_PTR(-EINVAL);
1160	/* We cannot split partially completed ordered extents. */
1161	if (ordered->bytes_left) {
1162		ASSERT(!(flags & ~BTRFS_ORDERED_TYPE_FLAGS));
1163		if (WARN_ON_ONCE(ordered->bytes_left != ordered->disk_num_bytes))
1164			return ERR_PTR(-EINVAL);
1165	}
1166	/* We cannot split a compressed ordered extent. */
1167	if (WARN_ON_ONCE(ordered->disk_num_bytes != ordered->num_bytes))
1168		return ERR_PTR(-EINVAL);
1169
1170	new = alloc_ordered_extent(inode, file_offset, len, len, disk_bytenr,
1171				   len, 0, flags, ordered->compress_type);
1172	if (IS_ERR(new))
1173		return new;
1174
1175	/* One ref for the tree. */
1176	refcount_inc(&new->refs);
1177
1178	spin_lock_irq(&root->ordered_extent_lock);
1179	spin_lock(&inode->ordered_tree_lock);
1180	/* Remove from tree once */
1181	node = &ordered->rb_node;
1182	rb_erase(node, &inode->ordered_tree);
1183	RB_CLEAR_NODE(node);
1184	if (inode->ordered_tree_last == node)
1185		inode->ordered_tree_last = NULL;
1186
1187	ordered->file_offset += len;
1188	ordered->disk_bytenr += len;
1189	ordered->num_bytes -= len;
1190	ordered->disk_num_bytes -= len;
1191	ordered->ram_bytes -= len;
1192
1193	if (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags)) {
1194		ASSERT(ordered->bytes_left == 0);
1195		new->bytes_left = 0;
1196	} else {
1197		ordered->bytes_left -= len;
1198	}
1199
1200	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags)) {
1201		if (ordered->truncated_len > len) {
1202			ordered->truncated_len -= len;
1203		} else {
1204			new->truncated_len = ordered->truncated_len;
1205			ordered->truncated_len = 0;
1206		}
1207	}
1208
1209	list_for_each_entry_safe(sum, tmpsum, &ordered->list, list) {
1210		if (offset == len)
1211			break;
1212		list_move_tail(&sum->list, &new->list);
1213		offset += sum->len;
1214	}
1215
1216	/* Re-insert the node */
1217	node = tree_insert(&inode->ordered_tree, ordered->file_offset,
1218			   &ordered->rb_node);
1219	if (node)
1220		btrfs_panic(fs_info, -EEXIST,
1221			"zoned: inconsistency in ordered tree at offset %llu",
1222			ordered->file_offset);
1223
1224	node = tree_insert(&inode->ordered_tree, new->file_offset, &new->rb_node);
1225	if (node)
1226		btrfs_panic(fs_info, -EEXIST,
1227			"zoned: inconsistency in ordered tree at offset %llu",
1228			new->file_offset);
1229	spin_unlock(&inode->ordered_tree_lock);
1230
1231	list_add_tail(&new->root_extent_list, &root->ordered_extents);
1232	root->nr_ordered_extents++;
1233	spin_unlock_irq(&root->ordered_extent_lock);
1234	return new;
1235}
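
/*
 * Editorial example (illustrative values): splitting an ordered extent that
 * covers file range [0, 128K) with len = 64K returns a new extent for
 * [0, 64K), while the original is shifted to cover [64K, 128K), with its
 * disk_bytenr advanced by 64K and its byte counters reduced accordingly.
 */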
1236
1237int __init ordered_data_init(void)
1238{
1239	btrfs_ordered_extent_cache = KMEM_CACHE(btrfs_ordered_extent, 0);
1240	if (!btrfs_ordered_extent_cache)
1241		return -ENOMEM;
1242
1243	return 0;
1244}
1245
1246void __cold ordered_data_exit(void)
1247{
1248	kmem_cache_destroy(btrfs_ordered_extent_cache);
1249}