/*
 * Two snapshots of fs/btrfs/extent_map.c follow: first as of Linux v3.1,
 * then as of Linux v3.15.  (Page-scrape navigation text removed.)
 */
  1#include <linux/err.h>
  2#include <linux/slab.h>
  3#include <linux/module.h>
  4#include <linux/spinlock.h>
  5#include <linux/hardirq.h>
  6#include "ctree.h"
  7#include "extent_map.h"
  8
  9
 10static struct kmem_cache *extent_map_cache;
 11
 12int __init extent_map_init(void)
 13{
 14	extent_map_cache = kmem_cache_create("extent_map",
 15			sizeof(struct extent_map), 0,
 16			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
 17	if (!extent_map_cache)
 18		return -ENOMEM;
 19	return 0;
 20}
 21
 22void extent_map_exit(void)
 23{
 24	if (extent_map_cache)
 25		kmem_cache_destroy(extent_map_cache);
 26}
 27
/**
 * extent_map_tree_init - initialize extent map tree
 * @tree:		tree to initialize
 *
 * Initialize the extent tree @tree.  Should be called for each new inode
 * or other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree)
{
	tree->map = RB_ROOT;		/* empty rb-tree of extent_maps */
	rwlock_init(&tree->lock);	/* guards all tree modifications */
}
 40
 41/**
 42 * alloc_extent_map - allocate new extent map structure
 43 *
 44 * Allocate a new extent_map structure.  The new structure is
 45 * returned with a reference count of one and needs to be
 46 * freed using free_extent_map()
 47 */
 48struct extent_map *alloc_extent_map(void)
 49{
 50	struct extent_map *em;
 51	em = kmem_cache_alloc(extent_map_cache, GFP_NOFS);
 52	if (!em)
 53		return NULL;
 54	em->in_tree = 0;
 55	em->flags = 0;
 56	em->compress_type = BTRFS_COMPRESS_NONE;
 
 57	atomic_set(&em->refs, 1);
 
 58	return em;
 59}
 60
 61/**
 62 * free_extent_map - drop reference count of an extent_map
 63 * @em:		extent map beeing releasead
 64 *
 65 * Drops the reference out on @em by one and free the structure
 66 * if the reference count hits zero.
 67 */
 68void free_extent_map(struct extent_map *em)
 69{
 70	if (!em)
 71		return;
 72	WARN_ON(atomic_read(&em->refs) == 0);
 73	if (atomic_dec_and_test(&em->refs)) {
 74		WARN_ON(em->in_tree);
 
 75		kmem_cache_free(extent_map_cache, em);
 76	}
 77}
 78
/*
 * tree_insert - link an extent_map node into an rb-tree keyed by file offset
 * @root:	root of the tree
 * @offset:	file offset (em->start) of the extent being inserted
 * @node:	rb_node embedded in the new extent_map
 *
 * Returns NULL on success, or the conflicting node if @offset falls inside
 * an extent already in the tree (callers turn that into -EEXIST).
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry;

	/* standard rb-tree descent: left when offset precedes the entry,
	 * right when it is at or past the entry's end */
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset >= extent_map_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;	/* offset overlaps an existing extent */
	}

	entry = rb_entry(node, struct extent_map, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
106
/*
 * search through the tree for an extent_map with a given offset.  If
 * it can't be found, try to find some neighboring extents
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		/* remember the last node visited for the neighbor search */
		prev = n;
		prev_entry = entry;

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;	/* exact hit: offset inside this extent */
	}

	if (prev_ret) {
		/* walk forward until an extent ends past offset; the
		 * "prev &&" check guards the prev_entry dereference, since
		 * rb_entry() of a NULL node yields a bogus pointer that is
		 * never dereferenced */
		orig_prev = prev;
		while (prev && offset >= extent_map_end(prev_entry)) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		/* walk backward until an extent starts at or before offset */
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}
156
157/* check to see if two extent_map structs are adjacent and safe to merge */
158static int mergable_maps(struct extent_map *prev, struct extent_map *next)
159{
160	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
161		return 0;
162
163	/*
164	 * don't merge compressed extents, we need to know their
165	 * actual size
166	 */
167	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
168		return 0;
169
 
 
 
 
 
 
 
 
 
 
 
 
170	if (extent_map_end(prev) == next->start &&
171	    prev->flags == next->flags &&
172	    prev->bdev == next->bdev &&
173	    ((next->block_start == EXTENT_MAP_HOLE &&
174	      prev->block_start == EXTENT_MAP_HOLE) ||
175	     (next->block_start == EXTENT_MAP_INLINE &&
176	      prev->block_start == EXTENT_MAP_INLINE) ||
177	     (next->block_start == EXTENT_MAP_DELALLOC &&
178	      prev->block_start == EXTENT_MAP_DELALLOC) ||
179	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
180	      next->block_start == extent_map_block_end(prev)))) {
181		return 1;
182	}
183	return 0;
184}
185
186static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
187{
188	struct extent_map *merge = NULL;
189	struct rb_node *rb;
190
191	if (em->start != 0) {
192		rb = rb_prev(&em->rb_node);
193		if (rb)
194			merge = rb_entry(rb, struct extent_map, rb_node);
195		if (rb && mergable_maps(merge, em)) {
196			em->start = merge->start;
 
197			em->len += merge->len;
198			em->block_len += merge->block_len;
199			em->block_start = merge->block_start;
200			merge->in_tree = 0;
 
 
 
201			rb_erase(&merge->rb_node, &tree->map);
 
202			free_extent_map(merge);
203		}
204	}
205
206	rb = rb_next(&em->rb_node);
207	if (rb)
208		merge = rb_entry(rb, struct extent_map, rb_node);
209	if (rb && mergable_maps(em, merge)) {
210		em->len += merge->len;
211		em->block_len += merge->len;
212		rb_erase(&merge->rb_node, &tree->map);
213		merge->in_tree = 0;
 
 
214		free_extent_map(merge);
215	}
216}
217
/*
 * unpin_extent_cache - clear the pinned flag on a cached extent
 * @tree:	tree holding the extent
 * @start:	logical offset of the extent in the file
 * @len:	length of the extent
 *
 * Takes the tree lock, looks the extent up, clears EXTENT_FLAG_PINNED
 * and retries a merge with the neighbors (pinned extents are never
 * merged, see mergable_maps()).  Returns 0.
 */
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
{
	int ret = 0;
	struct extent_map *em;

	write_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);

	/* the caller is expected to unpin an extent that is cached at @start */
	WARN_ON(!em || em->start != start);

	if (!em)
		goto out;

	clear_bit(EXTENT_FLAG_PINNED, &em->flags);

	/* now that it is unpinned, it may be mergable with its neighbors */
	try_merge_map(tree, em);

	/* drop the reference taken by lookup_extent_mapping() */
	free_extent_map(em);
out:
	write_unlock(&tree->lock);
	return ret;
}
241
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
242/**
243 * add_extent_mapping - add new extent map to the extent tree
244 * @tree:	tree to insert new map in
245 * @em:		map to insert
246 *
247 * Insert @em into @tree or perform a simple forward/backward merge with
248 * existing mappings.  The extent_map struct passed in will be inserted
249 * into the tree directly, with an additional reference taken, or a
250 * reference dropped if the merge attempt was successful.
251 */
252int add_extent_mapping(struct extent_map_tree *tree,
253		       struct extent_map *em)
254{
255	int ret = 0;
256	struct rb_node *rb;
257	struct extent_map *exist;
258
259	exist = lookup_extent_mapping(tree, em->start, em->len);
260	if (exist) {
261		free_extent_map(exist);
262		ret = -EEXIST;
263		goto out;
264	}
265	rb = tree_insert(&tree->map, em->start, &em->rb_node);
266	if (rb) {
267		ret = -EEXIST;
268		goto out;
269	}
270	atomic_inc(&em->refs);
271
272	try_merge_map(tree, em);
273out:
274	return ret;
275}
276
/* simple helper to do math around the end of an extent, handling wrap */
static u64 range_end(u64 start, u64 len)
{
	u64 end = start + len;

	/* on unsigned wraparound, clamp to the largest possible offset */
	return (end < start) ? (u64)-1 : end;
}
284
/*
 * __lookup_extent_mapping - core of the lookup routines below
 * @tree:	tree to search
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 * @strict:	when set, return NULL unless the extent found actually
 *		intersects [start, start + len); when clear, any nearby
 *		extent found by the tree search is acceptable
 *
 * On success the returned extent_map has its reference count incremented;
 * drop it with free_extent_map().
 */
struct extent_map *__lookup_extent_mapping(struct extent_map_tree *tree,
					   u64 start, u64 len, int strict)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;
	u64 end = range_end(start, len);	/* clamped against overflow */

	rb_node = __tree_search(&tree->map, start, &prev, &next);
	if (!rb_node) {
		/* no exact hit: fall back to a neighboring extent */
		if (prev)
			rb_node = prev;
		else if (next)
			rb_node = next;
		else
			return NULL;	/* tree is empty */
	}

	em = rb_entry(rb_node, struct extent_map, rb_node);

	/* intersection test: [start, end) vs [em->start, extent_map_end) */
	if (strict && !(end > em->start && start < extent_map_end(em)))
		return NULL;

	atomic_inc(&em->refs);
	return em;
}
312
/**
 * lookup_extent_mapping - lookup extent_map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, len] range.  There may be additional objects in the tree that
 * intersect, so check the object returned carefully to make sure that no
 * additional lookups are needed.
 *
 * The returned extent, if any, carries an extra reference; release it
 * with free_extent_map().
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	/* strict=1: only an actually-intersecting extent is returned */
	return __lookup_extent_mapping(tree, start, len, 1);
}
329
/**
 * search_extent_mapping - find a nearby extent map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, len] range.
 *
 * If one can't be found, any nearby extent may be returned.  The result,
 * if any, carries an extra reference; release it with free_extent_map().
 */
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	/* strict=0: a non-intersecting neighbor is an acceptable result */
	return __lookup_extent_mapping(tree, start, len, 0);
}
346
347/**
348 * remove_extent_mapping - removes an extent_map from the extent tree
349 * @tree:	extent tree to remove from
350 * @em:		extent map beeing removed
351 *
352 * Removes @em from @tree.  No reference counts are dropped, and no checks
353 * are done to see if the range is in use
354 */
355int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
356{
357	int ret = 0;
358
359	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
360	rb_erase(&em->rb_node, &tree->map);
361	em->in_tree = 0;
 
 
362	return ret;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
363}
/* ---------------- Linux v3.15 snapshot of the same file ---------------- */
  1#include <linux/err.h>
  2#include <linux/slab.h>
 
  3#include <linux/spinlock.h>
  4#include <linux/hardirq.h>
  5#include "ctree.h"
  6#include "extent_map.h"
  7
  8
  9static struct kmem_cache *extent_map_cache;
 10
 11int __init extent_map_init(void)
 12{
 13	extent_map_cache = kmem_cache_create("btrfs_extent_map",
 14			sizeof(struct extent_map), 0,
 15			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
 16	if (!extent_map_cache)
 17		return -ENOMEM;
 18	return 0;
 19}
 20
 21void extent_map_exit(void)
 22{
 23	if (extent_map_cache)
 24		kmem_cache_destroy(extent_map_cache);
 25}
 26
/**
 * extent_map_tree_init - initialize extent map tree
 * @tree:		tree to initialize
 *
 * Initialize the extent tree @tree.  Should be called for each new inode
 * or other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree)
{
	tree->map = RB_ROOT;		/* empty rb-tree of extent_maps */
	/* extents land here when modified; consumed by fsync logging
	 * (see unpin_extent_cache() below) */
	INIT_LIST_HEAD(&tree->modified_extents);
	rwlock_init(&tree->lock);	/* guards all tree modifications */
}
 40
 41/**
 42 * alloc_extent_map - allocate new extent map structure
 43 *
 44 * Allocate a new extent_map structure.  The new structure is
 45 * returned with a reference count of one and needs to be
 46 * freed using free_extent_map()
 47 */
 48struct extent_map *alloc_extent_map(void)
 49{
 50	struct extent_map *em;
 51	em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
 52	if (!em)
 53		return NULL;
 54	RB_CLEAR_NODE(&em->rb_node);
 55	em->flags = 0;
 56	em->compress_type = BTRFS_COMPRESS_NONE;
 57	em->generation = 0;
 58	atomic_set(&em->refs, 1);
 59	INIT_LIST_HEAD(&em->list);
 60	return em;
 61}
 62
 63/**
 64 * free_extent_map - drop reference count of an extent_map
 65 * @em:		extent map beeing releasead
 66 *
 67 * Drops the reference out on @em by one and free the structure
 68 * if the reference count hits zero.
 69 */
 70void free_extent_map(struct extent_map *em)
 71{
 72	if (!em)
 73		return;
 74	WARN_ON(atomic_read(&em->refs) == 0);
 75	if (atomic_dec_and_test(&em->refs)) {
 76		WARN_ON(extent_map_in_tree(em));
 77		WARN_ON(!list_empty(&em->list));
 78		kmem_cache_free(extent_map_cache, em);
 79	}
 80}
 81
 82/* simple helper to do math around the end of an extent, handling wrap */
 83static u64 range_end(u64 start, u64 len)
 84{
 85	if (start + len < start)
 86		return (u64)-1;
 87	return start + len;
 88}
 89
/*
 * tree_insert - insert @em into an rb-tree keyed by file offset
 * @root:	root of the tree
 * @em:		extent map to insert
 *
 * Returns 0 on success or -EEXIST if any existing extent overlaps
 * [em->start, em->start + em->len).
 */
static int tree_insert(struct rb_root *root, struct extent_map *em)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry = NULL;
	struct rb_node *orig_parent = NULL;
	u64 end = range_end(em->start, em->len);

	/* standard rb-tree descent keyed on em->start */
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		if (em->start < entry->start)
			p = &(*p)->rb_left;
		else if (em->start >= extent_map_end(entry))
			p = &(*p)->rb_right;
		else
			return -EEXIST;	/* em->start inside an existing extent */
	}

	/* scan forward from the insertion point: a later extent that
	 * overlaps [em->start, end) makes the insert a conflict.  The
	 * "parent &&" checks guard the entry dereferences; rb_entry()
	 * of NULL is never dereferenced. */
	orig_parent = parent;
	while (parent && em->start >= extent_map_end(entry)) {
		parent = rb_next(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	/* likewise scan backward for an overlapping earlier extent */
	parent = orig_parent;
	entry = rb_entry(parent, struct extent_map, rb_node);
	while (parent && em->start < entry->start) {
		parent = rb_prev(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	rb_link_node(&em->rb_node, orig_parent, p);
	rb_insert_color(&em->rb_node, root);
	return 0;
}
133
/*
 * search through the tree for an extent_map with a given offset.  If
 * it can't be found, try to find some neighboring extents
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		/* remember the last node visited for the neighbor search */
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;	/* exact hit: offset inside this extent */
	}

	if (prev_ret) {
		/* walk forward until an extent ends past offset; the
		 * "prev &&" check guards the prev_entry dereference */
		orig_prev = prev;
		while (prev && offset >= extent_map_end(prev_entry)) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		/* walk backward until an extent starts at or before offset */
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}
181
182/* check to see if two extent_map structs are adjacent and safe to merge */
183static int mergable_maps(struct extent_map *prev, struct extent_map *next)
184{
185	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
186		return 0;
187
188	/*
189	 * don't merge compressed extents, we need to know their
190	 * actual size
191	 */
192	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
193		return 0;
194
195	if (test_bit(EXTENT_FLAG_LOGGING, &prev->flags) ||
196	    test_bit(EXTENT_FLAG_LOGGING, &next->flags))
197		return 0;
198
199	/*
200	 * We don't want to merge stuff that hasn't been written to the log yet
201	 * since it may not reflect exactly what is on disk, and that would be
202	 * bad.
203	 */
204	if (!list_empty(&prev->list) || !list_empty(&next->list))
205		return 0;
206
207	if (extent_map_end(prev) == next->start &&
208	    prev->flags == next->flags &&
209	    prev->bdev == next->bdev &&
210	    ((next->block_start == EXTENT_MAP_HOLE &&
211	      prev->block_start == EXTENT_MAP_HOLE) ||
212	     (next->block_start == EXTENT_MAP_INLINE &&
213	      prev->block_start == EXTENT_MAP_INLINE) ||
214	     (next->block_start == EXTENT_MAP_DELALLOC &&
215	      prev->block_start == EXTENT_MAP_DELALLOC) ||
216	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
217	      next->block_start == extent_map_block_end(prev)))) {
218		return 1;
219	}
220	return 0;
221}
222
/*
 * try_merge_map - merge @em with its neighbors in @tree when possible
 * @tree:	tree containing @em
 * @em:		extent map that was just inserted or unpinned
 *
 * @em absorbs a mergable previous and/or next extent (see mergable_maps());
 * the absorbed extent is unlinked from the tree and its reference dropped.
 */
static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
{
	struct extent_map *merge = NULL;
	struct rb_node *rb;

	/* merge with the previous extent: em grows downward */
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->orig_start = merge->orig_start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			/* extend the modified range down to merge's mod_start */
			em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
			em->mod_start = merge->mod_start;
			em->generation = max(em->generation, merge->generation);

			rb_erase(&merge->rb_node, &tree->map);
			RB_CLEAR_NODE(&merge->rb_node);
			free_extent_map(merge);
		}
	}

	/* merge with the next extent: em grows upward */
	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->block_len;
		rb_erase(&merge->rb_node, &tree->map);
		RB_CLEAR_NODE(&merge->rb_node);
		/* extend the modified range up to the end of merge's */
		em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
		em->generation = max(em->generation, merge->generation);
		free_extent_map(merge);
	}
}
261
/**
 * unpin_extent_cache - unpin an extent from the cache
 * @tree:	tree to unpin the extent in
 * @start:	logical offset in the file
 * @len:	length of the extent
 * @gen:	generation that this extent has been modified in
 *
 * Called after an extent has been written to disk properly.  Set the generation
 * to the generation that actually added the file item to the inode so we know
 * we need to sync this extent when we call fsync().
 */
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
		       u64 gen)
{
	int ret = 0;
	struct extent_map *em;
	bool prealloc = false;

	write_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);

	/* the extent being unpinned is expected to be cached at @start */
	WARN_ON(!em || em->start != start);

	if (!em)
		goto out;

	/* queue for fsync unless the extent is being logged right now */
	if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
		list_move(&em->list, &tree->modified_extents);
	em->generation = gen;
	clear_bit(EXTENT_FLAG_PINNED, &em->flags);
	em->mod_start = em->start;
	em->mod_len = em->len;

	/* remember FILLING so mod_start/mod_len can be restored below */
	if (test_bit(EXTENT_FLAG_FILLING, &em->flags)) {
		prealloc = true;
		clear_bit(EXTENT_FLAG_FILLING, &em->flags);
	}

	try_merge_map(tree, em);

	/* a merge may have grown em; for prealloc keep the full range */
	if (prealloc) {
		em->mod_start = em->start;
		em->mod_len = em->len;
	}

	/* drop the reference taken by lookup_extent_mapping() */
	free_extent_map(em);
out:
	write_unlock(&tree->lock);
	return ret;
}
313
/*
 * clear_em_logging - finish logging an extent
 * @tree:	tree the extent lives in
 * @em:		extent being unlogged
 *
 * Clears EXTENT_FLAG_LOGGING and, if @em is still linked in the tree,
 * retries the merge that the logging flag was blocking (mergable_maps()
 * refuses to merge extents with EXTENT_FLAG_LOGGING set).
 */
void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
{
	clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
	if (extent_map_in_tree(em))
		try_merge_map(tree, em);
}
320
321static inline void setup_extent_mapping(struct extent_map_tree *tree,
322					struct extent_map *em,
323					int modified)
324{
325	atomic_inc(&em->refs);
326	em->mod_start = em->start;
327	em->mod_len = em->len;
328
329	if (modified)
330		list_move(&em->list, &tree->modified_extents);
331	else
332		try_merge_map(tree, em);
333}
334
335/**
336 * add_extent_mapping - add new extent map to the extent tree
337 * @tree:	tree to insert new map in
338 * @em:		map to insert
339 *
340 * Insert @em into @tree or perform a simple forward/backward merge with
341 * existing mappings.  The extent_map struct passed in will be inserted
342 * into the tree directly, with an additional reference taken, or a
343 * reference dropped if the merge attempt was successful.
344 */
345int add_extent_mapping(struct extent_map_tree *tree,
346		       struct extent_map *em, int modified)
347{
348	int ret = 0;
 
 
349
350	ret = tree_insert(&tree->map, em);
351	if (ret)
 
 
 
 
 
 
 
352		goto out;
 
 
353
354	setup_extent_mapping(tree, em, modified);
355out:
356	return ret;
357}
358
/*
 * __lookup_extent_mapping - core of the lookup routines below
 * @tree:	tree to search
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 * @strict:	when set, return NULL unless the extent found actually
 *		intersects [start, start + len); when clear, any nearby
 *		extent found by the tree search is acceptable
 *
 * On success the returned extent_map has its reference count incremented;
 * drop it with free_extent_map().
 */
static struct extent_map *
__lookup_extent_mapping(struct extent_map_tree *tree,
			u64 start, u64 len, int strict)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;
	u64 end = range_end(start, len);	/* clamped against overflow */

	rb_node = __tree_search(&tree->map, start, &prev, &next);
	if (!rb_node) {
		/* no exact hit: fall back to a neighboring extent */
		if (prev)
			rb_node = prev;
		else if (next)
			rb_node = next;
		else
			return NULL;	/* tree is empty */
	}

	em = rb_entry(rb_node, struct extent_map, rb_node);

	/* intersection test: [start, end) vs [em->start, extent_map_end) */
	if (strict && !(end > em->start && start < extent_map_end(em)))
		return NULL;

	atomic_inc(&em->refs);
	return em;
}
387
/**
 * lookup_extent_mapping - lookup extent_map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, len] range.  There may be additional objects in the tree that
 * intersect, so check the object returned carefully to make sure that no
 * additional lookups are needed.
 *
 * The returned extent, if any, carries an extra reference; release it
 * with free_extent_map().
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	/* strict=1: only an actually-intersecting extent is returned */
	return __lookup_extent_mapping(tree, start, len, 1);
}
404
/**
 * search_extent_mapping - find a nearby extent map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, len] range.
 *
 * If one can't be found, any nearby extent may be returned.  The result,
 * if any, carries an extra reference; release it with free_extent_map().
 */
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	/* strict=0: a non-intersecting neighbor is an acceptable result */
	return __lookup_extent_mapping(tree, start, len, 0);
}
421
422/**
423 * remove_extent_mapping - removes an extent_map from the extent tree
424 * @tree:	extent tree to remove from
425 * @em:		extent map beeing removed
426 *
427 * Removes @em from @tree.  No reference counts are dropped, and no checks
428 * are done to see if the range is in use
429 */
430int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
431{
432	int ret = 0;
433
434	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
435	rb_erase(&em->rb_node, &tree->map);
436	if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
437		list_del_init(&em->list);
438	RB_CLEAR_NODE(&em->rb_node);
439	return ret;
440}
441
/*
 * replace_extent_mapping - swap @cur for @new in the tree, in place
 * @tree:	tree to operate on
 * @cur:	extent map currently linked in the tree
 * @new:	extent map to put in its place
 * @modified:	nonzero if @new should be queued on the modified list
 *
 * @cur is unlinked (its rb_node cleared, and it is dropped from the
 * modified list unless it is being logged); no reference on @cur is
 * dropped here.  setup_extent_mapping() takes a reference on @new.
 */
void replace_extent_mapping(struct extent_map_tree *tree,
			    struct extent_map *cur,
			    struct extent_map *new,
			    int modified)
{
	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &cur->flags));
	ASSERT(extent_map_in_tree(cur));
	if (!test_bit(EXTENT_FLAG_LOGGING, &cur->flags))
		list_del_init(&cur->list);
	rb_replace_node(&cur->rb_node, &new->rb_node, &tree->map);
	RB_CLEAR_NODE(&cur->rb_node);

	setup_extent_mapping(tree, new, modified);
}