/* Note: this file does not exist in v6.8; the readahead framework was removed from later kernels. */
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "transaction.h"
#include "dev-replace.h"

#undef DEBUG

/*
 * This is the implementation of the generic read ahead framework.
 *
 * To trigger a readahead, btrfs_reada_add must be called. It will start
 * a read ahead for the given range [start, end) on tree root. The returned
 * handle can either be used to wait on the readahead to finish
 * (btrfs_reada_wait), or to send it to the background (btrfs_reada_detach).
 *
 * The read ahead works as follows:
 * On btrfs_reada_add, the root of the tree is inserted into a radix_tree.
 * reada_start_machine will then search for extents to prefetch and trigger
 * some reads. When a read finishes for a node, all contained node/leaf
 * pointers that lie in the given range will also be enqueued. The reads will
 * be triggered in sequential order, thus giving a big win over a naive
 * enumeration. It will also make use of multi-device layouts: each disk
 * will have its own read pointer and all disks will be utilized in parallel.
 * Also, no two disks will read both sides of a mirror simultaneously, as this
 * would waste seeking capacity. Instead both disks will read different parts
 * of the filesystem.
 * Any number of readaheads can be started in parallel. The read order will be
 * determined globally, i.e. two parallel readaheads will normally finish
 * faster than the same two started one after another.
 */

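/*
 * A minimal usage sketch of the interface described above (not part of the
 * original file; 'root' and the key range are placeholders): start a
 * readahead over a whole tree and wait for it to finish.
 *
 *	struct btrfs_key key_start = { .objectid = 0, .type = 0, .offset = 0 };
 *	struct btrfs_key key_end = { .objectid = (u64)-1, .type = (u8)-1,
 *				     .offset = (u64)-1 };
 *	struct reada_control *rc;
 *
 *	rc = btrfs_reada_add(root, &key_start, &key_end);
 *	if (!IS_ERR(rc))
 *		btrfs_reada_wait(rc);
 *
 * Callers that don't care about completion can call btrfs_reada_detach(rc)
 * instead of btrfs_reada_wait(rc) to let it finish in the background.
 */
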
#define MAX_IN_FLIGHT 6
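/* Caps the number of per-device reads in flight; see __reada_start_machine(). */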

struct reada_extctl {
	struct list_head	list;
	struct reada_control	*rc;
	u64			generation;
};

struct reada_extent {
	u64			logical;
	struct btrfs_key	top;
	struct list_head	extctl;
	int			refcnt;
	spinlock_t		lock;
	struct reada_zone	*zones[BTRFS_MAX_MIRRORS];
	int			nzones;
	int			scheduled;
};

struct reada_zone {
	u64			start;
	u64			end;
	u64			elems;
	struct list_head	list;
	spinlock_t		lock;
	int			locked;
	struct btrfs_device	*device;
	struct btrfs_device	*devs[BTRFS_MAX_MIRRORS]; /* full list, incl
							   * self */
	int			ndevs;
	struct kref		refcnt;
};
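
/*
 * Radix tree keying: zones are indexed in dev->reada_zones by their last
 * page (zone->end >> PAGE_SHIFT); extents are indexed in fs_info->reada_tree
 * and dev->reada_extents by their first page (logical >> PAGE_SHIFT). A
 * radix_tree_gang_lookup() starting at a logical address's page index thus
 * returns the first zone that could still cover it, which is then bounds
 * checked against [zone->start, zone->end].
 */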

struct reada_machine_work {
	struct btrfs_work	work;
	struct btrfs_fs_info	*fs_info;
};

static void reada_extent_put(struct btrfs_fs_info *, struct reada_extent *);
static void reada_control_release(struct kref *kref);
static void reada_zone_release(struct kref *kref);
static void reada_start_machine(struct btrfs_fs_info *fs_info);
static void __reada_start_machine(struct btrfs_fs_info *fs_info);

static int reada_add_block(struct reada_control *rc, u64 logical,
			   struct btrfs_key *top, u64 generation);

/* recurses; in case of error, eb might be NULL */
static void __readahead_hook(struct btrfs_fs_info *fs_info,
			     struct reada_extent *re, struct extent_buffer *eb,
			     int err)
{
	int nritems;
	int i;
	u64 bytenr;
	u64 generation;
	struct list_head list;

	spin_lock(&re->lock);
	/*
	 * Just take the full list from the extent; afterwards we
	 * don't need the lock anymore.
	 */
	list_replace_init(&re->extctl, &list);
	re->scheduled = 0;
	spin_unlock(&re->lock);

	/*
	 * This is the error case: the extent buffer has not been
	 * read correctly. We won't access anything from it and
	 * just clean up our data structures. Effectively this will
	 * cut the branch below this node from read ahead.
	 */
	if (err)
		goto cleanup;

	/*
	 * FIXME: currently we just set nritems to 0 if this is a leaf,
	 * effectively ignoring the content. In a next step we could
	 * trigger more readahead depending on the content, e.g.
	 * fetch the checksums for the extents in the leaf.
	 */
	if (!btrfs_header_level(eb))
		goto cleanup;

	nritems = btrfs_header_nritems(eb);
	generation = btrfs_header_generation(eb);
	for (i = 0; i < nritems; i++) {
		struct reada_extctl *rec;
		u64 n_gen;
		struct btrfs_key key;
		struct btrfs_key next_key;

		btrfs_node_key_to_cpu(eb, &key, i);
		if (i + 1 < nritems)
			btrfs_node_key_to_cpu(eb, &next_key, i + 1);
		else
			next_key = re->top;
		bytenr = btrfs_node_blockptr(eb, i);
		n_gen = btrfs_node_ptr_generation(eb, i);

		list_for_each_entry(rec, &list, list) {
			struct reada_control *rc = rec->rc;

			/*
			 * If the generation doesn't match, just ignore this
			 * extctl. This will probably cut off a branch from
			 * prefetch. Alternatively one could start a new (sub-)
			 * prefetch for this branch, starting again from root.
			 * FIXME: move the generation check out of this loop
			 */
#ifdef DEBUG
			if (rec->generation != generation) {
				btrfs_debug(fs_info,
					    "generation mismatch for (%llu,%d,%llu) %llu != %llu",
					    key.objectid, key.type, key.offset,
					    rec->generation, generation);
			}
#endif
			if (rec->generation == generation &&
			    btrfs_comp_cpu_keys(&key, &rc->key_end) < 0 &&
			    btrfs_comp_cpu_keys(&next_key, &rc->key_start) > 0)
				reada_add_block(rc, bytenr, &next_key, n_gen);
		}
	}

cleanup:
	/*
	 * free extctl records
	 */
	while (!list_empty(&list)) {
		struct reada_control *rc;
		struct reada_extctl *rec;

		rec = list_first_entry(&list, struct reada_extctl, list);
		list_del(&rec->list);
		rc = rec->rc;
		kfree(rec);

		kref_get(&rc->refcnt);
		if (atomic_dec_and_test(&rc->elems)) {
			kref_put(&rc->refcnt, reada_control_release);
			wake_up(&rc->wait);
		}
		kref_put(&rc->refcnt, reada_control_release);

		reada_extent_put(fs_info, re);	/* one ref for each entry */
	}
}

int btree_readahead_hook(struct extent_buffer *eb, int err)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	int ret = 0;
	struct reada_extent *re;

	/* find extent */
	spin_lock(&fs_info->reada_lock);
	re = radix_tree_lookup(&fs_info->reada_tree,
			       eb->start >> PAGE_SHIFT);
	if (re)
		re->refcnt++;
	spin_unlock(&fs_info->reada_lock);
	if (!re) {
		ret = -1;
		goto start_machine;
	}

	__readahead_hook(fs_info, re, eb, err);
	reada_extent_put(fs_info, re);	/* our ref */

start_machine:
	reada_start_machine(fs_info);
	return ret;
}

static struct reada_zone *reada_find_zone(struct btrfs_device *dev, u64 logical,
					  struct btrfs_bio *bbio)
{
	struct btrfs_fs_info *fs_info = dev->fs_info;
	int ret;
	struct reada_zone *zone;
	struct btrfs_block_group_cache *cache = NULL;
	u64 start;
	u64 end;
	int i;

	zone = NULL;
	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
				     logical >> PAGE_SHIFT, 1);
	if (ret == 1 && logical >= zone->start && logical <= zone->end) {
		kref_get(&zone->refcnt);
		spin_unlock(&fs_info->reada_lock);
		return zone;
	}

	spin_unlock(&fs_info->reada_lock);

	cache = btrfs_lookup_block_group(fs_info, logical);
	if (!cache)
		return NULL;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;
	btrfs_put_block_group(cache);

	zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	if (!zone)
		return NULL;

	ret = radix_tree_preload(GFP_KERNEL);
	if (ret) {
		kfree(zone);
		return NULL;
	}

	zone->start = start;
	zone->end = end;
	INIT_LIST_HEAD(&zone->list);
	spin_lock_init(&zone->lock);
	zone->locked = 0;
	kref_init(&zone->refcnt);
	zone->elems = 0;
	zone->device = dev; /* our device always sits at index 0 */
	for (i = 0; i < bbio->num_stripes; ++i) {
		/* bounds have already been checked */
		zone->devs[i] = bbio->stripes[i].dev;
	}
	zone->ndevs = bbio->num_stripes;

	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_insert(&dev->reada_zones,
				(unsigned long)(zone->end >> PAGE_SHIFT),
				zone);

	if (ret == -EEXIST) {
		kfree(zone);
		ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
					     logical >> PAGE_SHIFT, 1);
		if (ret == 1 && logical >= zone->start && logical <= zone->end)
			kref_get(&zone->refcnt);
		else
			zone = NULL;
	}
	spin_unlock(&fs_info->reada_lock);
	radix_tree_preload_end();

	return zone;
}

static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
					      u64 logical,
					      struct btrfs_key *top)
{
	int ret;
	struct reada_extent *re = NULL;
	struct reada_extent *re_exist = NULL;
	struct btrfs_bio *bbio = NULL;
	struct btrfs_device *dev;
	struct btrfs_device *prev_dev;
	u64 length;
	int real_stripes;
	int nzones = 0;
	unsigned long index = logical >> PAGE_SHIFT;
	int dev_replace_is_ongoing;
	int have_zone = 0;

	spin_lock(&fs_info->reada_lock);
	re = radix_tree_lookup(&fs_info->reada_tree, index);
	if (re)
		re->refcnt++;
	spin_unlock(&fs_info->reada_lock);

	if (re)
		return re;

	re = kzalloc(sizeof(*re), GFP_KERNEL);
	if (!re)
		return NULL;

	re->logical = logical;
	re->top = *top;
	INIT_LIST_HEAD(&re->extctl);
	spin_lock_init(&re->lock);
	re->refcnt = 1;

	/*
	 * map block
	 */
	length = fs_info->nodesize;
	ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
			&length, &bbio, 0);
	if (ret || !bbio || length < fs_info->nodesize)
		goto error;

	if (bbio->num_stripes > BTRFS_MAX_MIRRORS) {
		btrfs_err(fs_info,
			   "readahead: more than %d copies not supported",
			   BTRFS_MAX_MIRRORS);
		goto error;
	}

	real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
	for (nzones = 0; nzones < real_stripes; ++nzones) {
		struct reada_zone *zone;

		dev = bbio->stripes[nzones].dev;

		/* cannot read ahead on missing device. */
		if (!dev->bdev)
			continue;

		zone = reada_find_zone(dev, logical, bbio);
		if (!zone)
			continue;

		re->zones[re->nzones++] = zone;
		spin_lock(&zone->lock);
		if (!zone->elems)
			kref_get(&zone->refcnt);
		++zone->elems;
		spin_unlock(&zone->lock);
		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	if (re->nzones == 0) {
		/* not a single zone found, error and out */
		goto error;
	}

	ret = radix_tree_preload(GFP_KERNEL);
	if (ret)
		goto error;

	/* insert extent in reada_tree + all per-device trees, all or nothing */
	btrfs_dev_replace_read_lock(&fs_info->dev_replace);
	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_insert(&fs_info->reada_tree, index, re);
	if (ret == -EEXIST) {
		re_exist = radix_tree_lookup(&fs_info->reada_tree, index);
		re_exist->refcnt++;
		spin_unlock(&fs_info->reada_lock);
		btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
		radix_tree_preload_end();
		goto error;
	}
	if (ret) {
		spin_unlock(&fs_info->reada_lock);
		btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
		radix_tree_preload_end();
		goto error;
	}
	radix_tree_preload_end();
	prev_dev = NULL;
	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(
			&fs_info->dev_replace);
	for (nzones = 0; nzones < re->nzones; ++nzones) {
		dev = re->zones[nzones]->device;

		if (dev == prev_dev) {
			/*
			 * in case of DUP, just add the first zone. As both
			 * are on the same device, there's nothing to gain
			 * from adding both.
			 * Also, it wouldn't work, as the tree is per device
			 * and adding would fail with EEXIST
			 */
			continue;
		}
		if (!dev->bdev)
			continue;

		if (dev_replace_is_ongoing &&
		    dev == fs_info->dev_replace.tgtdev) {
			/*
			 * as this device is selected for reading only as
			 * a last resort, skip it for read ahead.
			 */
			continue;
		}
		prev_dev = dev;
		ret = radix_tree_insert(&dev->reada_extents, index, re);
		if (ret) {
			while (--nzones >= 0) {
				dev = re->zones[nzones]->device;
				BUG_ON(dev == NULL);
				/* ignore whether the entry was inserted */
				radix_tree_delete(&dev->reada_extents, index);
			}
			radix_tree_delete(&fs_info->reada_tree, index);
			spin_unlock(&fs_info->reada_lock);
			btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
			goto error;
		}
		have_zone = 1;
	}
	spin_unlock(&fs_info->reada_lock);
	btrfs_dev_replace_read_unlock(&fs_info->dev_replace);

	if (!have_zone)
		goto error;

	btrfs_put_bbio(bbio);
	return re;

error:
	for (nzones = 0; nzones < re->nzones; ++nzones) {
		struct reada_zone *zone;

		zone = re->zones[nzones];
		kref_get(&zone->refcnt);
		spin_lock(&zone->lock);
		--zone->elems;
		if (zone->elems == 0) {
			/*
			 * no fs_info->reada_lock needed, as this can't be
			 * the last ref
			 */
			kref_put(&zone->refcnt, reada_zone_release);
		}
		spin_unlock(&zone->lock);

		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	btrfs_put_bbio(bbio);
	kfree(re);
	return re_exist;
}

static void reada_extent_put(struct btrfs_fs_info *fs_info,
			     struct reada_extent *re)
{
	int i;
	unsigned long index = re->logical >> PAGE_SHIFT;

	spin_lock(&fs_info->reada_lock);
	if (--re->refcnt) {
		spin_unlock(&fs_info->reada_lock);
		return;
	}

	radix_tree_delete(&fs_info->reada_tree, index);
	for (i = 0; i < re->nzones; ++i) {
		struct reada_zone *zone = re->zones[i];

		radix_tree_delete(&zone->device->reada_extents, index);
	}

	spin_unlock(&fs_info->reada_lock);

	for (i = 0; i < re->nzones; ++i) {
		struct reada_zone *zone = re->zones[i];

		kref_get(&zone->refcnt);
		spin_lock(&zone->lock);
		--zone->elems;
		if (zone->elems == 0) {
			/*
			 * no fs_info->reada_lock needed, as this can't be
			 * the last ref
			 */
			kref_put(&zone->refcnt, reada_zone_release);
		}
		spin_unlock(&zone->lock);

		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}

	kfree(re);
}

static void reada_zone_release(struct kref *kref)
{
	struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);

	radix_tree_delete(&zone->device->reada_zones,
			  zone->end >> PAGE_SHIFT);

	kfree(zone);
}

static void reada_control_release(struct kref *kref)
{
	struct reada_control *rc = container_of(kref, struct reada_control,
						refcnt);

	kfree(rc);
}

static int reada_add_block(struct reada_control *rc, u64 logical,
			   struct btrfs_key *top, u64 generation)
{
	struct btrfs_fs_info *fs_info = rc->fs_info;
	struct reada_extent *re;
	struct reada_extctl *rec;

	/* takes one ref */
	re = reada_find_extent(fs_info, logical, top);
	if (!re)
		return -1;

	rec = kzalloc(sizeof(*rec), GFP_KERNEL);
	if (!rec) {
		reada_extent_put(fs_info, re);
		return -ENOMEM;
	}

	rec->rc = rc;
	rec->generation = generation;
	atomic_inc(&rc->elems);

	spin_lock(&re->lock);
	list_add_tail(&rec->list, &re->extctl);
	spin_unlock(&re->lock);

	/* leave the ref on the extent */

	return 0;
}

/*
 * called with fs_info->reada_lock held
 */
static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)
{
	int i;
	unsigned long index = zone->end >> PAGE_SHIFT;

	for (i = 0; i < zone->ndevs; ++i) {
		struct reada_zone *peer;

		peer = radix_tree_lookup(&zone->devs[i]->reada_zones, index);
		if (peer && peer->device != zone->device)
			peer->locked = lock;
	}
}

/*
 * called with fs_info->reada_lock held. Picks the zone with the most
 * outstanding elements, preferring zones that are not currently locked
 * by a peer device's read pointer.
 */
static int reada_pick_zone(struct btrfs_device *dev)
{
	struct reada_zone *top_zone = NULL;
	struct reada_zone *top_locked_zone = NULL;
	u64 top_elems = 0;
	u64 top_locked_elems = 0;
	unsigned long index = 0;
	int ret;

	if (dev->reada_curr_zone) {
		reada_peer_zones_set_lock(dev->reada_curr_zone, 0);
		kref_put(&dev->reada_curr_zone->refcnt, reada_zone_release);
		dev->reada_curr_zone = NULL;
	}
	/* pick the zone with the most elements */
	while (1) {
		struct reada_zone *zone;

		ret = radix_tree_gang_lookup(&dev->reada_zones,
					     (void **)&zone, index, 1);
		if (ret == 0)
			break;
		index = (zone->end >> PAGE_SHIFT) + 1;
		if (zone->locked) {
			if (zone->elems > top_locked_elems) {
				top_locked_elems = zone->elems;
				top_locked_zone = zone;
			}
		} else {
			if (zone->elems > top_elems) {
				top_elems = zone->elems;
				top_zone = zone;
			}
		}
	}
	if (top_zone)
		dev->reada_curr_zone = top_zone;
	else if (top_locked_zone)
		dev->reada_curr_zone = top_locked_zone;
	else
		return 0;

	dev->reada_next = dev->reada_curr_zone->start;
	kref_get(&dev->reada_curr_zone->refcnt);
	reada_peer_zones_set_lock(dev->reada_curr_zone, 1);

	return 1;
}

static int reada_start_machine_dev(struct btrfs_device *dev)
{
	struct btrfs_fs_info *fs_info = dev->fs_info;
	struct reada_extent *re = NULL;
	int mirror_num = 0;
	struct extent_buffer *eb = NULL;
	u64 logical;
	int ret;
	int i;

	spin_lock(&fs_info->reada_lock);
	if (dev->reada_curr_zone == NULL) {
		ret = reada_pick_zone(dev);
		if (!ret) {
			spin_unlock(&fs_info->reada_lock);
			return 0;
		}
	}
	/*
	 * FIXME currently we issue the reads one extent at a time. If we have
	 * a contiguous block of extents, we could also coalesce them or use
	 * plugging to speed things up (a sketch of the plugging idea follows).
	 */
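	/*
	 * A minimal sketch of the plugging idea from the FIXME above (an
	 * assumption, not implemented here): batch the per-extent reads
	 * under one block-layer plug so adjacent requests can be merged:
	 *
	 *	struct blk_plug plug;
	 *
	 *	blk_start_plug(&plug);
	 *	... issue reada_tree_block_flagged() for each queued extent ...
	 *	blk_finish_plug(&plug);
	 */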
	ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
				     dev->reada_next >> PAGE_SHIFT, 1);
	if (ret == 0 || re->logical > dev->reada_curr_zone->end) {
		ret = reada_pick_zone(dev);
		if (!ret) {
			spin_unlock(&fs_info->reada_lock);
			return 0;
		}
		re = NULL;
		ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
					     dev->reada_next >> PAGE_SHIFT, 1);
	}
	if (ret == 0) {
		spin_unlock(&fs_info->reada_lock);
		return 0;
	}
	dev->reada_next = re->logical + fs_info->nodesize;
	re->refcnt++;

	spin_unlock(&fs_info->reada_lock);

	spin_lock(&re->lock);
	if (re->scheduled || list_empty(&re->extctl)) {
		spin_unlock(&re->lock);
		reada_extent_put(fs_info, re);
		return 0;
	}
	re->scheduled = 1;
	spin_unlock(&re->lock);

	/*
	 * find mirror num
	 */
	for (i = 0; i < re->nzones; ++i) {
		if (re->zones[i]->device == dev) {
			mirror_num = i + 1;
			break;
		}
	}
	logical = re->logical;

	atomic_inc(&dev->reada_in_flight);
	ret = reada_tree_block_flagged(fs_info, logical, mirror_num, &eb);
	if (ret)
		__readahead_hook(fs_info, re, NULL, ret);
	else if (eb)
		__readahead_hook(fs_info, re, eb, ret);

	if (eb)
		free_extent_buffer(eb);

	atomic_dec(&dev->reada_in_flight);
	reada_extent_put(fs_info, re);

	return 1;
}

static void reada_start_machine_worker(struct btrfs_work *work)
{
	struct reada_machine_work *rmw;
	struct btrfs_fs_info *fs_info;
	int old_ioprio;

	rmw = container_of(work, struct reada_machine_work, work);
	fs_info = rmw->fs_info;

	kfree(rmw);

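	/*
	 * Run the state machine at readahead I/O priority so the prefetch
	 * reads don't compete with foreground I/O; the previous priority
	 * is restored afterwards.
	 */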
	old_ioprio = IOPRIO_PRIO_VALUE(task_nice_ioclass(current),
				       task_nice_ioprio(current));
	set_task_ioprio(current, BTRFS_IOPRIO_READA);
	__reada_start_machine(fs_info);
	set_task_ioprio(current, old_ioprio);

	atomic_dec(&fs_info->reada_works_cnt);
}

static void __reada_start_machine(struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 enqueued;
	u64 total = 0;
	int i;

	do {
		enqueued = 0;
		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry(device, &fs_devices->devices, dev_list) {
			if (atomic_read(&device->reada_in_flight) <
			    MAX_IN_FLIGHT)
				enqueued += reada_start_machine_dev(device);
		}
		mutex_unlock(&fs_devices->device_list_mutex);
		total += enqueued;
	} while (enqueued && total < 10000);

	if (enqueued == 0)
		return;

	/*
	 * If everything is already in the cache, this is effectively single
	 * threaded. To a) not hold the caller for too long and b) to utilize
	 * more cores, we break the loop above after 10000 enqueued reads and
	 * hand the rest over to the workers. This will distribute the load to
	 * the cores.
	 */
	for (i = 0; i < 2; ++i) {
		reada_start_machine(fs_info);
		if (atomic_read(&fs_info->reada_works_cnt) >
		    BTRFS_MAX_MIRRORS * 2)
			break;
	}
}

static void reada_start_machine(struct btrfs_fs_info *fs_info)
{
	struct reada_machine_work *rmw;

	rmw = kzalloc(sizeof(*rmw), GFP_KERNEL);
	if (!rmw) {
		/* FIXME we cannot handle this properly right now */
		BUG();
	}
	btrfs_init_work(&rmw->work, btrfs_readahead_helper,
			reada_start_machine_worker, NULL, NULL);
	rmw->fs_info = fs_info;

	btrfs_queue_work(fs_info->readahead_workers, &rmw->work);
	atomic_inc(&fs_info->reada_works_cnt);
}

#ifdef DEBUG
static void dump_devs(struct btrfs_fs_info *fs_info, int all)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	unsigned long index;
	int ret;
	int i;
	int j;
	int cnt;

	spin_lock(&fs_info->reada_lock);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		btrfs_debug(fs_info, "dev %lld has %d in flight", device->devid,
			atomic_read(&device->reada_in_flight));
		index = 0;
		while (1) {
			struct reada_zone *zone;

			ret = radix_tree_gang_lookup(&device->reada_zones,
						     (void **)&zone, index, 1);
			if (ret == 0)
				break;
			pr_debug("  zone %llu-%llu elems %llu locked %d devs",
				    zone->start, zone->end, zone->elems,
				    zone->locked);
			for (j = 0; j < zone->ndevs; ++j) {
				pr_cont(" %lld",
					zone->devs[j]->devid);
			}
			if (device->reada_curr_zone == zone)
				pr_cont(" curr off %llu",
					device->reada_next - zone->start);
			pr_cont("\n");
			index = (zone->end >> PAGE_SHIFT) + 1;
		}
		cnt = 0;
		index = 0;
		while (all) {
			struct reada_extent *re = NULL;

			ret = radix_tree_gang_lookup(&device->reada_extents,
						     (void **)&re, index, 1);
			if (ret == 0)
				break;
			pr_debug("  re: logical %llu size %u empty %d scheduled %d",
				re->logical, fs_info->nodesize,
				list_empty(&re->extctl), re->scheduled);

			for (i = 0; i < re->nzones; ++i) {
				pr_cont(" zone %llu-%llu devs",
					re->zones[i]->start,
					re->zones[i]->end);
				for (j = 0; j < re->zones[i]->ndevs; ++j) {
					pr_cont(" %lld",
						re->zones[i]->devs[j]->devid);
				}
			}
			pr_cont("\n");
			index = (re->logical >> PAGE_SHIFT) + 1;
			if (++cnt > 15)
				break;
		}
	}

	index = 0;
	cnt = 0;
	while (all) {
		struct reada_extent *re = NULL;

		ret = radix_tree_gang_lookup(&fs_info->reada_tree, (void **)&re,
					     index, 1);
		if (ret == 0)
			break;
		if (!re->scheduled) {
			index = (re->logical >> PAGE_SHIFT) + 1;
			continue;
		}
		pr_debug("re: logical %llu size %u list empty %d scheduled %d",
			re->logical, fs_info->nodesize,
			list_empty(&re->extctl), re->scheduled);
		for (i = 0; i < re->nzones; ++i) {
			pr_cont(" zone %llu-%llu devs",
				re->zones[i]->start,
				re->zones[i]->end);
			for (j = 0; j < re->zones[i]->ndevs; ++j) {
				pr_cont(" %lld",
				       re->zones[i]->devs[j]->devid);
			}
		}
		pr_cont("\n");
		index = (re->logical >> PAGE_SHIFT) + 1;
	}
	spin_unlock(&fs_info->reada_lock);
}
#endif

/*
 * interface
 */
struct reada_control *btrfs_reada_add(struct btrfs_root *root,
			struct btrfs_key *key_start, struct btrfs_key *key_end)
{
	struct reada_control *rc;
	u64 start;
	u64 generation;
	int ret;
	struct extent_buffer *node;
	static struct btrfs_key max_key = {
		.objectid = (u64)-1,
		.type = (u8)-1,
		.offset = (u64)-1
	};

	rc = kzalloc(sizeof(*rc), GFP_KERNEL);
	if (!rc)
		return ERR_PTR(-ENOMEM);

	rc->fs_info = root->fs_info;
	rc->key_start = *key_start;
	rc->key_end = *key_end;
	atomic_set(&rc->elems, 0);
	init_waitqueue_head(&rc->wait);
	kref_init(&rc->refcnt);
	kref_get(&rc->refcnt); /* one ref for having elements */

	node = btrfs_root_node(root);
	start = node->start;
	generation = btrfs_header_generation(node);
	free_extent_buffer(node);

	ret = reada_add_block(rc, start, &max_key, generation);
	if (ret) {
		kfree(rc);
		return ERR_PTR(ret);
	}

	reada_start_machine(root->fs_info);

	return rc;
}

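/*
 * Both btrfs_reada_wait() and btrfs_reada_detach() drop the caller's
 * reference on the reada_control, so the handle returned by
 * btrfs_reada_add() must be released through exactly one of them.
 */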
#ifdef DEBUG
int btrfs_reada_wait(void *handle)
{
	struct reada_control *rc = handle;
	struct btrfs_fs_info *fs_info = rc->fs_info;

	while (atomic_read(&rc->elems)) {
		if (!atomic_read(&fs_info->reada_works_cnt))
			reada_start_machine(fs_info);
		wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
				   5 * HZ);
		dump_devs(fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);
	}

	dump_devs(fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);

	kref_put(&rc->refcnt, reada_control_release);

	return 0;
}
#else
int btrfs_reada_wait(void *handle)
{
	struct reada_control *rc = handle;
	struct btrfs_fs_info *fs_info = rc->fs_info;

	while (atomic_read(&rc->elems)) {
		if (!atomic_read(&fs_info->reada_works_cnt))
			reada_start_machine(fs_info);
		wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
				   (HZ + 9) / 10);
	}

	kref_put(&rc->refcnt, reada_control_release);

	return 0;
}
#endif

void btrfs_reada_detach(void *handle)
{
	struct reada_control *rc = handle;

	kref_put(&rc->refcnt, reada_control_release);
}