/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "transaction.h"
#include "dev-replace.h"

#undef DEBUG

/*
 * This is the implementation of the generic read ahead framework.
 *
 * To trigger a readahead, btrfs_reada_add must be called. It will start
 * a read ahead for the given range [start, end) on tree root. The returned
 * handle can either be used to wait on the readahead to finish
 * (btrfs_reada_wait), or to send it to the background (btrfs_reada_detach).
 *
 * The read ahead works as follows:
 * On btrfs_reada_add, the root of the tree is inserted into a radix_tree.
 * reada_start_machine will then search for extents to prefetch and trigger
 * some reads. When a read finishes for a node, all contained node/leaf
 * pointers that lie in the given range will also be enqueued. The reads will
 * be triggered in sequential order, thus giving a big win over a naive
 * enumeration. It will also make use of multi-device layouts. Each disk
 * will have its own read pointer and all disks will be utilized in parallel.
 * Also, no two disks will read both sides of a mirror simultaneously, as this
 * would waste seeking capacity. Instead both disks will read different parts
 * of the filesystem.
 * Any number of readaheads can be started in parallel. The read order will be
 * determined globally, i.e. 2 parallel readaheads will normally finish faster
 * than the 2 started one after another.
 */
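
/*
 * Minimal usage sketch (illustrative only, not part of this file): a
 * caller such as scrub prefetches a whole tree and then either waits
 * for completion or sends the readahead to the background. The key
 * values below are placeholders.
 *
 *	struct reada_control *rc;
 *	struct btrfs_key key_start = { 0 };
 *	struct btrfs_key key_end = {
 *		.objectid = (u64)-1, .type = (u8)-1, .offset = (u64)-1
 *	};
 *
 *	rc = btrfs_reada_add(root, &key_start, &key_end);
 *	if (!IS_ERR(rc))
 *		btrfs_reada_wait(rc);	(or btrfs_reada_detach(rc))
 */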

#define MAX_IN_FLIGHT 6

struct reada_extctl {
	struct list_head	list;
	struct reada_control	*rc;
	u64			generation;
};

struct reada_extent {
	u64			logical;
	struct btrfs_key	top;
	u32			blocksize;
	int			err;
	struct list_head	extctl;
	int			refcnt;
	spinlock_t		lock;
	struct reada_zone	*zones[BTRFS_MAX_MIRRORS];
	int			nzones;
	struct btrfs_device	*scheduled_for;
};

struct reada_zone {
	u64			start;
	u64			end;
	u64			elems;
	struct list_head	list;
	spinlock_t		lock;
	int			locked;
	struct btrfs_device	*device;
	struct btrfs_device	*devs[BTRFS_MAX_MIRRORS]; /* full list, incl
							   * self */
	int			ndevs;
	struct kref		refcnt;
};

struct reada_machine_work {
	struct btrfs_work	work;
	struct btrfs_fs_info	*fs_info;
};

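/*
 * Bookkeeping overview (a summary of the structures above): extents
 * under readahead live in the global fs_info->reada_tree radix tree,
 * keyed by logical >> PAGE_CACHE_SHIFT. In addition, every device
 * carries two radix trees: reada_zones, keyed by the zone's end
 * offset, and reada_extents, holding the extents readable from that
 * device. All trees are protected by fs_info->reada_lock. reada_extent
 * uses a plain refcnt under that lock; reada_zone and reada_control
 * are kref-counted.
 */
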
static void reada_extent_put(struct btrfs_fs_info *, struct reada_extent *);
static void reada_control_release(struct kref *kref);
static void reada_zone_release(struct kref *kref);
static void reada_start_machine(struct btrfs_fs_info *fs_info);
static void __reada_start_machine(struct btrfs_fs_info *fs_info);

static int reada_add_block(struct reada_control *rc, u64 logical,
			   struct btrfs_key *top, int level, u64 generation);

/* recurses */
/* in case of error, eb might be NULL */
static int __readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
			    u64 start, int err)
{
	int level = 0;
	int nritems;
	int i;
	u64 bytenr;
	u64 generation;
	struct reada_extent *re;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head list;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct btrfs_device *for_dev;

	if (eb)
		level = btrfs_header_level(eb);

	/* find extent */
	spin_lock(&fs_info->reada_lock);
	re = radix_tree_lookup(&fs_info->reada_tree, index);
	if (re)
		re->refcnt++;
	spin_unlock(&fs_info->reada_lock);

	if (!re)
		return -1;

	spin_lock(&re->lock);
	/*
	 * just take the full list from the extent. afterwards we
	 * don't need the lock anymore
	 */
	list_replace_init(&re->extctl, &list);
	for_dev = re->scheduled_for;
	re->scheduled_for = NULL;
	spin_unlock(&re->lock);

	if (err == 0) {
		nritems = level ? btrfs_header_nritems(eb) : 0;
		generation = btrfs_header_generation(eb);
		/*
		 * FIXME: currently we just set nritems to 0 if this is a leaf,
		 * effectively ignoring the content. In a next step we could
		 * trigger more readahead depending on the content, e.g.
		 * fetch the checksums for the extents in the leaf.
		 */
	} else {
		/*
		 * this is the error case, the extent buffer has not been
		 * read correctly. We won't access anything from it and
		 * just cleanup our data structures. Effectively this will
		 * cut the branch below this node from read ahead.
		 */
		nritems = 0;
		generation = 0;
	}

	for (i = 0; i < nritems; i++) {
		struct reada_extctl *rec;
		u64 n_gen;
		struct btrfs_key key;
		struct btrfs_key next_key;

		btrfs_node_key_to_cpu(eb, &key, i);
		if (i + 1 < nritems)
			btrfs_node_key_to_cpu(eb, &next_key, i + 1);
		else
			next_key = re->top;
		bytenr = btrfs_node_blockptr(eb, i);
		n_gen = btrfs_node_ptr_generation(eb, i);

		list_for_each_entry(rec, &list, list) {
			struct reada_control *rc = rec->rc;

			/*
			 * if the generation doesn't match, just ignore this
			 * extctl. This will probably cut off a branch from
			 * prefetch. Alternatively one could start a new (sub-)
			 * prefetch for this branch, starting again from root.
			 * FIXME: move the generation check out of this loop
			 */
#ifdef DEBUG
			if (rec->generation != generation) {
				btrfs_debug(root->fs_info,
					   "generation mismatch for (%llu,%d,%llu) %llu != %llu",
				       key.objectid, key.type, key.offset,
				       rec->generation, generation);
			}
#endif
			if (rec->generation == generation &&
			    btrfs_comp_cpu_keys(&key, &rc->key_end) < 0 &&
			    btrfs_comp_cpu_keys(&next_key, &rc->key_start) > 0)
				reada_add_block(rc, bytenr, &next_key,
						level - 1, n_gen);
		}
	}
	/*
	 * free extctl records
	 */
	while (!list_empty(&list)) {
		struct reada_control *rc;
		struct reada_extctl *rec;

		rec = list_first_entry(&list, struct reada_extctl, list);
		list_del(&rec->list);
		rc = rec->rc;
		kfree(rec);

		/*
		 * take a temporary ref so that dropping the last element
		 * cannot free rc before the wake_up below
		 */
		kref_get(&rc->refcnt);
		if (atomic_dec_and_test(&rc->elems)) {
			kref_put(&rc->refcnt, reada_control_release);
			wake_up(&rc->wait);
		}
		kref_put(&rc->refcnt, reada_control_release);

		reada_extent_put(fs_info, re);	/* one ref for each entry */
	}
	reada_extent_put(fs_info, re);	/* our ref */
	if (for_dev)
		atomic_dec(&for_dev->reada_in_flight);

	return 0;
}

/*
 * start is passed separately in case eb is NULL, which may be the case with
 * failed I/O
 */
int btree_readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
			 u64 start, int err)
{
	int ret;

	ret = __readahead_hook(root, eb, start, err);

	reada_start_machine(root->fs_info);

	return ret;
}

static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
					  struct btrfs_device *dev, u64 logical,
					  struct btrfs_bio *bbio)
{
	int ret;
	struct reada_zone *zone;
	struct btrfs_block_group_cache *cache = NULL;
	u64 start;
	u64 end;
	int i;

	zone = NULL;
	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
				     logical >> PAGE_CACHE_SHIFT, 1);
	if (ret == 1)
		kref_get(&zone->refcnt);
	spin_unlock(&fs_info->reada_lock);

	if (ret == 1) {
		if (logical >= zone->start && logical < zone->end)
			return zone;
		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}

	cache = btrfs_lookup_block_group(fs_info, logical);
	if (!cache)
		return NULL;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;
	btrfs_put_block_group(cache);

	zone = kzalloc(sizeof(*zone), GFP_NOFS);
	if (!zone)
		return NULL;

	zone->start = start;
	zone->end = end;
	INIT_LIST_HEAD(&zone->list);
	spin_lock_init(&zone->lock);
	zone->locked = 0;
	kref_init(&zone->refcnt);
	zone->elems = 0;
	zone->device = dev; /* our device always sits at index 0 */
	for (i = 0; i < bbio->num_stripes; ++i) {
		/* bounds have already been checked */
		zone->devs[i] = bbio->stripes[i].dev;
	}
	zone->ndevs = bbio->num_stripes;

	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_insert(&dev->reada_zones,
				(unsigned long)(zone->end >> PAGE_CACHE_SHIFT),
				zone);

	if (ret == -EEXIST) {
		kfree(zone);
		ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
					     logical >> PAGE_CACHE_SHIFT, 1);
		if (ret == 1)
			kref_get(&zone->refcnt);
	}
	spin_unlock(&fs_info->reada_lock);

	return zone;
}

static struct reada_extent *reada_find_extent(struct btrfs_root *root,
					      u64 logical,
					      struct btrfs_key *top, int level)
{
	int ret;
	struct reada_extent *re = NULL;
	struct reada_extent *re_exist = NULL;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_bio *bbio = NULL;
	struct btrfs_device *dev;
	struct btrfs_device *prev_dev;
	u32 blocksize;
	u64 length;
	int nzones = 0;
	int i;
	unsigned long index = logical >> PAGE_CACHE_SHIFT;
	int dev_replace_is_ongoing;

	spin_lock(&fs_info->reada_lock);
	re = radix_tree_lookup(&fs_info->reada_tree, index);
	if (re)
		re->refcnt++;
	spin_unlock(&fs_info->reada_lock);

	if (re)
		return re;

	re = kzalloc(sizeof(*re), GFP_NOFS);
	if (!re)
		return NULL;

	blocksize = btrfs_level_size(root, level);
	re->logical = logical;
	re->blocksize = blocksize;
	re->top = *top;
	INIT_LIST_HEAD(&re->extctl);
	spin_lock_init(&re->lock);
	re->refcnt = 1;

	/*
	 * map block
	 */
	length = blocksize;
	ret = btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, logical, &length,
			      &bbio, 0);
	if (ret || !bbio || length < blocksize)
		goto error;

	if (bbio->num_stripes > BTRFS_MAX_MIRRORS) {
		btrfs_err(root->fs_info,
			   "readahead: more than %d copies not supported",
			   BTRFS_MAX_MIRRORS);
		goto error;
	}

	for (nzones = 0; nzones < bbio->num_stripes; ++nzones) {
		struct reada_zone *zone;

		dev = bbio->stripes[nzones].dev;
		zone = reada_find_zone(fs_info, dev, logical, bbio);
		if (!zone)
			break;

		re->zones[nzones] = zone;
		spin_lock(&zone->lock);
		if (!zone->elems)
			kref_get(&zone->refcnt);
		++zone->elems;
		spin_unlock(&zone->lock);
		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	re->nzones = nzones;
	if (nzones == 0) {
		/* not a single zone found, error and out */
		goto error;
	}

	/* insert extent in reada_tree + all per-device trees, all or nothing */
	btrfs_dev_replace_lock(&fs_info->dev_replace);
	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_insert(&fs_info->reada_tree, index, re);
	if (ret == -EEXIST) {
		re_exist = radix_tree_lookup(&fs_info->reada_tree, index);
		BUG_ON(!re_exist);
		re_exist->refcnt++;
		spin_unlock(&fs_info->reada_lock);
		btrfs_dev_replace_unlock(&fs_info->dev_replace);
		goto error;
	}
	if (ret) {
		spin_unlock(&fs_info->reada_lock);
		btrfs_dev_replace_unlock(&fs_info->dev_replace);
		goto error;
	}
	prev_dev = NULL;
	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(
			&fs_info->dev_replace);
	for (i = 0; i < nzones; ++i) {
		dev = bbio->stripes[i].dev;
		if (dev == prev_dev) {
			/*
			 * in case of DUP, just add the first zone. As both
			 * are on the same device, there's nothing to gain
			 * from adding both.
			 * Also, it wouldn't work, as the tree is per device
			 * and adding would fail with EEXIST
			 */
			continue;
		}
		if (!dev->bdev) {
			/* cannot read ahead on missing device */
			continue;
		}
		if (dev_replace_is_ongoing &&
		    dev == fs_info->dev_replace.tgtdev) {
			/*
			 * as this device is selected for reading only as
			 * a last resort, skip it for read ahead.
			 */
			continue;
		}
		prev_dev = dev;
		ret = radix_tree_insert(&dev->reada_extents, index, re);
		if (ret) {
			while (--i >= 0) {
				dev = bbio->stripes[i].dev;
				BUG_ON(dev == NULL);
				/* ignore whether the entry was inserted */
				radix_tree_delete(&dev->reada_extents, index);
			}
			BUG_ON(fs_info == NULL);
			radix_tree_delete(&fs_info->reada_tree, index);
			spin_unlock(&fs_info->reada_lock);
			btrfs_dev_replace_unlock(&fs_info->dev_replace);
			goto error;
		}
	}
	spin_unlock(&fs_info->reada_lock);
	btrfs_dev_replace_unlock(&fs_info->dev_replace);

	kfree(bbio);
	return re;

error:
	while (nzones) {
		struct reada_zone *zone;

		--nzones;
		zone = re->zones[nzones];
		kref_get(&zone->refcnt);
		spin_lock(&zone->lock);
		--zone->elems;
		if (zone->elems == 0) {
			/*
			 * no fs_info->reada_lock needed, as this can't be
			 * the last ref
			 */
			kref_put(&zone->refcnt, reada_zone_release);
		}
		spin_unlock(&zone->lock);

		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	kfree(bbio);
	kfree(re);
	return re_exist;
}

static void reada_extent_put(struct btrfs_fs_info *fs_info,
			     struct reada_extent *re)
{
	int i;
	unsigned long index = re->logical >> PAGE_CACHE_SHIFT;

	spin_lock(&fs_info->reada_lock);
	if (--re->refcnt) {
		spin_unlock(&fs_info->reada_lock);
		return;
	}

	radix_tree_delete(&fs_info->reada_tree, index);
	for (i = 0; i < re->nzones; ++i) {
		struct reada_zone *zone = re->zones[i];

		radix_tree_delete(&zone->device->reada_extents, index);
	}

	spin_unlock(&fs_info->reada_lock);

	for (i = 0; i < re->nzones; ++i) {
		struct reada_zone *zone = re->zones[i];

		kref_get(&zone->refcnt);
		spin_lock(&zone->lock);
		--zone->elems;
		if (zone->elems == 0) {
			/*
			 * no fs_info->reada_lock needed, as this can't be
			 * the last ref
			 */
			kref_put(&zone->refcnt, reada_zone_release);
		}
		spin_unlock(&zone->lock);

		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	if (re->scheduled_for)
		atomic_dec(&re->scheduled_for->reada_in_flight);

	kfree(re);
}

static void reada_zone_release(struct kref *kref)
{
	struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);

	radix_tree_delete(&zone->device->reada_zones,
			  zone->end >> PAGE_CACHE_SHIFT);

	kfree(zone);
}

static void reada_control_release(struct kref *kref)
{
	struct reada_control *rc = container_of(kref, struct reada_control,
						refcnt);

	kfree(rc);
}

static int reada_add_block(struct reada_control *rc, u64 logical,
			   struct btrfs_key *top, int level, u64 generation)
{
	struct btrfs_root *root = rc->root;
	struct reada_extent *re;
	struct reada_extctl *rec;

	re = reada_find_extent(root, logical, top, level); /* takes one ref */
	if (!re)
		return -1;

	rec = kzalloc(sizeof(*rec), GFP_NOFS);
	if (!rec) {
		reada_extent_put(root->fs_info, re);
		return -1;
	}

	rec->rc = rc;
	rec->generation = generation;
	atomic_inc(&rc->elems);

	spin_lock(&re->lock);
	list_add_tail(&rec->list, &re->extctl);
	spin_unlock(&re->lock);

	/* leave the ref on the extent */

	return 0;
}

/*
 * called with fs_info->reada_lock held
 */
static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)
{
	int i;
	unsigned long index = zone->end >> PAGE_CACHE_SHIFT;

	for (i = 0; i < zone->ndevs; ++i) {
		struct reada_zone *peer;

		peer = radix_tree_lookup(&zone->devs[i]->reada_zones, index);
		if (peer && peer->device != zone->device)
			peer->locked = lock;
	}
}

/*
 * called with fs_info->reada_lock held
 */
static int reada_pick_zone(struct btrfs_device *dev)
{
	struct reada_zone *top_zone = NULL;
	struct reada_zone *top_locked_zone = NULL;
	u64 top_elems = 0;
	u64 top_locked_elems = 0;
	unsigned long index = 0;
	int ret;

	if (dev->reada_curr_zone) {
		reada_peer_zones_set_lock(dev->reada_curr_zone, 0);
		kref_put(&dev->reada_curr_zone->refcnt, reada_zone_release);
		dev->reada_curr_zone = NULL;
	}
	/* pick the zone with the most elements */
	while (1) {
		struct reada_zone *zone;

		ret = radix_tree_gang_lookup(&dev->reada_zones,
					     (void **)&zone, index, 1);
		if (ret == 0)
			break;
		index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
		if (zone->locked) {
			if (zone->elems > top_locked_elems) {
				top_locked_elems = zone->elems;
				top_locked_zone = zone;
			}
		} else {
			if (zone->elems > top_elems) {
				top_elems = zone->elems;
				top_zone = zone;
			}
		}
	}
	if (top_zone)
		dev->reada_curr_zone = top_zone;
	else if (top_locked_zone)
		dev->reada_curr_zone = top_locked_zone;
	else
		return 0;

	dev->reada_next = dev->reada_curr_zone->start;
	kref_get(&dev->reada_curr_zone->refcnt);
	reada_peer_zones_set_lock(dev->reada_curr_zone, 1);

	return 1;
}

static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
				   struct btrfs_device *dev)
{
	struct reada_extent *re = NULL;
	int mirror_num = 0;
	struct extent_buffer *eb = NULL;
	u64 logical;
	u32 blocksize;
	int ret;
	int i;
	int need_kick = 0;

	spin_lock(&fs_info->reada_lock);
	if (dev->reada_curr_zone == NULL) {
		ret = reada_pick_zone(dev);
		if (!ret) {
			spin_unlock(&fs_info->reada_lock);
			return 0;
		}
	}
	/*
	 * FIXME currently we issue the reads one extent at a time. If we have
	 * a contiguous block of extents, we could also coalesce them or use
	 * plugging to speed things up
	 */
	ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
				     dev->reada_next >> PAGE_CACHE_SHIFT, 1);
	if (ret == 0 || re->logical >= dev->reada_curr_zone->end) {
		ret = reada_pick_zone(dev);
		if (!ret) {
			spin_unlock(&fs_info->reada_lock);
			return 0;
		}
		re = NULL;
		ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
					dev->reada_next >> PAGE_CACHE_SHIFT, 1);
	}
	if (ret == 0) {
		spin_unlock(&fs_info->reada_lock);
		return 0;
	}
	dev->reada_next = re->logical + re->blocksize;
	re->refcnt++;

	spin_unlock(&fs_info->reada_lock);

	/*
	 * find mirror num
	 */
	for (i = 0; i < re->nzones; ++i) {
		if (re->zones[i]->device == dev) {
			mirror_num = i + 1;
			break;
		}
	}
	logical = re->logical;
	blocksize = re->blocksize;

	spin_lock(&re->lock);
	if (re->scheduled_for == NULL) {
		re->scheduled_for = dev;
		need_kick = 1;
	}
	spin_unlock(&re->lock);

	reada_extent_put(fs_info, re);

	if (!need_kick)
		return 0;

	atomic_inc(&dev->reada_in_flight);
	ret = reada_tree_block_flagged(fs_info->extent_root, logical, blocksize,
			 mirror_num, &eb);
	if (ret)
		__readahead_hook(fs_info->extent_root, NULL, logical, ret);
	else if (eb)
		__readahead_hook(fs_info->extent_root, eb, eb->start, ret);

	if (eb)
		free_extent_buffer(eb);

	return 1;
}
735
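/*
 * Hypothetical sketch of the plugging idea from the FIXME in
 * reada_start_machine_dev above (an assumption about one possible
 * approach, not existing code): a caller issuing a batch of reads
 * could wrap them in a block plug so contiguous requests get merged
 * before they hit the device:
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	while (reada_start_machine_dev(fs_info, dev))
 *		;	(each call issues at most one extent read)
 *	blk_finish_plug(&plug);
 */
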
static void reada_start_machine_worker(struct btrfs_work *work)
{
	struct reada_machine_work *rmw;
	struct btrfs_fs_info *fs_info;
	int old_ioprio;

	rmw = container_of(work, struct reada_machine_work, work);
	fs_info = rmw->fs_info;

	kfree(rmw);

	old_ioprio = IOPRIO_PRIO_VALUE(task_nice_ioclass(current),
				       task_nice_ioprio(current));
	set_task_ioprio(current, BTRFS_IOPRIO_READA);
	__reada_start_machine(fs_info);
	set_task_ioprio(current, old_ioprio);
}

static void __reada_start_machine(struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 enqueued;
	u64 total = 0;
	int i;

	do {
		enqueued = 0;
		list_for_each_entry(device, &fs_devices->devices, dev_list) {
			if (atomic_read(&device->reada_in_flight) <
			    MAX_IN_FLIGHT)
				enqueued += reada_start_machine_dev(fs_info,
								    device);
		}
		total += enqueued;
	} while (enqueued && total < 10000);

	if (enqueued == 0)
		return;

	/*
	 * If everything is already in the cache, this is effectively single
	 * threaded. To a) not hold the caller for too long and b) to utilize
	 * more cores, we break the loop above once 10000 extents have been
	 * enqueued and let workers finish the rest. This will distribute the
	 * load to the cores.
	 */
	for (i = 0; i < 2; ++i)
		reada_start_machine(fs_info);
}

static void reada_start_machine(struct btrfs_fs_info *fs_info)
{
	struct reada_machine_work *rmw;

	rmw = kzalloc(sizeof(*rmw), GFP_NOFS);
	if (!rmw) {
		/* FIXME we cannot handle this properly right now */
		BUG();
	}
	btrfs_init_work(&rmw->work, reada_start_machine_worker, NULL, NULL);
	rmw->fs_info = fs_info;

	btrfs_queue_work(fs_info->readahead_workers, &rmw->work);
}

#ifdef DEBUG
static void dump_devs(struct btrfs_fs_info *fs_info, int all)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	unsigned long index;
	int ret;
	int i;
	int j;
	int cnt;

	spin_lock(&fs_info->reada_lock);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		printk(KERN_DEBUG "dev %lld has %d in flight\n", device->devid,
			atomic_read(&device->reada_in_flight));
		index = 0;
		while (1) {
			struct reada_zone *zone;

			ret = radix_tree_gang_lookup(&device->reada_zones,
						     (void **)&zone, index, 1);
			if (ret == 0)
				break;
			printk(KERN_DEBUG "  zone %llu-%llu elems %llu locked "
				"%d devs", zone->start, zone->end, zone->elems,
				zone->locked);
			for (j = 0; j < zone->ndevs; ++j) {
				printk(KERN_CONT " %lld",
					zone->devs[j]->devid);
			}
			if (device->reada_curr_zone == zone)
				printk(KERN_CONT " curr off %llu",
					device->reada_next - zone->start);
			printk(KERN_CONT "\n");
			index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
		}
		cnt = 0;
		index = 0;
		while (all) {
			struct reada_extent *re = NULL;

			ret = radix_tree_gang_lookup(&device->reada_extents,
						     (void **)&re, index, 1);
			if (ret == 0)
				break;
			printk(KERN_DEBUG
				"  re: logical %llu size %u empty %d for %lld",
				re->logical, re->blocksize,
				list_empty(&re->extctl), re->scheduled_for ?
				re->scheduled_for->devid : -1);

			for (i = 0; i < re->nzones; ++i) {
				printk(KERN_CONT " zone %llu-%llu devs",
					re->zones[i]->start,
					re->zones[i]->end);
				for (j = 0; j < re->zones[i]->ndevs; ++j) {
					printk(KERN_CONT " %lld",
						re->zones[i]->devs[j]->devid);
				}
			}
			printk(KERN_CONT "\n");
			index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
			if (++cnt > 15)
				break;
		}
	}

	index = 0;
	cnt = 0;
	while (all) {
		struct reada_extent *re = NULL;

		ret = radix_tree_gang_lookup(&fs_info->reada_tree, (void **)&re,
					     index, 1);
		if (ret == 0)
			break;
		if (!re->scheduled_for) {
			index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
			continue;
		}
		printk(KERN_DEBUG
			"re: logical %llu size %u list empty %d for %lld",
			re->logical, re->blocksize, list_empty(&re->extctl),
			re->scheduled_for ? re->scheduled_for->devid : -1);
		for (i = 0; i < re->nzones; ++i) {
			printk(KERN_CONT " zone %llu-%llu devs",
				re->zones[i]->start,
				re->zones[i]->end);
			for (j = 0; j < re->zones[i]->ndevs; ++j) {
				printk(KERN_CONT " %lld",
					re->zones[i]->devs[j]->devid);
			}
		}
		printk(KERN_CONT "\n");
		index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
	}
	spin_unlock(&fs_info->reada_lock);
}
#endif

/*
 * interface
 */
struct reada_control *btrfs_reada_add(struct btrfs_root *root,
			struct btrfs_key *key_start, struct btrfs_key *key_end)
{
	struct reada_control *rc;
	u64 start;
	u64 generation;
	int level;
	struct extent_buffer *node;
	static struct btrfs_key max_key = {
		.objectid = (u64)-1,
		.type = (u8)-1,
		.offset = (u64)-1
	};

	rc = kzalloc(sizeof(*rc), GFP_NOFS);
	if (!rc)
		return ERR_PTR(-ENOMEM);

	rc->root = root;
	rc->key_start = *key_start;
	rc->key_end = *key_end;
	atomic_set(&rc->elems, 0);
	init_waitqueue_head(&rc->wait);
	kref_init(&rc->refcnt);
	kref_get(&rc->refcnt); /* one ref for having elements */

	node = btrfs_root_node(root);
	start = node->start;
	level = btrfs_header_level(node);
	generation = btrfs_header_generation(node);
	free_extent_buffer(node);

	if (reada_add_block(rc, start, &max_key, level, generation)) {
		kfree(rc);
		return ERR_PTR(-ENOMEM);
	}

	reada_start_machine(root->fs_info);

	return rc;
}

#ifdef DEBUG
int btrfs_reada_wait(void *handle)
{
	struct reada_control *rc = handle;

	while (atomic_read(&rc->elems)) {
		wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
				   5 * HZ);
		dump_devs(rc->root->fs_info,
			  atomic_read(&rc->elems) < 10 ? 1 : 0);
	}

	dump_devs(rc->root->fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);

	kref_put(&rc->refcnt, reada_control_release);

	return 0;
}
#else
int btrfs_reada_wait(void *handle)
{
	struct reada_control *rc = handle;

	while (atomic_read(&rc->elems)) {
		wait_event(rc->wait, atomic_read(&rc->elems) == 0);
	}

	kref_put(&rc->refcnt, reada_control_release);

	return 0;
}
#endif

void btrfs_reada_detach(void *handle)
{
	struct reada_control *rc = handle;

	kref_put(&rc->refcnt, reada_control_release);
}