v5.9
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3   raid0.c : Multiple Devices driver for Linux
  4	     Copyright (C) 1994-96 Marc ZYNGIER
  5	     <zyngier@ufr-info-p7.ibp.fr> or
  6	     <maz@gloups.fdn.fr>
  7	     Copyright (C) 1999, 2000 Ingo Molnar, Red Hat
  8
  9   RAID-0 management functions.
 10
 11*/
 12
 13#include <linux/blkdev.h>
 14#include <linux/seq_file.h>
 15#include <linux/module.h>
 16#include <linux/slab.h>
 17#include <trace/events/block.h>
 18#include "md.h"
 19#include "raid0.h"
 20#include "raid5.h"
 21
 22static int default_layout = 0;
 23module_param(default_layout, int, 0644);
 24
 25#define UNSUPPORTED_MDDEV_FLAGS		\
 26	((1L << MD_HAS_JOURNAL) |	\
 27	 (1L << MD_JOURNAL_CLEAN) |	\
 28	 (1L << MD_FAILFAST_SUPPORTED) |\
 29	 (1L << MD_HAS_PPL) |		\
 30	 (1L << MD_HAS_MULTIPLE_PPLS))
 31
 32/*
 33 * inform the user of the raid configuration
 34*/
 35static void dump_zones(struct mddev *mddev)
 36{
 37	int j, k;
 38	sector_t zone_size = 0;
 39	sector_t zone_start = 0;
 40	char b[BDEVNAME_SIZE];
 41	struct r0conf *conf = mddev->private;
 42	int raid_disks = conf->strip_zone[0].nb_dev;
 43	pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
 44		 mdname(mddev),
 45		 conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
 46	for (j = 0; j < conf->nr_strip_zones; j++) {
 47		char line[200];
 48		int len = 0;
 49
 50		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
 51			len += snprintf(line+len, 200-len, "%s%s", k?"/":"",
 52					bdevname(conf->devlist[j*raid_disks
 53							       + k]->bdev, b));
 54		pr_debug("md: zone%d=[%s]\n", j, line);
 55
 56		zone_size  = conf->strip_zone[j].zone_end - zone_start;
 57		pr_debug("      zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
 58			(unsigned long long)zone_start>>1,
 59			(unsigned long long)conf->strip_zone[j].dev_start>>1,
 60			(unsigned long long)zone_size>>1);
 61		zone_start = conf->strip_zone[j].zone_end;
 62	}
 63}
 64
 65static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
 66{
 67	int i, c, err;
 68	sector_t curr_zone_end, sectors;
 69	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
 70	struct strip_zone *zone;
 71	int cnt;
 72	char b[BDEVNAME_SIZE];
 73	char b2[BDEVNAME_SIZE];
 74	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
 75	unsigned blksize = 512;
 76
 77	*private_conf = ERR_PTR(-ENOMEM);
 78	if (!conf)
 79		return -ENOMEM;
 80	rdev_for_each(rdev1, mddev) {
 81		pr_debug("md/raid0:%s: looking at %s\n",
 82			 mdname(mddev),
 83			 bdevname(rdev1->bdev, b));
 84		c = 0;
 85
 86		/* round size to chunk_size */
 87		sectors = rdev1->sectors;
 88		sector_div(sectors, mddev->chunk_sectors);
 89		rdev1->sectors = sectors * mddev->chunk_sectors;
 90
 91		blksize = max(blksize, queue_logical_block_size(
 92				      rdev1->bdev->bd_disk->queue));
 93
 94		rdev_for_each(rdev2, mddev) {
 95			pr_debug("md/raid0:%s:   comparing %s(%llu)"
 96				 " with %s(%llu)\n",
 97				 mdname(mddev),
 98				 bdevname(rdev1->bdev,b),
 99				 (unsigned long long)rdev1->sectors,
100				 bdevname(rdev2->bdev,b2),
101				 (unsigned long long)rdev2->sectors);
102			if (rdev2 == rdev1) {
103				pr_debug("md/raid0:%s:   END\n",
104					 mdname(mddev));
105				break;
106			}
107			if (rdev2->sectors == rdev1->sectors) {
108				/*
109				 * Not unique, don't count it as a new
110				 * group
111				 */
112				pr_debug("md/raid0:%s:   EQUAL\n",
113					 mdname(mddev));
114				c = 1;
115				break;
116			}
117			pr_debug("md/raid0:%s:   NOT EQUAL\n",
118				 mdname(mddev));
119		}
120		if (!c) {
121			pr_debug("md/raid0:%s:   ==> UNIQUE\n",
122				 mdname(mddev));
123			conf->nr_strip_zones++;
124			pr_debug("md/raid0:%s: %d zones\n",
125				 mdname(mddev), conf->nr_strip_zones);
126		}
127	}
128	pr_debug("md/raid0:%s: FINAL %d zones\n",
129		 mdname(mddev), conf->nr_strip_zones);
130
131	if (conf->nr_strip_zones == 1) {
132		conf->layout = RAID0_ORIG_LAYOUT;
133	} else if (mddev->layout == RAID0_ORIG_LAYOUT ||
134		   mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
135		conf->layout = mddev->layout;
136	} else if (default_layout == RAID0_ORIG_LAYOUT ||
137		   default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
138		conf->layout = default_layout;
139	} else {
140		pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
141		       mdname(mddev));
142		pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
143		err = -ENOTSUPP;
144		goto abort;
145	}
146	/*
147	 * now since we have the hard sector sizes, we can make sure
148	 * chunk size is a multiple of that sector size
149	 */
150	if ((mddev->chunk_sectors << 9) % blksize) {
151		pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
152			mdname(mddev),
153			mddev->chunk_sectors << 9, blksize);
154		err = -EINVAL;
155		goto abort;
156	}
157
158	err = -ENOMEM;
159	conf->strip_zone = kcalloc(conf->nr_strip_zones,
160				   sizeof(struct strip_zone),
161				   GFP_KERNEL);
162	if (!conf->strip_zone)
163		goto abort;
164	conf->devlist = kzalloc(array3_size(sizeof(struct md_rdev *),
165					    conf->nr_strip_zones,
166					    mddev->raid_disks),
167				GFP_KERNEL);
168	if (!conf->devlist)
169		goto abort;
170
171	/* The first zone must contain all devices, so here we check that
172	 * there is a proper alignment of slots to devices and find them all
173	 */
174	zone = &conf->strip_zone[0];
175	cnt = 0;
176	smallest = NULL;
177	dev = conf->devlist;
178	err = -EINVAL;
179	rdev_for_each(rdev1, mddev) {
180		int j = rdev1->raid_disk;
181
182		if (mddev->level == 10) {
183			/* taking over a raid10-n2 array */
184			j /= 2;
185			rdev1->new_raid_disk = j;
186		}
187
188		if (mddev->level == 1) {
 189			/* taking over a raid1 array -
190			 * we have only one active disk
191			 */
192			j = 0;
193			rdev1->new_raid_disk = j;
194		}
195
196		if (j < 0) {
197			pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
198				mdname(mddev));
199			goto abort;
200		}
201		if (j >= mddev->raid_disks) {
202			pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
203				mdname(mddev), j);
204			goto abort;
205		}
206		if (dev[j]) {
207			pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
208				mdname(mddev), j);
209			goto abort;
210		}
211		dev[j] = rdev1;
212
213		if (!smallest || (rdev1->sectors < smallest->sectors))
214			smallest = rdev1;
215		cnt++;
216	}
217	if (cnt != mddev->raid_disks) {
218		pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
219			mdname(mddev), cnt, mddev->raid_disks);
220		goto abort;
221	}
222	zone->nb_dev = cnt;
223	zone->zone_end = smallest->sectors * cnt;
224
225	curr_zone_end = zone->zone_end;
226
227	/* now do the other zones */
228	for (i = 1; i < conf->nr_strip_zones; i++)
229	{
230		int j;
231
232		zone = conf->strip_zone + i;
233		dev = conf->devlist + i * mddev->raid_disks;
234
235		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
236		zone->dev_start = smallest->sectors;
237		smallest = NULL;
238		c = 0;
239
240		for (j=0; j<cnt; j++) {
241			rdev = conf->devlist[j];
242			if (rdev->sectors <= zone->dev_start) {
243				pr_debug("md/raid0:%s: checking %s ... nope\n",
244					 mdname(mddev),
245					 bdevname(rdev->bdev, b));
246				continue;
247			}
248			pr_debug("md/raid0:%s: checking %s ..."
249				 " contained as device %d\n",
250				 mdname(mddev),
251				 bdevname(rdev->bdev, b), c);
252			dev[c] = rdev;
253			c++;
254			if (!smallest || rdev->sectors < smallest->sectors) {
255				smallest = rdev;
256				pr_debug("md/raid0:%s:  (%llu) is smallest!.\n",
257					 mdname(mddev),
258					 (unsigned long long)rdev->sectors);
259			}
260		}
261
262		zone->nb_dev = c;
263		sectors = (smallest->sectors - zone->dev_start) * c;
264		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
265			 mdname(mddev),
266			 zone->nb_dev, (unsigned long long)sectors);
267
268		curr_zone_end += sectors;
269		zone->zone_end = curr_zone_end;
270
271		pr_debug("md/raid0:%s: current zone start: %llu\n",
272			 mdname(mddev),
273			 (unsigned long long)smallest->sectors);
274	}
275
276	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
277	*private_conf = conf;
278
279	return 0;
280abort:
281	kfree(conf->strip_zone);
282	kfree(conf->devlist);
283	kfree(conf);
284	*private_conf = ERR_PTR(err);
285	return err;
286}
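
The zone-counting pass above reduces to counting distinct member sizes: each distinct size opens one more zone, served by the devices still larger than it. A minimal userspace model, with made-up chunk-aligned sizes (an illustration, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long long sectors[] = { 1000, 1000, 2000, 3000 };
	int n = 4, zones = 0;

	for (int i = 0; i < n; i++) {
		int unique = 1;
		for (int j = 0; j < i; j++)
			if (sectors[j] == sectors[i])
				unique = 0;	/* EQUAL: same size group */
		zones += unique;		/* UNIQUE: one more zone */
	}
	/* prints 3: zone 0 = 4 x 1000 sectors across all members,
	 * zone 1 = 2 x (2000 - 1000), zone 2 = 1 x (3000 - 2000) */
	printf("%d zones\n", zones);
	return 0;
}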
287
288/* Find the zone which holds a particular offset
289 * Update *sectorp to be an offset in that zone
290 */
291static struct strip_zone *find_zone(struct r0conf *conf,
292				    sector_t *sectorp)
293{
294	int i;
295	struct strip_zone *z = conf->strip_zone;
296	sector_t sector = *sectorp;
297
298	for (i = 0; i < conf->nr_strip_zones; i++)
299		if (sector < z[i].zone_end) {
300			if (i)
301				*sectorp = sector - z[i-1].zone_end;
302			return z + i;
303		}
304	BUG();
305}
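
find_zone() is a linear scan over cumulative zone ends; when the hit is not in zone 0, the previous zone's end is subtracted so *sectorp becomes zone-relative. A standalone sketch with assumed zone ends of 4000 and 6000 sectors:

#include <stdio.h>

int main(void)
{
	unsigned long long zone_end[] = { 4000, 6000 };
	unsigned long long sector = 4500;

	for (int i = 0; i < 2; i++)
		if (sector < zone_end[i]) {
			if (i)
				sector -= zone_end[i - 1];
			/* prints "zone 1, offset 500" */
			printf("zone %d, offset %llu\n", i, sector);
			return 0;
		}
	return 1;	/* past the last zone: the kernel BUG()s here */
}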
306
307/*
 308 * remaps the bio to the target device. We separate two flows:
 309 * a power-of-2 flow and a general flow, for the sake of performance.
310*/
311static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
312				sector_t sector, sector_t *sector_offset)
313{
314	unsigned int sect_in_chunk;
315	sector_t chunk;
316	struct r0conf *conf = mddev->private;
317	int raid_disks = conf->strip_zone[0].nb_dev;
318	unsigned int chunk_sects = mddev->chunk_sectors;
319
320	if (is_power_of_2(chunk_sects)) {
321		int chunksect_bits = ffz(~chunk_sects);
322		/* find the sector offset inside the chunk */
323		sect_in_chunk  = sector & (chunk_sects - 1);
324		sector >>= chunksect_bits;
325		/* chunk in zone */
326		chunk = *sector_offset;
327		/* quotient is the chunk in real device*/
328		sector_div(chunk, zone->nb_dev << chunksect_bits);
329	} else{
330		sect_in_chunk = sector_div(sector, chunk_sects);
331		chunk = *sector_offset;
332		sector_div(chunk, chunk_sects * zone->nb_dev);
333	}
334	/*
335	*  position the bio over the real device
336	*  real sector = chunk in device + starting of zone
337	*	+ the position in the chunk
338	*/
339	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
340	return conf->devlist[(zone - conf->strip_zone)*raid_disks
341			     + sector_div(sector, zone->nb_dev)];
342}
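
The arithmetic in map_sector() splits an offset into a chunk number, a device index and an in-chunk position. A worked userspace example of the power-of-two branch, assuming chunk_sects = 8, a 3-device zone, and a zone starting at array sector 0 so the sector equals the zone offset (illustrative, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long long sector = 100, offset = 100;
	unsigned int chunk_sects = 8, nb_dev = 3;

	unsigned int sect_in_chunk = sector & (chunk_sects - 1);	/* 100 % 8 = 4 */
	unsigned long long chunk_no = sector / chunk_sects;		/* chunk 12 */
	unsigned int dev = chunk_no % nb_dev;				/* device 0 */
	unsigned long long dev_chunk = offset / (nb_dev * chunk_sects);	/* chunk 4 on that device */
	unsigned long long dev_sector = dev_chunk * chunk_sects + sect_in_chunk;

	/* array sector 100 -> device 0, sector 36 within the zone */
	printf("dev=%u sector=%llu\n", dev, dev_sector);
	return 0;
}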
343
344static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
345{
346	sector_t array_sectors = 0;
347	struct md_rdev *rdev;
348
349	WARN_ONCE(sectors || raid_disks,
350		  "%s does not support generic reshape\n", __func__);
351
352	rdev_for_each(rdev, mddev)
353		array_sectors += (rdev->sectors &
354				  ~(sector_t)(mddev->chunk_sectors-1));
355
356	return array_sectors;
357}
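
The mask in raid0_size() rounds each member down to a whole number of chunks; the bit trick rounds down to a chunk multiple when chunk_sectors is a power of two. A quick illustration with assumed values:

#include <stdio.h>

int main(void)
{
	unsigned long long sectors = 1000003;	/* odd-sized member */
	unsigned long long chunk = 512;		/* 256K chunks, power of two */

	/* 1000003 & ~511 = 999936 = 1953 * 512 */
	printf("%llu\n", sectors & ~(chunk - 1));
	return 0;
}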
358
359static void raid0_free(struct mddev *mddev, void *priv);
360
361static int raid0_run(struct mddev *mddev)
362{
363	struct r0conf *conf;
364	int ret;
365
366	if (mddev->chunk_sectors == 0) {
367		pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
368		return -EINVAL;
369	}
370	if (md_check_no_bitmap(mddev))
371		return -EINVAL;
372
373	/* if private is not null, we are here after takeover */
374	if (mddev->private == NULL) {
375		ret = create_strip_zones(mddev, &conf);
376		if (ret < 0)
377			return ret;
378		mddev->private = conf;
379	}
380	conf = mddev->private;
381	if (mddev->queue) {
382		struct md_rdev *rdev;
383		bool discard_supported = false;
384
385		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
386		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
387		blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
388		blk_queue_max_discard_sectors(mddev->queue, UINT_MAX);
389
390		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
391		blk_queue_io_opt(mddev->queue,
392				 (mddev->chunk_sectors << 9) * mddev->raid_disks);
393
394		rdev_for_each(rdev, mddev) {
395			disk_stack_limits(mddev->gendisk, rdev->bdev,
396					  rdev->data_offset << 9);
397			if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
398				discard_supported = true;
399		}
400		if (!discard_supported)
401			blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
402		else
403			blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
404	}
405
406	/* calculate array device size */
407	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
408
409	pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
410		 mdname(mddev),
411		 (unsigned long long)mddev->array_sectors);
412
413	if (mddev->queue) {
414		/* calculate the max read-ahead size.
415		 * For read-ahead of large files to be effective, we need to
416		 * readahead at least twice a whole stripe. i.e. number of devices
417		 * multiplied by chunk size times 2.
418		 * If an individual device has an ra_pages greater than the
419		 * chunk size, then we will not drive that device as hard as it
420		 * wants.  We consider this a configuration error: a larger
421		 * chunksize should be used in that case.
422		 */
423		int stripe = mddev->raid_disks *
424			(mddev->chunk_sectors << 9) / PAGE_SIZE;
425		if (mddev->queue->backing_dev_info->ra_pages < 2* stripe)
426			mddev->queue->backing_dev_info->ra_pages = 2* stripe;
427	}
428
429	dump_zones(mddev);
430
431	ret = md_integrity_register(mddev);
432
433	return ret;
434}
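
The read-ahead sizing at the end of raid0_run() targets two full stripes. A worked example with assumed values (4 members, 256 KiB chunks, 4 KiB pages; not kernel code):

#include <stdio.h>

int main(void)
{
	int raid_disks = 4;
	int chunk_sectors = 512;	/* 256 KiB in 512-byte sectors */
	long page_size = 4096;

	int stripe = raid_disks * (chunk_sectors << 9) / page_size;
	/* stripe = 4 * 262144 / 4096 = 256 pages; ra_pages is raised to
	 * at least 2 * 256 = 512 pages, i.e. 2 MiB of read-ahead. */
	printf("stripe=%d ra_pages>=%d\n", stripe, 2 * stripe);
	return 0;
}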
435
436static void raid0_free(struct mddev *mddev, void *priv)
437{
438	struct r0conf *conf = priv;
439
440	kfree(conf->strip_zone);
441	kfree(conf->devlist);
442	kfree(conf);
443}
444
445/*
 446 * Is the io distributed over 1 or more chunks?
447*/
448static inline int is_io_in_chunk_boundary(struct mddev *mddev,
449			unsigned int chunk_sects, struct bio *bio)
450{
451	if (likely(is_power_of_2(chunk_sects))) {
452		return chunk_sects >=
453			((bio->bi_iter.bi_sector & (chunk_sects-1))
454					+ bio_sectors(bio));
455	} else{
456		sector_t sector = bio->bi_iter.bi_sector;
457		return chunk_sects >= (sector_div(sector, chunk_sects)
458						+ bio_sectors(bio));
459	}
460}
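
The power-of-two branch above asks whether the in-chunk offset plus the bio length still fits in one chunk. A standalone check with chunk_sects = 8 (illustrative only):

#include <stdio.h>

static int in_one_chunk(unsigned long long sector, unsigned int len,
			unsigned int chunk_sects)
{
	return chunk_sects >= (sector & (chunk_sects - 1)) + len;
}

int main(void)
{
	/* sectors 6..9 straddle the 8-sector boundary: prints 0 then 1 */
	printf("%d\n", in_one_chunk(6, 4, 8));
	printf("%d\n", in_one_chunk(8, 4, 8));
	return 0;
}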
461
462static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
463{
464	struct r0conf *conf = mddev->private;
465	struct strip_zone *zone;
466	sector_t start = bio->bi_iter.bi_sector;
467	sector_t end;
468	unsigned int stripe_size;
469	sector_t first_stripe_index, last_stripe_index;
470	sector_t start_disk_offset;
471	unsigned int start_disk_index;
472	sector_t end_disk_offset;
473	unsigned int end_disk_index;
474	unsigned int disk;
475
476	zone = find_zone(conf, &start);
477
478	if (bio_end_sector(bio) > zone->zone_end) {
479		struct bio *split = bio_split(bio,
480			zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
481			&mddev->bio_set);
482		bio_chain(split, bio);
483		submit_bio_noacct(bio);
484		bio = split;
485		end = zone->zone_end;
486	} else
487		end = bio_end_sector(bio);
488
489	if (zone != conf->strip_zone)
490		end = end - zone[-1].zone_end;
491
 492	/* Now start and end are offsets within the zone */
493	stripe_size = zone->nb_dev * mddev->chunk_sectors;
494
495	first_stripe_index = start;
496	sector_div(first_stripe_index, stripe_size);
497	last_stripe_index = end;
498	sector_div(last_stripe_index, stripe_size);
499
500	start_disk_index = (int)(start - first_stripe_index * stripe_size) /
501		mddev->chunk_sectors;
502	start_disk_offset = ((int)(start - first_stripe_index * stripe_size) %
503		mddev->chunk_sectors) +
504		first_stripe_index * mddev->chunk_sectors;
505	end_disk_index = (int)(end - last_stripe_index * stripe_size) /
506		mddev->chunk_sectors;
507	end_disk_offset = ((int)(end - last_stripe_index * stripe_size) %
508		mddev->chunk_sectors) +
509		last_stripe_index * mddev->chunk_sectors;
510
511	for (disk = 0; disk < zone->nb_dev; disk++) {
512		sector_t dev_start, dev_end;
513		struct bio *discard_bio = NULL;
514		struct md_rdev *rdev;
515
516		if (disk < start_disk_index)
517			dev_start = (first_stripe_index + 1) *
518				mddev->chunk_sectors;
519		else if (disk > start_disk_index)
520			dev_start = first_stripe_index * mddev->chunk_sectors;
521		else
522			dev_start = start_disk_offset;
523
524		if (disk < end_disk_index)
525			dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
526		else if (disk > end_disk_index)
527			dev_end = last_stripe_index * mddev->chunk_sectors;
528		else
529			dev_end = end_disk_offset;
530
531		if (dev_end <= dev_start)
532			continue;
533
534		rdev = conf->devlist[(zone - conf->strip_zone) *
535			conf->strip_zone[0].nb_dev + disk];
536		if (__blkdev_issue_discard(rdev->bdev,
537			dev_start + zone->dev_start + rdev->data_offset,
538			dev_end - dev_start, GFP_NOIO, 0, &discard_bio) ||
539		    !discard_bio)
540			continue;
541		bio_chain(discard_bio, bio);
542		bio_clone_blkg_association(discard_bio, bio);
543		if (mddev->gendisk)
544			trace_block_bio_remap(bdev_get_queue(rdev->bdev),
545				discard_bio, disk_devt(mddev->gendisk),
546				bio->bi_iter.bi_sector);
547		submit_bio_noacct(discard_bio);
548	}
549	bio_endio(bio);
550}
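
The index/offset computations above carve one discard into at most one contiguous range per member. A userspace walk-through with assumed values (2 devices, 8-sector chunks, a discard covering zone sectors [4, 28)); the formulas mirror the code above:

#include <stdio.h>

int main(void)
{
	unsigned long long start = 4, end = 28;
	unsigned int chunk = 8, nb_dev = 2;
	unsigned int stripe_size = nb_dev * chunk;			/* 16 */

	unsigned long long first = start / stripe_size;			/* 0 */
	unsigned long long last = end / stripe_size;			/* 1 */
	unsigned int start_disk = (start - first * stripe_size) / chunk;	/* 0 */
	unsigned long long start_off = (start - first * stripe_size) % chunk
				       + first * chunk;			/* 4 */
	unsigned int end_disk = (end - last * stripe_size) / chunk;	/* 1 */
	unsigned long long end_off = (end - last * stripe_size) % chunk
				     + last * chunk;			/* 12 */

	for (unsigned int disk = 0; disk < nb_dev; disk++) {
		unsigned long long dev_start, dev_end;

		dev_start = disk < start_disk ? (first + 1) * chunk :
			    disk > start_disk ? first * chunk : start_off;
		dev_end = disk < end_disk ? (last + 1) * chunk :
			  disk > end_disk ? last * chunk : end_off;
		if (dev_end > dev_start)	/* disk 0: [4,16)  disk 1: [0,12) */
			printf("disk %u: [%llu, %llu)\n",
			       disk, dev_start, dev_end);
	}
	return 0;
}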
551
552static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
553{
554	struct r0conf *conf = mddev->private;
555	struct strip_zone *zone;
556	struct md_rdev *tmp_dev;
557	sector_t bio_sector;
558	sector_t sector;
559	sector_t orig_sector;
560	unsigned chunk_sects;
561	unsigned sectors;
562
563	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
564	    && md_flush_request(mddev, bio))
565		return true;
566
567	if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
568		raid0_handle_discard(mddev, bio);
569		return true;
570	}
571
572	bio_sector = bio->bi_iter.bi_sector;
573	sector = bio_sector;
574	chunk_sects = mddev->chunk_sectors;
575
576	sectors = chunk_sects -
577		(likely(is_power_of_2(chunk_sects))
578		 ? (sector & (chunk_sects-1))
579		 : sector_div(sector, chunk_sects));
580
581	/* Restore due to sector_div */
582	sector = bio_sector;
583
584	if (sectors < bio_sectors(bio)) {
585		struct bio *split = bio_split(bio, sectors, GFP_NOIO,
586					      &mddev->bio_set);
587		bio_chain(split, bio);
588		submit_bio_noacct(bio);
589		bio = split;
590	}
591
592	orig_sector = sector;
593	zone = find_zone(mddev->private, &sector);
594	switch (conf->layout) {
595	case RAID0_ORIG_LAYOUT:
596		tmp_dev = map_sector(mddev, zone, orig_sector, &sector);
597		break;
598	case RAID0_ALT_MULTIZONE_LAYOUT:
599		tmp_dev = map_sector(mddev, zone, sector, &sector);
600		break;
601	default:
602		WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
603		bio_io_error(bio);
604		return true;
605	}
606
607	if (unlikely(is_mddev_broken(tmp_dev, "raid0"))) {
608		bio_io_error(bio);
609		return true;
610	}
611
612	bio_set_dev(bio, tmp_dev->bdev);
613	bio->bi_iter.bi_sector = sector + zone->dev_start +
614		tmp_dev->data_offset;
615
616	if (mddev->gendisk)
617		trace_block_bio_remap(bio->bi_disk->queue, bio,
618				disk_devt(mddev->gendisk), bio_sector);
619	mddev_check_writesame(mddev, bio);
620	mddev_check_write_zeroes(mddev, bio);
621	submit_bio_noacct(bio);
622	return true;
623}
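
The two layouts differ only in which sector feeds the device-selection modulo in map_sector(): RAID0_ORIG_LAYOUT uses the array-absolute sector, RAID0_ALT_MULTIZONE_LAYOUT the zone-relative one. When a later zone does not start on a multiple of the stripe width they pick different members, which is why multi-zone arrays must pin a layout. A simplified model of just the device selection, with assumed numbers (not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long long zone_start = 24, array_sector = 32;
	unsigned long long zone_sector = array_sector - zone_start;	/* 8 */
	unsigned int chunk_sects = 8, nb_dev = 2;

	/* RAID0_ORIG_LAYOUT: modulo over the array-absolute chunk number */
	unsigned int dev_orig = (array_sector / chunk_sects) % nb_dev;	/* 0 */
	/* RAID0_ALT_MULTIZONE_LAYOUT: modulo over the zone-relative one */
	unsigned int dev_alt = (zone_sector / chunk_sects) % nb_dev;	/* 1 */

	printf("orig=%u alt=%u\n", dev_orig, dev_alt);
	return 0;
}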
624
625static void raid0_status(struct seq_file *seq, struct mddev *mddev)
626{
627	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
628	return;
629}
630
631static void *raid0_takeover_raid45(struct mddev *mddev)
632{
633	struct md_rdev *rdev;
634	struct r0conf *priv_conf;
635
636	if (mddev->degraded != 1) {
637		pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
638			mdname(mddev),
639			mddev->degraded);
640		return ERR_PTR(-EINVAL);
641	}
642
643	rdev_for_each(rdev, mddev) {
644		/* check slot number for a disk */
645		if (rdev->raid_disk == mddev->raid_disks-1) {
646			pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
647				mdname(mddev));
648			return ERR_PTR(-EINVAL);
649		}
650		rdev->sectors = mddev->dev_sectors;
651	}
652
653	/* Set new parameters */
654	mddev->new_level = 0;
655	mddev->new_layout = 0;
656	mddev->new_chunk_sectors = mddev->chunk_sectors;
657	mddev->raid_disks--;
658	mddev->delta_disks = -1;
659	/* make sure it will be not marked as dirty */
660	mddev->recovery_cp = MaxSector;
661	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
662
663	create_strip_zones(mddev, &priv_conf);
664
665	return priv_conf;
666}
667
668static void *raid0_takeover_raid10(struct mddev *mddev)
669{
670	struct r0conf *priv_conf;
671
672	/* Check layout:
673	 *  - far_copies must be 1
674	 *  - near_copies must be 2
675	 *  - disks number must be even
676	 *  - all mirrors must be already degraded
677	 */
678	if (mddev->layout != ((1 << 8) + 2)) {
 679		pr_warn("md/raid0:%s: Raid0 cannot takeover layout: 0x%x\n",
680			mdname(mddev),
681			mddev->layout);
682		return ERR_PTR(-EINVAL);
683	}
684	if (mddev->raid_disks & 1) {
685		pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
686			mdname(mddev));
687		return ERR_PTR(-EINVAL);
688	}
689	if (mddev->degraded != (mddev->raid_disks>>1)) {
690		pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
691			mdname(mddev));
692		return ERR_PTR(-EINVAL);
693	}
694
695	/* Set new parameters */
696	mddev->new_level = 0;
697	mddev->new_layout = 0;
698	mddev->new_chunk_sectors = mddev->chunk_sectors;
699	mddev->delta_disks = - mddev->raid_disks / 2;
700	mddev->raid_disks += mddev->delta_disks;
701	mddev->degraded = 0;
702	/* make sure it will be not marked as dirty */
703	mddev->recovery_cp = MaxSector;
704	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
705
706	create_strip_zones(mddev, &priv_conf);
707	return priv_conf;
708}
709
710static void *raid0_takeover_raid1(struct mddev *mddev)
711{
712	struct r0conf *priv_conf;
713	int chunksect;
714
715	/* Check layout:
716	 *  - (N - 1) mirror drives must be already faulty
717	 */
718	if ((mddev->raid_disks - 1) != mddev->degraded) {
 719		pr_err("md/raid0:%s: (N - 1) mirror drives must be already faulty!\n",
720		       mdname(mddev));
721		return ERR_PTR(-EINVAL);
722	}
723
724	/*
725	 * a raid1 doesn't have the notion of chunk size, so
726	 * figure out the largest suitable size we can use.
727	 */
728	chunksect = 64 * 2; /* 64K by default */
729
730	/* The array must be an exact multiple of chunksize */
731	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
732		chunksect >>= 1;
733
734	if ((chunksect << 9) < PAGE_SIZE)
735		/* array size does not allow a suitable chunk size */
736		return ERR_PTR(-EINVAL);
737
738	/* Set new parameters */
739	mddev->new_level = 0;
740	mddev->new_layout = 0;
741	mddev->new_chunk_sectors = chunksect;
742	mddev->chunk_sectors = chunksect;
743	mddev->delta_disks = 1 - mddev->raid_disks;
744	mddev->raid_disks = 1;
745	/* make sure it will be not marked as dirty */
746	mddev->recovery_cp = MaxSector;
747	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
748
749	create_strip_zones(mddev, &priv_conf);
750	return priv_conf;
751}
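
The shrinking loop above finds the largest power of two, starting at 64K, that exactly divides the array size. In isolation, with an assumed array size:

#include <stdio.h>

int main(void)
{
	unsigned long long array_sectors = 1000200;
	int chunksect = 64 * 2;			/* start at 64K */

	while (chunksect && (array_sectors & (chunksect - 1)))
		chunksect >>= 1;
	/* 1000200 = 125025 * 8, so chunksect ends up 8 (a 4K chunk),
	 * which still satisfies (chunksect << 9) >= PAGE_SIZE on 4K pages. */
	printf("chunksect=%d\n", chunksect);
	return 0;
}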
752
753static void *raid0_takeover(struct mddev *mddev)
754{
755	/* raid0 can take over:
756	 *  raid4 - if all data disks are active.
757	 *  raid5 - providing it is Raid4 layout and one disk is faulty
758	 *  raid10 - assuming we have all necessary active disks
759	 *  raid1 - with (N -1) mirror drives faulty
760	 */
761
762	if (mddev->bitmap) {
763		pr_warn("md/raid0: %s: cannot takeover array with bitmap\n",
764			mdname(mddev));
765		return ERR_PTR(-EBUSY);
766	}
767	if (mddev->level == 4)
768		return raid0_takeover_raid45(mddev);
769
770	if (mddev->level == 5) {
771		if (mddev->layout == ALGORITHM_PARITY_N)
772			return raid0_takeover_raid45(mddev);
773
774		pr_warn("md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
775			mdname(mddev), ALGORITHM_PARITY_N);
776	}
777
778	if (mddev->level == 10)
779		return raid0_takeover_raid10(mddev);
780
781	if (mddev->level == 1)
782		return raid0_takeover_raid1(mddev);
783
784	pr_warn("Takeover from raid%i to raid0 not supported\n",
785		mddev->level);
786
787	return ERR_PTR(-EINVAL);
788}
789
790static void raid0_quiesce(struct mddev *mddev, int quiesce)
791{
792}
793
794static struct md_personality raid0_personality=
795{
796	.name		= "raid0",
797	.level		= 0,
798	.owner		= THIS_MODULE,
799	.make_request	= raid0_make_request,
800	.run		= raid0_run,
801	.free		= raid0_free,
802	.status		= raid0_status,
803	.size		= raid0_size,
804	.takeover	= raid0_takeover,
805	.quiesce	= raid0_quiesce,
806};
807
808static int __init raid0_init (void)
809{
810	return register_md_personality (&raid0_personality);
811}
812
813static void raid0_exit (void)
814{
815	unregister_md_personality (&raid0_personality);
816}
817
818module_init(raid0_init);
819module_exit(raid0_exit);
820MODULE_LICENSE("GPL");
821MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
822MODULE_ALIAS("md-personality-2"); /* RAID0 */
823MODULE_ALIAS("md-raid0");
824MODULE_ALIAS("md-level-0");
v4.10.11
 
  1/*
  2   raid0.c : Multiple Devices driver for Linux
  3	     Copyright (C) 1994-96 Marc ZYNGIER
  4	     <zyngier@ufr-info-p7.ibp.fr> or
  5	     <maz@gloups.fdn.fr>
  6	     Copyright (C) 1999, 2000 Ingo Molnar, Red Hat
  7
  8   RAID-0 management functions.
  9
 10   This program is free software; you can redistribute it and/or modify
 11   it under the terms of the GNU General Public License as published by
 12   the Free Software Foundation; either version 2, or (at your option)
 13   any later version.
 14
 15   You should have received a copy of the GNU General Public License
 16   (for example /usr/src/linux/COPYING); if not, write to the Free
 17   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 18*/
 19
 20#include <linux/blkdev.h>
 21#include <linux/seq_file.h>
 22#include <linux/module.h>
 23#include <linux/slab.h>
 24#include <trace/events/block.h>
 25#include "md.h"
 26#include "raid0.h"
 27#include "raid5.h"
 28
 29#define UNSUPPORTED_MDDEV_FLAGS		\
 30	((1L << MD_HAS_JOURNAL) |	\
 31	 (1L << MD_JOURNAL_CLEAN) |	\
 32	 (1L << MD_FAILFAST_SUPPORTED))
 33
 34static int raid0_congested(struct mddev *mddev, int bits)
 35{
 36	struct r0conf *conf = mddev->private;
 37	struct md_rdev **devlist = conf->devlist;
 38	int raid_disks = conf->strip_zone[0].nb_dev;
 39	int i, ret = 0;
 40
 41	for (i = 0; i < raid_disks && !ret ; i++) {
 42		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
 43
 44		ret |= bdi_congested(&q->backing_dev_info, bits);
 45	}
 46	return ret;
 47}
 48
 49/*
 50 * inform the user of the raid configuration
 51*/
 52static void dump_zones(struct mddev *mddev)
 53{
 54	int j, k;
 55	sector_t zone_size = 0;
 56	sector_t zone_start = 0;
 57	char b[BDEVNAME_SIZE];
 58	struct r0conf *conf = mddev->private;
 59	int raid_disks = conf->strip_zone[0].nb_dev;
 60	pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
 61		 mdname(mddev),
 62		 conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
 63	for (j = 0; j < conf->nr_strip_zones; j++) {
 64		char line[200];
 65		int len = 0;
 66
 67		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
 68			len += snprintf(line+len, 200-len, "%s%s", k?"/":"",
 69					bdevname(conf->devlist[j*raid_disks
 70							       + k]->bdev, b));
 71		pr_debug("md: zone%d=[%s]\n", j, line);
 72
 73		zone_size  = conf->strip_zone[j].zone_end - zone_start;
 74		pr_debug("      zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
 75			(unsigned long long)zone_start>>1,
 76			(unsigned long long)conf->strip_zone[j].dev_start>>1,
 77			(unsigned long long)zone_size>>1);
 78		zone_start = conf->strip_zone[j].zone_end;
 79	}
 80}
 81
 82static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
 83{
 84	int i, c, err;
 85	sector_t curr_zone_end, sectors;
 86	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
 87	struct strip_zone *zone;
 88	int cnt;
 89	char b[BDEVNAME_SIZE];
 90	char b2[BDEVNAME_SIZE];
 91	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
 92	unsigned short blksize = 512;
 93
 94	*private_conf = ERR_PTR(-ENOMEM);
 95	if (!conf)
 96		return -ENOMEM;
 97	rdev_for_each(rdev1, mddev) {
 98		pr_debug("md/raid0:%s: looking at %s\n",
 99			 mdname(mddev),
100			 bdevname(rdev1->bdev, b));
101		c = 0;
102
103		/* round size to chunk_size */
104		sectors = rdev1->sectors;
105		sector_div(sectors, mddev->chunk_sectors);
106		rdev1->sectors = sectors * mddev->chunk_sectors;
107
108		blksize = max(blksize, queue_logical_block_size(
109				      rdev1->bdev->bd_disk->queue));
110
111		rdev_for_each(rdev2, mddev) {
112			pr_debug("md/raid0:%s:   comparing %s(%llu)"
113				 " with %s(%llu)\n",
114				 mdname(mddev),
115				 bdevname(rdev1->bdev,b),
116				 (unsigned long long)rdev1->sectors,
117				 bdevname(rdev2->bdev,b2),
118				 (unsigned long long)rdev2->sectors);
119			if (rdev2 == rdev1) {
120				pr_debug("md/raid0:%s:   END\n",
121					 mdname(mddev));
122				break;
123			}
124			if (rdev2->sectors == rdev1->sectors) {
125				/*
126				 * Not unique, don't count it as a new
127				 * group
128				 */
129				pr_debug("md/raid0:%s:   EQUAL\n",
130					 mdname(mddev));
131				c = 1;
132				break;
133			}
134			pr_debug("md/raid0:%s:   NOT EQUAL\n",
135				 mdname(mddev));
136		}
137		if (!c) {
138			pr_debug("md/raid0:%s:   ==> UNIQUE\n",
139				 mdname(mddev));
140			conf->nr_strip_zones++;
141			pr_debug("md/raid0:%s: %d zones\n",
142				 mdname(mddev), conf->nr_strip_zones);
143		}
144	}
145	pr_debug("md/raid0:%s: FINAL %d zones\n",
146		 mdname(mddev), conf->nr_strip_zones);
147	/*
148	 * now since we have the hard sector sizes, we can make sure
149	 * chunk size is a multiple of that sector size
150	 */
151	if ((mddev->chunk_sectors << 9) % blksize) {
152		pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
153			mdname(mddev),
154			mddev->chunk_sectors << 9, blksize);
155		err = -EINVAL;
156		goto abort;
157	}
158
159	err = -ENOMEM;
160	conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
161				conf->nr_strip_zones, GFP_KERNEL);
162	if (!conf->strip_zone)
163		goto abort;
164	conf->devlist = kzalloc(sizeof(struct md_rdev*)*
165				conf->nr_strip_zones*mddev->raid_disks,
166				GFP_KERNEL);
167	if (!conf->devlist)
168		goto abort;
169
170	/* The first zone must contain all devices, so here we check that
171	 * there is a proper alignment of slots to devices and find them all
172	 */
173	zone = &conf->strip_zone[0];
174	cnt = 0;
175	smallest = NULL;
176	dev = conf->devlist;
177	err = -EINVAL;
178	rdev_for_each(rdev1, mddev) {
179		int j = rdev1->raid_disk;
180
181		if (mddev->level == 10) {
182			/* taking over a raid10-n2 array */
183			j /= 2;
184			rdev1->new_raid_disk = j;
185		}
186
187		if (mddev->level == 1) {
 188			/* taking over a raid1 array -
189			 * we have only one active disk
190			 */
191			j = 0;
192			rdev1->new_raid_disk = j;
193		}
194
195		if (j < 0) {
196			pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
197				mdname(mddev));
198			goto abort;
199		}
200		if (j >= mddev->raid_disks) {
201			pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
202				mdname(mddev), j);
203			goto abort;
204		}
205		if (dev[j]) {
206			pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
207				mdname(mddev), j);
208			goto abort;
209		}
210		dev[j] = rdev1;
211
212		if (!smallest || (rdev1->sectors < smallest->sectors))
213			smallest = rdev1;
214		cnt++;
215	}
216	if (cnt != mddev->raid_disks) {
217		pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
218			mdname(mddev), cnt, mddev->raid_disks);
219		goto abort;
220	}
221	zone->nb_dev = cnt;
222	zone->zone_end = smallest->sectors * cnt;
223
224	curr_zone_end = zone->zone_end;
225
226	/* now do the other zones */
227	for (i = 1; i < conf->nr_strip_zones; i++)
228	{
229		int j;
230
231		zone = conf->strip_zone + i;
232		dev = conf->devlist + i * mddev->raid_disks;
233
234		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
235		zone->dev_start = smallest->sectors;
236		smallest = NULL;
237		c = 0;
238
239		for (j=0; j<cnt; j++) {
240			rdev = conf->devlist[j];
241			if (rdev->sectors <= zone->dev_start) {
242				pr_debug("md/raid0:%s: checking %s ... nope\n",
243					 mdname(mddev),
244					 bdevname(rdev->bdev, b));
245				continue;
246			}
247			pr_debug("md/raid0:%s: checking %s ..."
248				 " contained as device %d\n",
249				 mdname(mddev),
250				 bdevname(rdev->bdev, b), c);
251			dev[c] = rdev;
252			c++;
253			if (!smallest || rdev->sectors < smallest->sectors) {
254				smallest = rdev;
255				pr_debug("md/raid0:%s:  (%llu) is smallest!.\n",
256					 mdname(mddev),
257					 (unsigned long long)rdev->sectors);
258			}
259		}
260
261		zone->nb_dev = c;
262		sectors = (smallest->sectors - zone->dev_start) * c;
263		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
264			 mdname(mddev),
265			 zone->nb_dev, (unsigned long long)sectors);
266
267		curr_zone_end += sectors;
268		zone->zone_end = curr_zone_end;
269
270		pr_debug("md/raid0:%s: current zone start: %llu\n",
271			 mdname(mddev),
272			 (unsigned long long)smallest->sectors);
273	}
274
275	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
276	*private_conf = conf;
277
278	return 0;
279abort:
280	kfree(conf->strip_zone);
281	kfree(conf->devlist);
282	kfree(conf);
283	*private_conf = ERR_PTR(err);
284	return err;
285}
286
287/* Find the zone which holds a particular offset
288 * Update *sectorp to be an offset in that zone
289 */
290static struct strip_zone *find_zone(struct r0conf *conf,
291				    sector_t *sectorp)
292{
293	int i;
294	struct strip_zone *z = conf->strip_zone;
295	sector_t sector = *sectorp;
296
297	for (i = 0; i < conf->nr_strip_zones; i++)
298		if (sector < z[i].zone_end) {
299			if (i)
300				*sectorp = sector - z[i-1].zone_end;
301			return z + i;
302		}
303	BUG();
304}
305
306/*
 307 * remaps the bio to the target device. We separate two flows:
 308 * a power-of-2 flow and a general flow, for the sake of performance.
309*/
310static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
311				sector_t sector, sector_t *sector_offset)
312{
313	unsigned int sect_in_chunk;
314	sector_t chunk;
315	struct r0conf *conf = mddev->private;
316	int raid_disks = conf->strip_zone[0].nb_dev;
317	unsigned int chunk_sects = mddev->chunk_sectors;
318
319	if (is_power_of_2(chunk_sects)) {
320		int chunksect_bits = ffz(~chunk_sects);
321		/* find the sector offset inside the chunk */
322		sect_in_chunk  = sector & (chunk_sects - 1);
323		sector >>= chunksect_bits;
324		/* chunk in zone */
325		chunk = *sector_offset;
326		/* quotient is the chunk in real device*/
327		sector_div(chunk, zone->nb_dev << chunksect_bits);
328	} else{
329		sect_in_chunk = sector_div(sector, chunk_sects);
330		chunk = *sector_offset;
331		sector_div(chunk, chunk_sects * zone->nb_dev);
332	}
333	/*
334	*  position the bio over the real device
335	*  real sector = chunk in device + starting of zone
336	*	+ the position in the chunk
337	*/
338	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
339	return conf->devlist[(zone - conf->strip_zone)*raid_disks
340			     + sector_div(sector, zone->nb_dev)];
341}
342
343static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
344{
345	sector_t array_sectors = 0;
346	struct md_rdev *rdev;
347
348	WARN_ONCE(sectors || raid_disks,
349		  "%s does not support generic reshape\n", __func__);
350
351	rdev_for_each(rdev, mddev)
352		array_sectors += (rdev->sectors &
353				  ~(sector_t)(mddev->chunk_sectors-1));
354
355	return array_sectors;
356}
357
358static void raid0_free(struct mddev *mddev, void *priv);
359
360static int raid0_run(struct mddev *mddev)
361{
362	struct r0conf *conf;
363	int ret;
364
365	if (mddev->chunk_sectors == 0) {
366		pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
367		return -EINVAL;
368	}
369	if (md_check_no_bitmap(mddev))
370		return -EINVAL;
371
372	/* if private is not null, we are here after takeover */
373	if (mddev->private == NULL) {
374		ret = create_strip_zones(mddev, &conf);
375		if (ret < 0)
376			return ret;
377		mddev->private = conf;
378	}
379	conf = mddev->private;
380	if (mddev->queue) {
381		struct md_rdev *rdev;
382		bool discard_supported = false;
383
384		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
385		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
386		blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
387
388		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
389		blk_queue_io_opt(mddev->queue,
390				 (mddev->chunk_sectors << 9) * mddev->raid_disks);
391
392		rdev_for_each(rdev, mddev) {
393			disk_stack_limits(mddev->gendisk, rdev->bdev,
394					  rdev->data_offset << 9);
395			if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
396				discard_supported = true;
397		}
398		if (!discard_supported)
399			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
400		else
401			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
402	}
403
404	/* calculate array device size */
405	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
406
407	pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
408		 mdname(mddev),
409		 (unsigned long long)mddev->array_sectors);
410
411	if (mddev->queue) {
412		/* calculate the max read-ahead size.
413		 * For read-ahead of large files to be effective, we need to
414		 * readahead at least twice a whole stripe. i.e. number of devices
415		 * multiplied by chunk size times 2.
416		 * If an individual device has an ra_pages greater than the
417		 * chunk size, then we will not drive that device as hard as it
418		 * wants.  We consider this a configuration error: a larger
419		 * chunksize should be used in that case.
420		 */
421		int stripe = mddev->raid_disks *
422			(mddev->chunk_sectors << 9) / PAGE_SIZE;
423		if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
424			mddev->queue->backing_dev_info.ra_pages = 2* stripe;
425	}
426
427	dump_zones(mddev);
428
429	ret = md_integrity_register(mddev);
430
431	return ret;
432}
433
434static void raid0_free(struct mddev *mddev, void *priv)
435{
436	struct r0conf *conf = priv;
437
438	kfree(conf->strip_zone);
439	kfree(conf->devlist);
440	kfree(conf);
441}
442
443/*
 444 * Is the io distributed over 1 or more chunks?
445*/
446static inline int is_io_in_chunk_boundary(struct mddev *mddev,
447			unsigned int chunk_sects, struct bio *bio)
448{
449	if (likely(is_power_of_2(chunk_sects))) {
450		return chunk_sects >=
451			((bio->bi_iter.bi_sector & (chunk_sects-1))
452					+ bio_sectors(bio));
453	} else{
454		sector_t sector = bio->bi_iter.bi_sector;
455		return chunk_sects >= (sector_div(sector, chunk_sects)
456						+ bio_sectors(bio));
457	}
458}
459
460static void raid0_make_request(struct mddev *mddev, struct bio *bio)
461{
462	struct strip_zone *zone;
463	struct md_rdev *tmp_dev;
464	struct bio *split;
465
466	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
467		md_flush_request(mddev, bio);
468		return;
469	}
470
471	do {
472		sector_t bio_sector = bio->bi_iter.bi_sector;
473		sector_t sector = bio_sector;
474		unsigned chunk_sects = mddev->chunk_sectors;
475
476		unsigned sectors = chunk_sects -
477			(likely(is_power_of_2(chunk_sects))
478			 ? (sector & (chunk_sects-1))
479			 : sector_div(sector, chunk_sects));
480
481		/* Restore due to sector_div */
482		sector = bio_sector;
483
484		if (sectors < bio_sectors(bio)) {
485			split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
486			bio_chain(split, bio);
487		} else {
488			split = bio;
489		}
490
491		zone = find_zone(mddev->private, &sector);
492		tmp_dev = map_sector(mddev, zone, sector, &sector);
493		split->bi_bdev = tmp_dev->bdev;
494		split->bi_iter.bi_sector = sector + zone->dev_start +
495			tmp_dev->data_offset;
496
497		if (unlikely((bio_op(split) == REQ_OP_DISCARD) &&
498			 !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
499			/* Just ignore it */
500			bio_endio(split);
501		} else {
502			if (mddev->gendisk)
503				trace_block_bio_remap(bdev_get_queue(split->bi_bdev),
504						      split, disk_devt(mddev->gendisk),
505						      bio_sector);
506			generic_make_request(split);
507		}
508	} while (split != bio);
509}
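
Unlike the v5.9 version, which splits once and resubmits the tail, this older loop keeps carving chunk-bounded pieces off the front of the same bio until split == bio. A standalone model of that iteration (illustrative values, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long long sector = 6;
	unsigned int chunk_sects = 8, remaining = 20;

	while (remaining) {
		unsigned int piece = chunk_sects - (sector & (chunk_sects - 1));

		if (piece > remaining)
			piece = remaining;
		/* pieces: 2, 8, 8, 2 -- each maps to a single chunk */
		printf("map %u sectors at %llu\n", piece, sector);
		sector += piece;
		remaining -= piece;
	}
	return 0;
}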
510
511static void raid0_status(struct seq_file *seq, struct mddev *mddev)
512{
513	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
514	return;
515}
516
517static void *raid0_takeover_raid45(struct mddev *mddev)
518{
519	struct md_rdev *rdev;
520	struct r0conf *priv_conf;
521
522	if (mddev->degraded != 1) {
523		pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
524			mdname(mddev),
525			mddev->degraded);
526		return ERR_PTR(-EINVAL);
527	}
528
529	rdev_for_each(rdev, mddev) {
530		/* check slot number for a disk */
531		if (rdev->raid_disk == mddev->raid_disks-1) {
532			pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
533				mdname(mddev));
534			return ERR_PTR(-EINVAL);
535		}
536		rdev->sectors = mddev->dev_sectors;
537	}
538
539	/* Set new parameters */
540	mddev->new_level = 0;
541	mddev->new_layout = 0;
542	mddev->new_chunk_sectors = mddev->chunk_sectors;
543	mddev->raid_disks--;
544	mddev->delta_disks = -1;
545	/* make sure it will be not marked as dirty */
546	mddev->recovery_cp = MaxSector;
547	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
548
549	create_strip_zones(mddev, &priv_conf);
550
551	return priv_conf;
552}
553
554static void *raid0_takeover_raid10(struct mddev *mddev)
555{
556	struct r0conf *priv_conf;
557
558	/* Check layout:
559	 *  - far_copies must be 1
560	 *  - near_copies must be 2
561	 *  - disks number must be even
562	 *  - all mirrors must be already degraded
563	 */
564	if (mddev->layout != ((1 << 8) + 2)) {
 565		pr_warn("md/raid0:%s: Raid0 cannot takeover layout: 0x%x\n",
566			mdname(mddev),
567			mddev->layout);
568		return ERR_PTR(-EINVAL);
569	}
570	if (mddev->raid_disks & 1) {
571		pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
572			mdname(mddev));
573		return ERR_PTR(-EINVAL);
574	}
575	if (mddev->degraded != (mddev->raid_disks>>1)) {
576		pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
577			mdname(mddev));
578		return ERR_PTR(-EINVAL);
579	}
580
581	/* Set new parameters */
582	mddev->new_level = 0;
583	mddev->new_layout = 0;
584	mddev->new_chunk_sectors = mddev->chunk_sectors;
585	mddev->delta_disks = - mddev->raid_disks / 2;
586	mddev->raid_disks += mddev->delta_disks;
587	mddev->degraded = 0;
588	/* make sure it will be not marked as dirty */
589	mddev->recovery_cp = MaxSector;
590	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
591
592	create_strip_zones(mddev, &priv_conf);
593	return priv_conf;
594}
595
596static void *raid0_takeover_raid1(struct mddev *mddev)
597{
598	struct r0conf *priv_conf;
599	int chunksect;
600
601	/* Check layout:
602	 *  - (N - 1) mirror drives must be already faulty
603	 */
604	if ((mddev->raid_disks - 1) != mddev->degraded) {
 605		pr_err("md/raid0:%s: (N - 1) mirror drives must be already faulty!\n",
606		       mdname(mddev));
607		return ERR_PTR(-EINVAL);
608	}
609
610	/*
611	 * a raid1 doesn't have the notion of chunk size, so
612	 * figure out the largest suitable size we can use.
613	 */
614	chunksect = 64 * 2; /* 64K by default */
615
616	/* The array must be an exact multiple of chunksize */
617	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
618		chunksect >>= 1;
619
620	if ((chunksect << 9) < PAGE_SIZE)
621		/* array size does not allow a suitable chunk size */
622		return ERR_PTR(-EINVAL);
623
624	/* Set new parameters */
625	mddev->new_level = 0;
626	mddev->new_layout = 0;
627	mddev->new_chunk_sectors = chunksect;
628	mddev->chunk_sectors = chunksect;
629	mddev->delta_disks = 1 - mddev->raid_disks;
630	mddev->raid_disks = 1;
631	/* make sure it will be not marked as dirty */
632	mddev->recovery_cp = MaxSector;
633	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
634
635	create_strip_zones(mddev, &priv_conf);
636	return priv_conf;
637}
638
639static void *raid0_takeover(struct mddev *mddev)
640{
641	/* raid0 can take over:
642	 *  raid4 - if all data disks are active.
643	 *  raid5 - providing it is Raid4 layout and one disk is faulty
644	 *  raid10 - assuming we have all necessary active disks
645	 *  raid1 - with (N -1) mirror drives faulty
646	 */
647
648	if (mddev->bitmap) {
649		pr_warn("md/raid0: %s: cannot takeover array with bitmap\n",
650			mdname(mddev));
651		return ERR_PTR(-EBUSY);
652	}
653	if (mddev->level == 4)
654		return raid0_takeover_raid45(mddev);
655
656	if (mddev->level == 5) {
657		if (mddev->layout == ALGORITHM_PARITY_N)
658			return raid0_takeover_raid45(mddev);
659
660		pr_warn("md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
661			mdname(mddev), ALGORITHM_PARITY_N);
662	}
663
664	if (mddev->level == 10)
665		return raid0_takeover_raid10(mddev);
666
667	if (mddev->level == 1)
668		return raid0_takeover_raid1(mddev);
669
670	pr_warn("Takeover from raid%i to raid0 not supported\n",
671		mddev->level);
672
673	return ERR_PTR(-EINVAL);
674}
675
676static void raid0_quiesce(struct mddev *mddev, int state)
677{
678}
679
680static struct md_personality raid0_personality=
681{
682	.name		= "raid0",
683	.level		= 0,
684	.owner		= THIS_MODULE,
685	.make_request	= raid0_make_request,
686	.run		= raid0_run,
687	.free		= raid0_free,
688	.status		= raid0_status,
689	.size		= raid0_size,
690	.takeover	= raid0_takeover,
691	.quiesce	= raid0_quiesce,
692	.congested	= raid0_congested,
693};
694
695static int __init raid0_init (void)
696{
697	return register_md_personality (&raid0_personality);
698}
699
700static void raid0_exit (void)
701{
702	unregister_md_personality (&raid0_personality);
703}
704
705module_init(raid0_init);
706module_exit(raid0_exit);
707MODULE_LICENSE("GPL");
708MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
709MODULE_ALIAS("md-personality-2"); /* RAID0 */
710MODULE_ALIAS("md-raid0");
711MODULE_ALIAS("md-level-0");