v4.6 (drivers/md/raid0.c)
  1/*
  2   raid0.c : Multiple Devices driver for Linux
  3	     Copyright (C) 1994-96 Marc ZYNGIER
  4	     <zyngier@ufr-info-p7.ibp.fr> or
  5	     <maz@gloups.fdn.fr>
  6	     Copyright (C) 1999, 2000 Ingo Molnar, Red Hat
  7
  8   RAID-0 management functions.
  9
 10   This program is free software; you can redistribute it and/or modify
 11   it under the terms of the GNU General Public License as published by
 12   the Free Software Foundation; either version 2, or (at your option)
 13   any later version.
 14
 15   You should have received a copy of the GNU General Public License
 16   (for example /usr/src/linux/COPYING); if not, write to the Free
 17   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 18*/
 19
 20#include <linux/blkdev.h>
 21#include <linux/seq_file.h>
 22#include <linux/module.h>
 23#include <linux/slab.h>
 24#include "md.h"
 25#include "raid0.h"
 26#include "raid5.h"
 27
 28static int raid0_congested(struct mddev *mddev, int bits)
 29{
 30	struct r0conf *conf = mddev->private;
 31	struct md_rdev **devlist = conf->devlist;
 32	int raid_disks = conf->strip_zone[0].nb_dev;
 33	int i, ret = 0;
 34
 35	for (i = 0; i < raid_disks && !ret ; i++) {
 36		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
 37
 38		ret |= bdi_congested(&q->backing_dev_info, bits);
 39	}
 40	return ret;
 41}
 42
 43/*
 44 * inform the user of the raid configuration
 45*/
 46static void dump_zones(struct mddev *mddev)
 47{
 48	int j, k;
 49	sector_t zone_size = 0;
 50	sector_t zone_start = 0;
 51	char b[BDEVNAME_SIZE];
 52	struct r0conf *conf = mddev->private;
 53	int raid_disks = conf->strip_zone[0].nb_dev;
 54	printk(KERN_INFO "md: RAID0 configuration for %s - %d zone%s\n",
 55	       mdname(mddev),
 56	       conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
 57	for (j = 0; j < conf->nr_strip_zones; j++) {
 58		printk(KERN_INFO "md: zone%d=[", j);
 59		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
 60			printk(KERN_CONT "%s%s", k?"/":"",
 61			bdevname(conf->devlist[j*raid_disks
 62						+ k]->bdev, b));
 63		printk(KERN_CONT "]\n");
 64
 65		zone_size  = conf->strip_zone[j].zone_end - zone_start;
 66		printk(KERN_INFO "      zone-offset=%10lluKB, "
 67				"device-offset=%10lluKB, size=%10lluKB\n",
 68			(unsigned long long)zone_start>>1,
 69			(unsigned long long)conf->strip_zone[j].dev_start>>1,
 70			(unsigned long long)zone_size>>1);
 71		zone_start = conf->strip_zone[j].zone_end;
 72	}
 73}
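/*
 * Illustration only (hypothetical names and sizes): for a two-disk array
 * where sdb contributes 4 GiB and sdc 8 GiB, dump_zones() would log
 * roughly the following (the >>1 above converts sectors to 1K blocks):
 *
 *   md: RAID0 configuration for md0 - 2 zones
 *   md: zone0=[sdb/sdc]
 *         zone-offset=         0KB, device-offset=         0KB, size=   8388608KB
 *   md: zone1=[sdc]
 *         zone-offset=   8388608KB, device-offset=   4194304KB, size=   4194304KB
 *
 * zone0 stripes across both disks until the smaller one is exhausted;
 * zone1 is the tail of the larger disk on its own.
 */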
 74
 75static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
 76{
 77	int i, c, err;
 78	sector_t curr_zone_end, sectors;
 79	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
 80	struct strip_zone *zone;
 81	int cnt;
 82	char b[BDEVNAME_SIZE];
 83	char b2[BDEVNAME_SIZE];
 84	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
 85	unsigned short blksize = 512;
 86
 87	*private_conf = ERR_PTR(-ENOMEM);
 88	if (!conf)
 89		return -ENOMEM;
 90	rdev_for_each(rdev1, mddev) {
 91		pr_debug("md/raid0:%s: looking at %s\n",
 92			 mdname(mddev),
 93			 bdevname(rdev1->bdev, b));
 94		c = 0;
 95
 96		/* round size to chunk_size */
 97		sectors = rdev1->sectors;
 98		sector_div(sectors, mddev->chunk_sectors);
 99		rdev1->sectors = sectors * mddev->chunk_sectors;
100
101		blksize = max(blksize, queue_logical_block_size(
102				      rdev1->bdev->bd_disk->queue));
103
104		rdev_for_each(rdev2, mddev) {
105			pr_debug("md/raid0:%s:   comparing %s(%llu)"
106				 " with %s(%llu)\n",
107				 mdname(mddev),
108				 bdevname(rdev1->bdev,b),
109				 (unsigned long long)rdev1->sectors,
110				 bdevname(rdev2->bdev,b2),
111				 (unsigned long long)rdev2->sectors);
112			if (rdev2 == rdev1) {
113				pr_debug("md/raid0:%s:   END\n",
114					 mdname(mddev));
115				break;
116			}
117			if (rdev2->sectors == rdev1->sectors) {
118				/*
119				 * Not unique, don't count it as a new
120				 * group
121				 */
122				pr_debug("md/raid0:%s:   EQUAL\n",
123					 mdname(mddev));
124				c = 1;
125				break;
126			}
127			pr_debug("md/raid0:%s:   NOT EQUAL\n",
128				 mdname(mddev));
129		}
130		if (!c) {
131			pr_debug("md/raid0:%s:   ==> UNIQUE\n",
132				 mdname(mddev));
133			conf->nr_strip_zones++;
134			pr_debug("md/raid0:%s: %d zones\n",
135				 mdname(mddev), conf->nr_strip_zones);
136		}
137	}
138	pr_debug("md/raid0:%s: FINAL %d zones\n",
139		 mdname(mddev), conf->nr_strip_zones);
140	/*
141	 * now since we have the hard sector sizes, we can make sure
142	 * chunk size is a multiple of that sector size
143	 */
144	if ((mddev->chunk_sectors << 9) % blksize) {
145		printk(KERN_ERR "md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
146		       mdname(mddev),
147		       mddev->chunk_sectors << 9, blksize);
148		err = -EINVAL;
149		goto abort;
150	}
151
152	err = -ENOMEM;
153	conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
154				conf->nr_strip_zones, GFP_KERNEL);
155	if (!conf->strip_zone)
156		goto abort;
157	conf->devlist = kzalloc(sizeof(struct md_rdev*)*
158				conf->nr_strip_zones*mddev->raid_disks,
159				GFP_KERNEL);
160	if (!conf->devlist)
161		goto abort;
162
163	/* The first zone must contain all devices, so here we check that
164	 * there is a proper alignment of slots to devices and find them all
165	 */
166	zone = &conf->strip_zone[0];
167	cnt = 0;
168	smallest = NULL;
169	dev = conf->devlist;
170	err = -EINVAL;
171	rdev_for_each(rdev1, mddev) {
172		int j = rdev1->raid_disk;
173
174		if (mddev->level == 10) {
175			/* taking over a raid10-n2 array */
176			j /= 2;
177			rdev1->new_raid_disk = j;
178		}
179
180		if (mddev->level == 1) {
181			/* taking over a raid1 array -
182			 * we have only one active disk
183			 */
184			j = 0;
185			rdev1->new_raid_disk = j;
186		}
187
188		if (j < 0) {
189			printk(KERN_ERR
190			       "md/raid0:%s: remove inactive devices before converting to RAID0\n",
191			       mdname(mddev));
192			goto abort;
193		}
194		if (j >= mddev->raid_disks) {
195			printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
196			       "aborting!\n", mdname(mddev), j);
197			goto abort;
198		}
199		if (dev[j]) {
200			printk(KERN_ERR "md/raid0:%s: multiple devices for %d - "
201			       "aborting!\n", mdname(mddev), j);
202			goto abort;
203		}
204		dev[j] = rdev1;
205
206		if (!smallest || (rdev1->sectors < smallest->sectors))
207			smallest = rdev1;
208		cnt++;
209	}
210	if (cnt != mddev->raid_disks) {
211		printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
212		       "aborting!\n", mdname(mddev), cnt, mddev->raid_disks);
213		goto abort;
214	}
215	zone->nb_dev = cnt;
216	zone->zone_end = smallest->sectors * cnt;
217
218	curr_zone_end = zone->zone_end;
219
220	/* now do the other zones */
221	for (i = 1; i < conf->nr_strip_zones; i++)
222	{
223		int j;
224
225		zone = conf->strip_zone + i;
226		dev = conf->devlist + i * mddev->raid_disks;
227
228		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
229		zone->dev_start = smallest->sectors;
230		smallest = NULL;
231		c = 0;
232
233		for (j=0; j<cnt; j++) {
234			rdev = conf->devlist[j];
235			if (rdev->sectors <= zone->dev_start) {
236				pr_debug("md/raid0:%s: checking %s ... nope\n",
237					 mdname(mddev),
238					 bdevname(rdev->bdev, b));
239				continue;
240			}
241			pr_debug("md/raid0:%s: checking %s ..."
242				 " contained as device %d\n",
243				 mdname(mddev),
244				 bdevname(rdev->bdev, b), c);
245			dev[c] = rdev;
246			c++;
247			if (!smallest || rdev->sectors < smallest->sectors) {
248				smallest = rdev;
249				pr_debug("md/raid0:%s:  (%llu) is smallest!\n",
250					 mdname(mddev),
251					 (unsigned long long)rdev->sectors);
252			}
253		}
254
255		zone->nb_dev = c;
256		sectors = (smallest->sectors - zone->dev_start) * c;
257		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
258			 mdname(mddev),
259			 zone->nb_dev, (unsigned long long)sectors);
260
261		curr_zone_end += sectors;
262		zone->zone_end = curr_zone_end;
263
264		pr_debug("md/raid0:%s: current zone start: %llu\n",
265			 mdname(mddev),
266			 (unsigned long long)smallest->sectors);
267	}
268
269	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
270	*private_conf = conf;
271
272	return 0;
273abort:
274	kfree(conf->strip_zone);
275	kfree(conf->devlist);
276	kfree(conf);
277	*private_conf = ERR_PTR(err);
278	return err;
279}
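/*
 * A minimal userspace sketch of the geometry create_strip_zones() builds,
 * under assumptions: three hypothetical members of 4, 8 and 8 GiB (in
 * 512-byte sectors, already chunk-aligned), giving two zones.  Each zone
 * ends where the next-smallest member runs out, and zone_end accumulates
 * across zones.  Build with "cc -o zones zones.c".
 */
#include <stdio.h>

int main(void)
{
	unsigned long long dev[] = { 8388608ULL, 16777216ULL, 16777216ULL };
	unsigned long long zone_end, dev_start;

	/* zone 0: all three members, up to the smallest (dev[0]) */
	dev_start = 0;
	zone_end = dev[0] * 3;
	printf("zone0: nb_dev=3 dev_start=%llu zone_end=%llu\n",
	       dev_start, zone_end);

	/* zone 1: the two larger members, from dev[0]'s end to theirs */
	dev_start = dev[0];
	zone_end += (dev[1] - dev_start) * 2;
	printf("zone1: nb_dev=2 dev_start=%llu zone_end=%llu\n",
	       dev_start, zone_end);
	/* prints zone0: ... zone_end=25165824, zone1: ... zone_end=41943040 */
	return 0;
}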
280
281/* Find the zone which holds a particular offset
282 * Update *sectorp to be an offset in that zone
283 */
284static struct strip_zone *find_zone(struct r0conf *conf,
285				    sector_t *sectorp)
286{
287	int i;
288	struct strip_zone *z = conf->strip_zone;
289	sector_t sector = *sectorp;
290
291	for (i = 0; i < conf->nr_strip_zones; i++)
292		if (sector < z[i].zone_end) {
293			if (i)
294				*sectorp = sector - z[i-1].zone_end;
295			return z + i;
296		}
297	BUG();
298}
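/*
 * Worked example (hypothetical numbers): with two zones ending at sectors
 * 1000 and 3000, a request at array sector 1500 falls in zone 1 and
 * *sectorp becomes 1500 - 1000 = 500, the offset into that zone.
 */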
299
300/*
301 * Remap the bio to the target device. We separate two flows: a
302 * power-of-2 flow and a general flow, for the sake of performance.
303*/
304static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
305				sector_t sector, sector_t *sector_offset)
306{
307	unsigned int sect_in_chunk;
308	sector_t chunk;
309	struct r0conf *conf = mddev->private;
310	int raid_disks = conf->strip_zone[0].nb_dev;
311	unsigned int chunk_sects = mddev->chunk_sectors;
312
313	if (is_power_of_2(chunk_sects)) {
314		int chunksect_bits = ffz(~chunk_sects);
315		/* find the sector offset inside the chunk */
316		sect_in_chunk  = sector & (chunk_sects - 1);
317		sector >>= chunksect_bits;
318		/* chunk in zone */
319		chunk = *sector_offset;
320		/* quotient is the chunk in real device*/
321		sector_div(chunk, zone->nb_dev << chunksect_bits);
322	} else{
323		sect_in_chunk = sector_div(sector, chunk_sects);
324		chunk = *sector_offset;
325		sector_div(chunk, chunk_sects * zone->nb_dev);
326	}
327	/*
328	*  position the bio over the real device
329	*  real sector = chunk in device + starting of zone
330	*	+ the position in the chunk
331	*/
332	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
333	return conf->devlist[(zone - conf->strip_zone)*raid_disks
334			     + sector_div(sector, zone->nb_dev)];
335}
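/*
 * Worked example (hypothetical numbers): chunk_sects = 128 (64KiB) and a
 * zone of nb_dev = 3 devices, with a zone offset of 1000 sectors:
 *   sect_in_chunk  = 1000 & 127      = 104  (position inside the chunk)
 *   chunk          = 1000 / (3 << 7) = 2    (chunk number on the device)
 *   device index   = (1000 >> 7) % 3 = 1    (zone chunk 7 -> device 1)
 *   *sector_offset = 2 * 128 + 104   = 360
 * so the bio lands on device 1 at sector 360, before the caller adds
 * dev_start and the rdev's data_offset.
 */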
336
337static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
338{
339	sector_t array_sectors = 0;
340	struct md_rdev *rdev;
341
342	WARN_ONCE(sectors || raid_disks,
343		  "%s does not support generic reshape\n", __func__);
344
345	rdev_for_each(rdev, mddev)
346		array_sectors += (rdev->sectors &
347				  ~(sector_t)(mddev->chunk_sectors-1));
348
349	return array_sectors;
350}
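/*
 * Worked example (hypothetical numbers): with chunk_sectors = 128 and two
 * members of 1000 and 2000 sectors, the mask rounds each down to a whole
 * number of chunks: 896 + 1920 = 2816 sectors of array capacity.
 */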
351
352static void raid0_free(struct mddev *mddev, void *priv);
353
354static int raid0_run(struct mddev *mddev)
355{
356	struct r0conf *conf;
357	int ret;
358
359	if (mddev->chunk_sectors == 0) {
360		printk(KERN_ERR "md/raid0:%s: chunk size must be set.\n",
361		       mdname(mddev));
362		return -EINVAL;
363	}
364	if (md_check_no_bitmap(mddev))
365		return -EINVAL;
366
367	/* if private is not null, we are here after takeover */
368	if (mddev->private == NULL) {
369		ret = create_strip_zones(mddev, &conf);
370		if (ret < 0)
371			return ret;
372		mddev->private = conf;
373	}
374	conf = mddev->private;
375	if (mddev->queue) {
376		struct md_rdev *rdev;
377		bool discard_supported = false;
378
379		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
380		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
381		blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
382
383		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
384		blk_queue_io_opt(mddev->queue,
385				 (mddev->chunk_sectors << 9) * mddev->raid_disks);
386
387		rdev_for_each(rdev, mddev) {
388			disk_stack_limits(mddev->gendisk, rdev->bdev,
389					  rdev->data_offset << 9);
390			if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
391				discard_supported = true;
392		}
393		if (!discard_supported)
394			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
395		else
396			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
397	}
398
399	/* calculate array device size */
400	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
401
402	printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n",
403	       mdname(mddev),
404	       (unsigned long long)mddev->array_sectors);
405
406	if (mddev->queue) {
407		/* calculate the max read-ahead size.
408		 * For read-ahead of large files to be effective, we need to
409		 * readahead at least twice a whole stripe. i.e. number of devices
410		 * multiplied by chunk size times 2.
411		 * If an individual device has an ra_pages greater than the
412		 * chunk size, then we will not drive that device as hard as it
413		 * wants.  We consider this a configuration error: a larger
414		 * chunksize should be used in that case.
415		 */
416		int stripe = mddev->raid_disks *
417			(mddev->chunk_sectors << 9) / PAGE_SIZE;
418		if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
419			mddev->queue->backing_dev_info.ra_pages = 2* stripe;
420	}
421
422	dump_zones(mddev);
423
424	ret = md_integrity_register(mddev);
425
426	return ret;
427}
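/*
 * Worked example for the read-ahead sizing above (hypothetical numbers):
 * 4 disks with 512KiB chunks and 4KiB pages give
 *   stripe = 4 * (1024 << 9) / 4096 = 512 pages,
 * so ra_pages is raised to at least 1024 pages (4MiB) - two full stripes.
 */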
428
429static void raid0_free(struct mddev *mddev, void *priv)
430{
431	struct r0conf *conf = priv;
432
433	kfree(conf->strip_zone);
434	kfree(conf->devlist);
435	kfree(conf);
436}
437
438/*
439 * Is the IO distributed over one or more chunks?
440*/
441static inline int is_io_in_chunk_boundary(struct mddev *mddev,
442			unsigned int chunk_sects, struct bio *bio)
443{
444	if (likely(is_power_of_2(chunk_sects))) {
445		return chunk_sects >=
446			((bio->bi_iter.bi_sector & (chunk_sects-1))
447					+ bio_sectors(bio));
448	} else{
449		sector_t sector = bio->bi_iter.bi_sector;
450		return chunk_sects >= (sector_div(sector, chunk_sects)
451						+ bio_sectors(bio));
452	}
453}
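/*
 * Worked example (hypothetical numbers): chunk_sects = 128 and a
 * 16-sector bio at sector 120: 120 & 127 = 120 and 120 + 16 = 136 > 128,
 * so the IO crosses a chunk boundary and is not contained.
 */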
454
455static void raid0_make_request(struct mddev *mddev, struct bio *bio)
456{
457	struct strip_zone *zone;
458	struct md_rdev *tmp_dev;
459	struct bio *split;
460
461	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
462		md_flush_request(mddev, bio);
463		return;
464	}
465
466	do {
467		sector_t sector = bio->bi_iter.bi_sector;
468		unsigned chunk_sects = mddev->chunk_sectors;
469
470		unsigned sectors = chunk_sects -
471			(likely(is_power_of_2(chunk_sects))
472			 ? (sector & (chunk_sects-1))
473			 : sector_div(sector, chunk_sects));
474
475		/* Restore due to sector_div */
476		sector = bio->bi_iter.bi_sector;
477
478		if (sectors < bio_sectors(bio)) {
479			split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
480			bio_chain(split, bio);
481		} else {
482			split = bio;
483		}
484
485		zone = find_zone(mddev->private, &sector);
486		tmp_dev = map_sector(mddev, zone, sector, &sector);
487		split->bi_bdev = tmp_dev->bdev;
488		split->bi_iter.bi_sector = sector + zone->dev_start +
489			tmp_dev->data_offset;
490
491		if (unlikely((split->bi_rw & REQ_DISCARD) &&
492			 !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
493			/* Just ignore it */
494			bio_endio(split);
495		} else
496			generic_make_request(split);
497	} while (split != bio);
498}
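/*
 * Worked example (hypothetical numbers): with chunk_sects = 128, a
 * 16-sector bio at sector 120 takes two passes through the loop above:
 * the first splits off 128 - 120 = 8 sectors and maps them; the chained
 * remainder starts at sector 128, a chunk boundary, and maps whole.
 */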
499
500static void raid0_status(struct seq_file *seq, struct mddev *mddev)
501{
502	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
503	return;
504}
505
506static void *raid0_takeover_raid45(struct mddev *mddev)
507{
508	struct md_rdev *rdev;
509	struct r0conf *priv_conf;
510
511	if (mddev->degraded != 1) {
512		printk(KERN_ERR "md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
513		       mdname(mddev),
514		       mddev->degraded);
515		return ERR_PTR(-EINVAL);
516	}
517
518	rdev_for_each(rdev, mddev) {
519		/* check slot number for a disk */
520		if (rdev->raid_disk == mddev->raid_disks-1) {
521			printk(KERN_ERR "md/raid0:%s: raid5 must have missing parity disk!\n",
522			       mdname(mddev));
523			return ERR_PTR(-EINVAL);
524		}
525		rdev->sectors = mddev->dev_sectors;
526	}
527
528	/* Set new parameters */
529	mddev->new_level = 0;
530	mddev->new_layout = 0;
531	mddev->new_chunk_sectors = mddev->chunk_sectors;
532	mddev->raid_disks--;
533	mddev->delta_disks = -1;
534	/* make sure it will be not marked as dirty */
535	mddev->recovery_cp = MaxSector;
536
537	create_strip_zones(mddev, &priv_conf);
538	return priv_conf;
539}
540
541static void *raid0_takeover_raid10(struct mddev *mddev)
542{
543	struct r0conf *priv_conf;
544
545	/* Check layout:
546	 *  - far_copies must be 1
547	 *  - near_copies must be 2
548	 *  - disks number must be even
549	 *  - all mirrors must be already degraded
550	 */
551	if (mddev->layout != ((1 << 8) + 2)) {
552		printk(KERN_ERR "md/raid0:%s: Raid0 cannot takeover layout: 0x%x\n",
553		       mdname(mddev),
554		       mddev->layout);
555		return ERR_PTR(-EINVAL);
556	}
557	if (mddev->raid_disks & 1) {
558		printk(KERN_ERR "md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
559		       mdname(mddev));
560		return ERR_PTR(-EINVAL);
561	}
562	if (mddev->degraded != (mddev->raid_disks>>1)) {
563		printk(KERN_ERR "md/raid0:%s: All mirrors must be already degraded!\n",
564		       mdname(mddev));
565		return ERR_PTR(-EINVAL);
566	}
567
568	/* Set new parameters */
569	mddev->new_level = 0;
570	mddev->new_layout = 0;
571	mddev->new_chunk_sectors = mddev->chunk_sectors;
572	mddev->delta_disks = - mddev->raid_disks / 2;
573	mddev->raid_disks += mddev->delta_disks;
574	mddev->degraded = 0;
575	/* make sure it will be not marked as dirty */
576	mddev->recovery_cp = MaxSector;
577
578	create_strip_zones(mddev, &priv_conf);
579	return priv_conf;
580}
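/*
 * Note on the layout test above: the raid10 layout word keeps near_copies
 * in bits 0-7 and far_copies in bits 8-15, so (1 << 8) + 2 = 0x102 means
 * far_copies = 1 and near_copies = 2 - the n2 geometry, the only raid10
 * layout this takeover handles.
 */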
581
582static void *raid0_takeover_raid1(struct mddev *mddev)
583{
584	struct r0conf *priv_conf;
585	int chunksect;
586
587	/* Check layout:
588	 *  - (N - 1) mirror drives must be already faulty
589	 */
590	if ((mddev->raid_disks - 1) != mddev->degraded) {
591		printk(KERN_ERR "md/raid0:%s: (N - 1) mirror drives must already be faulty!\n",
592		       mdname(mddev));
593		return ERR_PTR(-EINVAL);
594	}
595
596	/*
597	 * a raid1 doesn't have the notion of chunk size, so
598	 * figure out the largest suitable size we can use.
599	 */
600	chunksect = 64 * 2; /* 64K by default */
601
602	/* The array must be an exact multiple of chunksize */
603	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
604		chunksect >>= 1;
605
606	if ((chunksect << 9) < PAGE_SIZE)
607		/* array size does not allow a suitable chunk size */
608		return ERR_PTR(-EINVAL);
609
610	/* Set new parameters */
611	mddev->new_level = 0;
612	mddev->new_layout = 0;
613	mddev->new_chunk_sectors = chunksect;
614	mddev->chunk_sectors = chunksect;
615	mddev->delta_disks = 1 - mddev->raid_disks;
616	mddev->raid_disks = 1;
617	/* make sure it will be not marked as dirty */
618	mddev->recovery_cp = MaxSector;
619
620	create_strip_zones(mddev, &priv_conf);
621	return priv_conf;
622}
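/*
 * Worked example (hypothetical numbers): for an array of 1048584 sectors,
 * 1048584 & 127 != 0, so the loop halves chunksect until 1048584 & 7 == 0.
 * The takeover uses an 8-sector (4KiB) chunk, the largest power of two
 * that still divides the array size evenly.
 */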
623
624static void *raid0_takeover(struct mddev *mddev)
625{
626	/* raid0 can take over:
627	 *  raid4 - if all data disks are active.
628	 *  raid5 - providing it is Raid4 layout and one disk is faulty
629	 *  raid10 - assuming we have all necessary active disks
630	 *  raid1 - with (N -1) mirror drives faulty
631	 */
632
633	if (mddev->bitmap) {
634		printk(KERN_ERR "md/raid0: %s: cannot takeover array with bitmap\n",
635		       mdname(mddev));
636		return ERR_PTR(-EBUSY);
637	}
638	if (mddev->level == 4)
639		return raid0_takeover_raid45(mddev);
640
641	if (mddev->level == 5) {
642		if (mddev->layout == ALGORITHM_PARITY_N)
643			return raid0_takeover_raid45(mddev);
644
645		printk(KERN_ERR "md/raid0:%s: Raid0 can only take over Raid5 with layout: %d\n",
646		       mdname(mddev), ALGORITHM_PARITY_N);
647	}
648
649	if (mddev->level == 10)
650		return raid0_takeover_raid10(mddev);
651
652	if (mddev->level == 1)
653		return raid0_takeover_raid1(mddev);
654
655	printk(KERN_ERR "Takeover from raid%i to raid0 not supported\n",
656		mddev->level);
657
658	return ERR_PTR(-EINVAL);
659}
660
661static void raid0_quiesce(struct mddev *mddev, int state)
662{
663}
664
665static struct md_personality raid0_personality=
666{
667	.name		= "raid0",
668	.level		= 0,
669	.owner		= THIS_MODULE,
670	.make_request	= raid0_make_request,
671	.run		= raid0_run,
672	.free		= raid0_free,
673	.status		= raid0_status,
674	.size		= raid0_size,
675	.takeover	= raid0_takeover,
676	.quiesce	= raid0_quiesce,
677	.congested	= raid0_congested,
678};
679
680static int __init raid0_init (void)
681{
682	return register_md_personality (&raid0_personality);
683}
684
685static void raid0_exit (void)
686{
687	unregister_md_personality (&raid0_personality);
688}
689
690module_init(raid0_init);
691module_exit(raid0_exit);
692MODULE_LICENSE("GPL");
693MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
694MODULE_ALIAS("md-personality-2"); /* RAID0 */
695MODULE_ALIAS("md-raid0");
696MODULE_ALIAS("md-level-0");
v4.17 (drivers/md/raid0.c)
  1/*
  2   raid0.c : Multiple Devices driver for Linux
  3	     Copyright (C) 1994-96 Marc ZYNGIER
  4	     <zyngier@ufr-info-p7.ibp.fr> or
  5	     <maz@gloups.fdn.fr>
  6	     Copyright (C) 1999, 2000 Ingo Molnar, Red Hat
  7
  8   RAID-0 management functions.
  9
 10   This program is free software; you can redistribute it and/or modify
 11   it under the terms of the GNU General Public License as published by
 12   the Free Software Foundation; either version 2, or (at your option)
 13   any later version.
 14
 15   You should have received a copy of the GNU General Public License
 16   (for example /usr/src/linux/COPYING); if not, write to the Free
 17   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 18*/
 19
 20#include <linux/blkdev.h>
 21#include <linux/seq_file.h>
 22#include <linux/module.h>
 23#include <linux/slab.h>
 24#include <trace/events/block.h>
 25#include "md.h"
 26#include "raid0.h"
 27#include "raid5.h"
 28
 29#define UNSUPPORTED_MDDEV_FLAGS		\
 30	((1L << MD_HAS_JOURNAL) |	\
 31	 (1L << MD_JOURNAL_CLEAN) |	\
 32	 (1L << MD_FAILFAST_SUPPORTED) |\
 33	 (1L << MD_HAS_PPL) |		\
 34	 (1L << MD_HAS_MULTIPLE_PPLS))
 35
 36static int raid0_congested(struct mddev *mddev, int bits)
 37{
 38	struct r0conf *conf = mddev->private;
 39	struct md_rdev **devlist = conf->devlist;
 40	int raid_disks = conf->strip_zone[0].nb_dev;
 41	int i, ret = 0;
 42
 43	for (i = 0; i < raid_disks && !ret ; i++) {
 44		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
 45
 46		ret |= bdi_congested(q->backing_dev_info, bits);
 47	}
 48	return ret;
 49}
 50
 51/*
 52 * inform the user of the raid configuration
 53*/
 54static void dump_zones(struct mddev *mddev)
 55{
 56	int j, k;
 57	sector_t zone_size = 0;
 58	sector_t zone_start = 0;
 59	char b[BDEVNAME_SIZE];
 60	struct r0conf *conf = mddev->private;
 61	int raid_disks = conf->strip_zone[0].nb_dev;
 62	pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
 63		 mdname(mddev),
 64		 conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
 65	for (j = 0; j < conf->nr_strip_zones; j++) {
 66		char line[200];
 67		int len = 0;
 68
 69		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
 70			len += snprintf(line+len, 200-len, "%s%s", k?"/":"",
 71					bdevname(conf->devlist[j*raid_disks
 72							       + k]->bdev, b));
 73		pr_debug("md: zone%d=[%s]\n", j, line);
 74
 75		zone_size  = conf->strip_zone[j].zone_end - zone_start;
 76		pr_debug("      zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
 77			(unsigned long long)zone_start>>1,
 78			(unsigned long long)conf->strip_zone[j].dev_start>>1,
 79			(unsigned long long)zone_size>>1);
 80		zone_start = conf->strip_zone[j].zone_end;
 81	}
 82}
 83
 84static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
 85{
 86	int i, c, err;
 87	sector_t curr_zone_end, sectors;
 88	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
 89	struct strip_zone *zone;
 90	int cnt;
 91	char b[BDEVNAME_SIZE];
 92	char b2[BDEVNAME_SIZE];
 93	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
 94	unsigned short blksize = 512;
 95
 96	*private_conf = ERR_PTR(-ENOMEM);
 97	if (!conf)
 98		return -ENOMEM;
 99	rdev_for_each(rdev1, mddev) {
100		pr_debug("md/raid0:%s: looking at %s\n",
101			 mdname(mddev),
102			 bdevname(rdev1->bdev, b));
103		c = 0;
104
105		/* round size to chunk_size */
106		sectors = rdev1->sectors;
107		sector_div(sectors, mddev->chunk_sectors);
108		rdev1->sectors = sectors * mddev->chunk_sectors;
109
110		blksize = max(blksize, queue_logical_block_size(
111				      rdev1->bdev->bd_disk->queue));
112
113		rdev_for_each(rdev2, mddev) {
114			pr_debug("md/raid0:%s:   comparing %s(%llu)"
115				 " with %s(%llu)\n",
116				 mdname(mddev),
117				 bdevname(rdev1->bdev,b),
118				 (unsigned long long)rdev1->sectors,
119				 bdevname(rdev2->bdev,b2),
120				 (unsigned long long)rdev2->sectors);
121			if (rdev2 == rdev1) {
122				pr_debug("md/raid0:%s:   END\n",
123					 mdname(mddev));
124				break;
125			}
126			if (rdev2->sectors == rdev1->sectors) {
127				/*
128				 * Not unique, don't count it as a new
129				 * group
130				 */
131				pr_debug("md/raid0:%s:   EQUAL\n",
132					 mdname(mddev));
133				c = 1;
134				break;
135			}
136			pr_debug("md/raid0:%s:   NOT EQUAL\n",
137				 mdname(mddev));
138		}
139		if (!c) {
140			pr_debug("md/raid0:%s:   ==> UNIQUE\n",
141				 mdname(mddev));
142			conf->nr_strip_zones++;
143			pr_debug("md/raid0:%s: %d zones\n",
144				 mdname(mddev), conf->nr_strip_zones);
145		}
146	}
147	pr_debug("md/raid0:%s: FINAL %d zones\n",
148		 mdname(mddev), conf->nr_strip_zones);
149	/*
150	 * now since we have the hard sector sizes, we can make sure
151	 * chunk size is a multiple of that sector size
152	 */
153	if ((mddev->chunk_sectors << 9) % blksize) {
154		pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
155			mdname(mddev),
156			mddev->chunk_sectors << 9, blksize);
157		err = -EINVAL;
158		goto abort;
159	}
160
161	err = -ENOMEM;
162	conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
163				conf->nr_strip_zones, GFP_KERNEL);
164	if (!conf->strip_zone)
165		goto abort;
166	conf->devlist = kzalloc(sizeof(struct md_rdev*)*
167				conf->nr_strip_zones*mddev->raid_disks,
168				GFP_KERNEL);
169	if (!conf->devlist)
170		goto abort;
171
172	/* The first zone must contain all devices, so here we check that
173	 * there is a proper alignment of slots to devices and find them all
174	 */
175	zone = &conf->strip_zone[0];
176	cnt = 0;
177	smallest = NULL;
178	dev = conf->devlist;
179	err = -EINVAL;
180	rdev_for_each(rdev1, mddev) {
181		int j = rdev1->raid_disk;
182
183		if (mddev->level == 10) {
184			/* taking over a raid10-n2 array */
185			j /= 2;
186			rdev1->new_raid_disk = j;
187		}
188
189		if (mddev->level == 1) {
190			/* taking over a raid1 array -
191			 * we have only one active disk
192			 */
193			j = 0;
194			rdev1->new_raid_disk = j;
195		}
196
197		if (j < 0) {
198			pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
199				mdname(mddev));
200			goto abort;
201		}
202		if (j >= mddev->raid_disks) {
203			pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
204				mdname(mddev), j);
205			goto abort;
206		}
207		if (dev[j]) {
208			pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
209				mdname(mddev), j);
210			goto abort;
211		}
212		dev[j] = rdev1;
213
214		if (!smallest || (rdev1->sectors < smallest->sectors))
215			smallest = rdev1;
216		cnt++;
217	}
218	if (cnt != mddev->raid_disks) {
219		pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
220			mdname(mddev), cnt, mddev->raid_disks);
221		goto abort;
222	}
223	zone->nb_dev = cnt;
224	zone->zone_end = smallest->sectors * cnt;
225
226	curr_zone_end = zone->zone_end;
227
228	/* now do the other zones */
229	for (i = 1; i < conf->nr_strip_zones; i++)
230	{
231		int j;
232
233		zone = conf->strip_zone + i;
234		dev = conf->devlist + i * mddev->raid_disks;
235
236		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
237		zone->dev_start = smallest->sectors;
238		smallest = NULL;
239		c = 0;
240
241		for (j=0; j<cnt; j++) {
242			rdev = conf->devlist[j];
243			if (rdev->sectors <= zone->dev_start) {
244				pr_debug("md/raid0:%s: checking %s ... nope\n",
245					 mdname(mddev),
246					 bdevname(rdev->bdev, b));
247				continue;
248			}
249			pr_debug("md/raid0:%s: checking %s ..."
250				 " contained as device %d\n",
251				 mdname(mddev),
252				 bdevname(rdev->bdev, b), c);
253			dev[c] = rdev;
254			c++;
255			if (!smallest || rdev->sectors < smallest->sectors) {
256				smallest = rdev;
257				pr_debug("md/raid0:%s:  (%llu) is smallest!\n",
258					 mdname(mddev),
259					 (unsigned long long)rdev->sectors);
260			}
261		}
262
263		zone->nb_dev = c;
264		sectors = (smallest->sectors - zone->dev_start) * c;
265		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
266			 mdname(mddev),
267			 zone->nb_dev, (unsigned long long)sectors);
268
269		curr_zone_end += sectors;
270		zone->zone_end = curr_zone_end;
271
272		pr_debug("md/raid0:%s: current zone start: %llu\n",
273			 mdname(mddev),
274			 (unsigned long long)smallest->sectors);
275	}
276
277	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
278	*private_conf = conf;
279
280	return 0;
281abort:
282	kfree(conf->strip_zone);
283	kfree(conf->devlist);
284	kfree(conf);
285	*private_conf = ERR_PTR(err);
286	return err;
287}
288
289/* Find the zone which holds a particular offset
290 * Update *sectorp to be an offset in that zone
291 */
292static struct strip_zone *find_zone(struct r0conf *conf,
293				    sector_t *sectorp)
294{
295	int i;
296	struct strip_zone *z = conf->strip_zone;
297	sector_t sector = *sectorp;
298
299	for (i = 0; i < conf->nr_strip_zones; i++)
300		if (sector < z[i].zone_end) {
301			if (i)
302				*sectorp = sector - z[i-1].zone_end;
303			return z + i;
304		}
305	BUG();
306}
307
308/*
309 * Remap the bio to the target device. We separate two flows: a
310 * power-of-2 flow and a general flow, for the sake of performance.
311*/
312static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
313				sector_t sector, sector_t *sector_offset)
314{
315	unsigned int sect_in_chunk;
316	sector_t chunk;
317	struct r0conf *conf = mddev->private;
318	int raid_disks = conf->strip_zone[0].nb_dev;
319	unsigned int chunk_sects = mddev->chunk_sectors;
320
321	if (is_power_of_2(chunk_sects)) {
322		int chunksect_bits = ffz(~chunk_sects);
323		/* find the sector offset inside the chunk */
324		sect_in_chunk  = sector & (chunk_sects - 1);
325		sector >>= chunksect_bits;
326		/* chunk in zone */
327		chunk = *sector_offset;
328		/* quotient is the chunk in real device*/
329		sector_div(chunk, zone->nb_dev << chunksect_bits);
330	} else{
331		sect_in_chunk = sector_div(sector, chunk_sects);
332		chunk = *sector_offset;
333		sector_div(chunk, chunk_sects * zone->nb_dev);
334	}
335	/*
336	*  position the bio over the real device
337	*  real sector = chunk in device + starting of zone
338	*	+ the position in the chunk
339	*/
340	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
341	return conf->devlist[(zone - conf->strip_zone)*raid_disks
342			     + sector_div(sector, zone->nb_dev)];
343}
344
345static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
346{
347	sector_t array_sectors = 0;
348	struct md_rdev *rdev;
349
350	WARN_ONCE(sectors || raid_disks,
351		  "%s does not support generic reshape\n", __func__);
352
353	rdev_for_each(rdev, mddev)
354		array_sectors += (rdev->sectors &
355				  ~(sector_t)(mddev->chunk_sectors-1));
356
357	return array_sectors;
358}
359
360static void raid0_free(struct mddev *mddev, void *priv);
361
362static int raid0_run(struct mddev *mddev)
363{
364	struct r0conf *conf;
365	int ret;
366
367	if (mddev->chunk_sectors == 0) {
368		pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
369		return -EINVAL;
370	}
371	if (md_check_no_bitmap(mddev))
372		return -EINVAL;
373
374	/* if private is not null, we are here after takeover */
375	if (mddev->private == NULL) {
376		ret = create_strip_zones(mddev, &conf);
377		if (ret < 0)
378			return ret;
379		mddev->private = conf;
380	}
381	conf = mddev->private;
382	if (mddev->queue) {
383		struct md_rdev *rdev;
384		bool discard_supported = false;
385
386		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
387		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
388		blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
389		blk_queue_max_discard_sectors(mddev->queue, UINT_MAX);
390
391		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
392		blk_queue_io_opt(mddev->queue,
393				 (mddev->chunk_sectors << 9) * mddev->raid_disks);
394
395		rdev_for_each(rdev, mddev) {
396			disk_stack_limits(mddev->gendisk, rdev->bdev,
397					  rdev->data_offset << 9);
398			if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
399				discard_supported = true;
400		}
401		if (!discard_supported)
402			blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
403		else
404			blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
405	}
406
407	/* calculate array device size */
408	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
409
410	pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
411		 mdname(mddev),
412		 (unsigned long long)mddev->array_sectors);
413
414	if (mddev->queue) {
415		/* calculate the max read-ahead size.
416		 * For read-ahead of large files to be effective, we need to
417		 * readahead at least twice a whole stripe. i.e. number of devices
418		 * multiplied by chunk size times 2.
419		 * If an individual device has an ra_pages greater than the
420		 * chunk size, then we will not drive that device as hard as it
421		 * wants.  We consider this a configuration error: a larger
422		 * chunksize should be used in that case.
423		 */
424		int stripe = mddev->raid_disks *
425			(mddev->chunk_sectors << 9) / PAGE_SIZE;
426		if (mddev->queue->backing_dev_info->ra_pages < 2* stripe)
427			mddev->queue->backing_dev_info->ra_pages = 2* stripe;
428	}
429
430	dump_zones(mddev);
431
432	ret = md_integrity_register(mddev);
433
434	return ret;
435}
436
437static void raid0_free(struct mddev *mddev, void *priv)
438{
439	struct r0conf *conf = priv;
440
441	kfree(conf->strip_zone);
442	kfree(conf->devlist);
443	kfree(conf);
444}
445
446/*
447 * Is the IO distributed over one or more chunks?
448*/
449static inline int is_io_in_chunk_boundary(struct mddev *mddev,
450			unsigned int chunk_sects, struct bio *bio)
451{
452	if (likely(is_power_of_2(chunk_sects))) {
453		return chunk_sects >=
454			((bio->bi_iter.bi_sector & (chunk_sects-1))
455					+ bio_sectors(bio));
456	} else{
457		sector_t sector = bio->bi_iter.bi_sector;
458		return chunk_sects >= (sector_div(sector, chunk_sects)
459						+ bio_sectors(bio));
460	}
461}
462
463static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
464{
465	struct r0conf *conf = mddev->private;
466	struct strip_zone *zone;
467	sector_t start = bio->bi_iter.bi_sector;
468	sector_t end;
469	unsigned int stripe_size;
470	sector_t first_stripe_index, last_stripe_index;
471	sector_t start_disk_offset;
472	unsigned int start_disk_index;
473	sector_t end_disk_offset;
474	unsigned int end_disk_index;
475	unsigned int disk;
476
477	zone = find_zone(conf, &start);
478
479	if (bio_end_sector(bio) > zone->zone_end) {
480		struct bio *split = bio_split(bio,
481			zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
482			mddev->bio_set);
483		bio_chain(split, bio);
484		generic_make_request(bio);
485		bio = split;
486		end = zone->zone_end;
487	} else
488		end = bio_end_sector(bio);
489
490	if (zone != conf->strip_zone)
491		end = end - zone[-1].zone_end;
492
493	/* Now start and end is the offset in zone */
494	stripe_size = zone->nb_dev * mddev->chunk_sectors;
495
496	first_stripe_index = start;
497	sector_div(first_stripe_index, stripe_size);
498	last_stripe_index = end;
499	sector_div(last_stripe_index, stripe_size);
500
501	start_disk_index = (int)(start - first_stripe_index * stripe_size) /
502		mddev->chunk_sectors;
503	start_disk_offset = ((int)(start - first_stripe_index * stripe_size) %
504		mddev->chunk_sectors) +
505		first_stripe_index * mddev->chunk_sectors;
506	end_disk_index = (int)(end - last_stripe_index * stripe_size) /
507		mddev->chunk_sectors;
508	end_disk_offset = ((int)(end - last_stripe_index * stripe_size) %
509		mddev->chunk_sectors) +
510		last_stripe_index * mddev->chunk_sectors;
511
512	for (disk = 0; disk < zone->nb_dev; disk++) {
513		sector_t dev_start, dev_end;
514		struct bio *discard_bio = NULL;
515		struct md_rdev *rdev;
516
517		if (disk < start_disk_index)
518			dev_start = (first_stripe_index + 1) *
519				mddev->chunk_sectors;
520		else if (disk > start_disk_index)
521			dev_start = first_stripe_index * mddev->chunk_sectors;
522		else
523			dev_start = start_disk_offset;
524
525		if (disk < end_disk_index)
526			dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
527		else if (disk > end_disk_index)
528			dev_end = last_stripe_index * mddev->chunk_sectors;
529		else
530			dev_end = end_disk_offset;
531
532		if (dev_end <= dev_start)
533			continue;
534
535		rdev = conf->devlist[(zone - conf->strip_zone) *
536			conf->strip_zone[0].nb_dev + disk];
537		if (__blkdev_issue_discard(rdev->bdev,
538			dev_start + zone->dev_start + rdev->data_offset,
539			dev_end - dev_start, GFP_NOIO, 0, &discard_bio) ||
540		    !discard_bio)
541			continue;
542		bio_chain(discard_bio, bio);
543		bio_clone_blkcg_association(discard_bio, bio);
544		if (mddev->gendisk)
545			trace_block_bio_remap(bdev_get_queue(rdev->bdev),
546				discard_bio, disk_devt(mddev->gendisk),
547				bio->bi_iter.bi_sector);
548		generic_make_request(discard_bio);
549	}
550	bio_endio(bio);
551}
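/*
 * A minimal userspace sketch (hypothetical numbers, single zone at
 * dev_start 0) of the per-disk range computation above: chunk = 128
 * sectors, 2 disks (stripe = 256), discarding zone offsets [100, 900).
 * Build with "cc -o discard discard.c".
 */
#include <stdio.h>

int main(void)
{
	unsigned long long start = 100, end = 900;
	unsigned int chunk = 128, nb_dev = 2, stripe = chunk * nb_dev;
	unsigned long long first = start / stripe;			/* 0 */
	unsigned long long last = end / stripe;				/* 3 */
	unsigned int start_disk = (start - first * stripe) / chunk;	/* 0 */
	unsigned long long start_off =
		(start - first * stripe) % chunk + first * chunk;	/* 100 */
	unsigned int end_disk = (end - last * stripe) / chunk;		/* 1 */
	unsigned long long end_off =
		(end - last * stripe) % chunk + last * chunk;		/* 388 */

	for (unsigned int disk = 0; disk < nb_dev; disk++) {
		unsigned long long dev_start, dev_end;

		if (disk < start_disk)
			dev_start = (first + 1) * chunk;
		else if (disk > start_disk)
			dev_start = first * chunk;
		else
			dev_start = start_off;
		if (disk < end_disk)
			dev_end = (last + 1) * chunk;
		else if (disk > end_disk)
			dev_end = last * chunk;
		else
			dev_end = end_off;
		/* prints: disk 0: [100, 512)  disk 1: [0, 388) */
		if (dev_end > dev_start)
			printf("disk %u: [%llu, %llu)\n",
			       disk, dev_start, dev_end);
	}
	return 0;
}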
552
553static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
554{
555	struct strip_zone *zone;
556	struct md_rdev *tmp_dev;
557	sector_t bio_sector;
558	sector_t sector;
559	unsigned chunk_sects;
560	unsigned sectors;
561
562	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
563		md_flush_request(mddev, bio);
564		return true;
565	}
566
567	if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
568		raid0_handle_discard(mddev, bio);
569		return true;
570	}
571
572	bio_sector = bio->bi_iter.bi_sector;
573	sector = bio_sector;
574	chunk_sects = mddev->chunk_sectors;
575
576	sectors = chunk_sects -
577		(likely(is_power_of_2(chunk_sects))
578		 ? (sector & (chunk_sects-1))
579		 : sector_div(sector, chunk_sects));
580
581	/* Restore due to sector_div */
582	sector = bio_sector;
583
584	if (sectors < bio_sectors(bio)) {
585		struct bio *split = bio_split(bio, sectors, GFP_NOIO, mddev->bio_set);
586		bio_chain(split, bio);
587		generic_make_request(bio);
588		bio = split;
589	}
590
591	zone = find_zone(mddev->private, &sector);
592	tmp_dev = map_sector(mddev, zone, sector, &sector);
593	bio_set_dev(bio, tmp_dev->bdev);
594	bio->bi_iter.bi_sector = sector + zone->dev_start +
595		tmp_dev->data_offset;
596
597	if (mddev->gendisk)
598		trace_block_bio_remap(bio->bi_disk->queue, bio,
599				disk_devt(mddev->gendisk), bio_sector);
600	mddev_check_writesame(mddev, bio);
601	mddev_check_write_zeroes(mddev, bio);
602	generic_make_request(bio);
603	return true;
604}
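/*
 * Worked example (hypothetical numbers): with chunk_sects = 128, a
 * 16-sector bio at sector 120 has 128 - 120 = 8 sectors split off and
 * mapped here; the chained 8-sector remainder starts at sector 128 and
 * is resubmitted through generic_make_request(), to be mapped whole on
 * a later call into this function.
 */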
605
606static void raid0_status(struct seq_file *seq, struct mddev *mddev)
607{
608	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
609	return;
610}
611
612static void *raid0_takeover_raid45(struct mddev *mddev)
613{
614	struct md_rdev *rdev;
615	struct r0conf *priv_conf;
616
617	if (mddev->degraded != 1) {
618		pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
619			mdname(mddev),
620			mddev->degraded);
621		return ERR_PTR(-EINVAL);
622	}
623
624	rdev_for_each(rdev, mddev) {
625		/* check slot number for a disk */
626		if (rdev->raid_disk == mddev->raid_disks-1) {
627			pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
628				mdname(mddev));
629			return ERR_PTR(-EINVAL);
630		}
631		rdev->sectors = mddev->dev_sectors;
632	}
633
634	/* Set new parameters */
635	mddev->new_level = 0;
636	mddev->new_layout = 0;
637	mddev->new_chunk_sectors = mddev->chunk_sectors;
638	mddev->raid_disks--;
639	mddev->delta_disks = -1;
640	/* make sure it will be not marked as dirty */
641	mddev->recovery_cp = MaxSector;
642	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
643
644	create_strip_zones(mddev, &priv_conf);
645
646	return priv_conf;
647}
648
649static void *raid0_takeover_raid10(struct mddev *mddev)
650{
651	struct r0conf *priv_conf;
652
653	/* Check layout:
654	 *  - far_copies must be 1
655	 *  - near_copies must be 2
656	 *  - disks number must be even
657	 *  - all mirrors must be already degraded
658	 */
659	if (mddev->layout != ((1 << 8) + 2)) {
660		pr_warn("md/raid0:%s: Raid0 cannot takeover layout: 0x%x\n",
661			mdname(mddev),
662			mddev->layout);
663		return ERR_PTR(-EINVAL);
664	}
665	if (mddev->raid_disks & 1) {
666		pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
667			mdname(mddev));
668		return ERR_PTR(-EINVAL);
669	}
670	if (mddev->degraded != (mddev->raid_disks>>1)) {
671		pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
672			mdname(mddev));
673		return ERR_PTR(-EINVAL);
674	}
675
676	/* Set new parameters */
677	mddev->new_level = 0;
678	mddev->new_layout = 0;
679	mddev->new_chunk_sectors = mddev->chunk_sectors;
680	mddev->delta_disks = - mddev->raid_disks / 2;
681	mddev->raid_disks += mddev->delta_disks;
682	mddev->degraded = 0;
683	/* make sure it will be not marked as dirty */
684	mddev->recovery_cp = MaxSector;
685	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
686
687	create_strip_zones(mddev, &priv_conf);
688	return priv_conf;
689}
690
691static void *raid0_takeover_raid1(struct mddev *mddev)
692{
693	struct r0conf *priv_conf;
694	int chunksect;
695
696	/* Check layout:
697	 *  - (N - 1) mirror drives must be already faulty
698	 */
699	if ((mddev->raid_disks - 1) != mddev->degraded) {
700		pr_err("md/raid0:%s: (N - 1) mirror drives must already be faulty!\n",
701		       mdname(mddev));
702		return ERR_PTR(-EINVAL);
703	}
704
705	/*
706	 * a raid1 doesn't have the notion of chunk size, so
707	 * figure out the largest suitable size we can use.
708	 */
709	chunksect = 64 * 2; /* 64K by default */
710
711	/* The array must be an exact multiple of chunksize */
712	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
713		chunksect >>= 1;
714
715	if ((chunksect << 9) < PAGE_SIZE)
716		/* array size does not allow a suitable chunk size */
717		return ERR_PTR(-EINVAL);
718
719	/* Set new parameters */
720	mddev->new_level = 0;
721	mddev->new_layout = 0;
722	mddev->new_chunk_sectors = chunksect;
723	mddev->chunk_sectors = chunksect;
724	mddev->delta_disks = 1 - mddev->raid_disks;
725	mddev->raid_disks = 1;
726	/* make sure it will be not marked as dirty */
727	mddev->recovery_cp = MaxSector;
728	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
729
730	create_strip_zones(mddev, &priv_conf);
731	return priv_conf;
732}
733
734static void *raid0_takeover(struct mddev *mddev)
735{
736	/* raid0 can take over:
737	 *  raid4 - if all data disks are active.
738	 *  raid5 - providing it is Raid4 layout and one disk is faulty
739	 *  raid10 - assuming we have all necessary active disks
740	 *  raid1 - with (N -1) mirror drives faulty
741	 */
742
743	if (mddev->bitmap) {
744		pr_warn("md/raid0: %s: cannot takeover array with bitmap\n",
745			mdname(mddev));
746		return ERR_PTR(-EBUSY);
747	}
748	if (mddev->level == 4)
749		return raid0_takeover_raid45(mddev);
750
751	if (mddev->level == 5) {
752		if (mddev->layout == ALGORITHM_PARITY_N)
753			return raid0_takeover_raid45(mddev);
754
755		pr_warn("md/raid0:%s: Raid0 can only take over Raid5 with layout: %d\n",
756			mdname(mddev), ALGORITHM_PARITY_N);
757	}
758
759	if (mddev->level == 10)
760		return raid0_takeover_raid10(mddev);
761
762	if (mddev->level == 1)
763		return raid0_takeover_raid1(mddev);
764
765	pr_warn("Takeover from raid%i to raid0 not supported\n",
766		mddev->level);
767
768	return ERR_PTR(-EINVAL);
769}
770
771static void raid0_quiesce(struct mddev *mddev, int quiesce)
772{
773}
774
775static struct md_personality raid0_personality=
776{
777	.name		= "raid0",
778	.level		= 0,
779	.owner		= THIS_MODULE,
780	.make_request	= raid0_make_request,
781	.run		= raid0_run,
782	.free		= raid0_free,
783	.status		= raid0_status,
784	.size		= raid0_size,
785	.takeover	= raid0_takeover,
786	.quiesce	= raid0_quiesce,
787	.congested	= raid0_congested,
788};
789
790static int __init raid0_init (void)
791{
792	return register_md_personality (&raid0_personality);
793}
794
795static void raid0_exit (void)
796{
797	unregister_md_personality (&raid0_personality);
798}
799
800module_init(raid0_init);
801module_exit(raid0_exit);
802MODULE_LICENSE("GPL");
803MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
804MODULE_ALIAS("md-personality-2"); /* RAID0 */
805MODULE_ALIAS("md-raid0");
806MODULE_ALIAS("md-level-0");