v3.5.6
/*
   raid0.c : Multiple Devices driver for Linux
	     Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
	     Copyright (C) 1999, 2000 Ingo Molnar, Red Hat


   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"

static int raid0_congested(void *data, int bits)
{
	struct mddev *mddev = data;
	struct r0conf *conf = mddev->private;
	struct md_rdev **devlist = conf->devlist;
	int raid_disks = conf->strip_zone[0].nb_dev;
	int i, ret = 0;

	if (mddev_congested(mddev, bits))
		return 1;

	for (i = 0; i < raid_disks && !ret ; i++) {
		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

		ret |= bdi_congested(&q->backing_dev_info, bits);
	}
	return ret;
}

/*
 * inform the user of the raid configuration
*/
static void dump_zones(struct mddev *mddev)
{
	int j, k;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	char b[BDEVNAME_SIZE];
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	printk(KERN_INFO "md: RAID0 configuration for %s - %d zone%s\n",
	       mdname(mddev),
	       conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
	for (j = 0; j < conf->nr_strip_zones; j++) {
		printk(KERN_INFO "md: zone%d=[", j);
		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			printk(KERN_CONT "%s%s", k?"/":"",
			bdevname(conf->devlist[j*raid_disks
						+ k]->bdev, b));
		printk(KERN_CONT "]\n");

		zone_size  = conf->strip_zone[j].zone_end - zone_start;
		printk(KERN_INFO "      zone-offset=%10lluKB, "
				"device-offset=%10lluKB, size=%10lluKB\n",
			(unsigned long long)zone_start>>1,
			(unsigned long long)conf->strip_zone[j].dev_start>>1,
			(unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
	printk(KERN_INFO "\n");
}

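/*
 * Illustrative output only (device names and sizes invented): for a
 * hypothetical md0 striping a 1TiB sda1 with a 2TiB sdb1, the above
 * prints roughly:
 *
 *   md: RAID0 configuration for md0 - 2 zones
 *   md: zone0=[sda1/sdb1]
 *         zone-offset=         0KB, device-offset=         0KB, size=2147483648KB
 *   md: zone1=[sdb1]
 *         zone-offset=2147483648KB, device-offset=1073741824KB, size=1073741824KB
 */
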
static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
	int i, c, err;
	sector_t curr_zone_end, sectors;
	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];
	char b2[BDEVNAME_SIZE];
	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);

	if (!conf)
		return -ENOMEM;
	rdev_for_each(rdev1, mddev) {
		pr_debug("md/raid0:%s: looking at %s\n",
			 mdname(mddev),
			 bdevname(rdev1->bdev, b));
		c = 0;

		/* round size to chunk_size */
		sectors = rdev1->sectors;
		sector_div(sectors, mddev->chunk_sectors);
		rdev1->sectors = sectors * mddev->chunk_sectors;

		rdev_for_each(rdev2, mddev) {
			pr_debug("md/raid0:%s:   comparing %s(%llu)"
				 " with %s(%llu)\n",
				 mdname(mddev),
				 bdevname(rdev1->bdev,b),
				 (unsigned long long)rdev1->sectors,
				 bdevname(rdev2->bdev,b2),
				 (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				pr_debug("md/raid0:%s:   END\n",
					 mdname(mddev));
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				pr_debug("md/raid0:%s:   EQUAL\n",
					 mdname(mddev));
				c = 1;
				break;
			}
			pr_debug("md/raid0:%s:   NOT EQUAL\n",
				 mdname(mddev));
		}
		if (!c) {
			pr_debug("md/raid0:%s:   ==> UNIQUE\n",
				 mdname(mddev));
			conf->nr_strip_zones++;
			pr_debug("md/raid0:%s: %d zones\n",
				 mdname(mddev), conf->nr_strip_zones);
		}
	}
	pr_debug("md/raid0:%s: FINAL %d zones\n",
		 mdname(mddev), conf->nr_strip_zones);
	err = -ENOMEM;
	conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
				conf->nr_strip_zones, GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	conf->devlist = kzalloc(sizeof(struct md_rdev*)*
				conf->nr_strip_zones*mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->devlist)
		goto abort;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	rdev_for_each(rdev1, mddev) {
		int j = rdev1->raid_disk;

		if (mddev->level == 10) {
			/* taking over a raid10-n2 array */
			j /= 2;
			rdev1->new_raid_disk = j;
		}

		if (mddev->level == 1) {
			/* taking over a raid1 array -
			 * we have only one active disk
			 */
			j = 0;
			rdev1->new_raid_disk = j;
		}

		if (j < 0 || j >= mddev->raid_disks) {
			printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
			       "aborting!\n", mdname(mddev), j);
			goto abort;
		}
		if (dev[j]) {
			printk(KERN_ERR "md/raid0:%s: multiple devices for %d - "
			       "aborting!\n", mdname(mddev), j);
			goto abort;
		}
		dev[j] = rdev1;

		disk_stack_limits(mddev->gendisk, rdev1->bdev,
				  rdev1->data_offset << 9);

		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn)
			conf->has_merge_bvec = 1;

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
		       "aborting!\n", mdname(mddev), cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->zone_end = smallest->sectors * cnt;

	curr_zone_end = zone->zone_end;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		int j;

		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j=0; j<cnt; j++) {
			rdev = conf->devlist[j];
			if (rdev->sectors <= zone->dev_start) {
				pr_debug("md/raid0:%s: checking %s ... nope\n",
					 mdname(mddev),
					 bdevname(rdev->bdev, b));
				continue;
			}
			pr_debug("md/raid0:%s: checking %s ..."
				 " contained as device %d\n",
				 mdname(mddev),
				 bdevname(rdev->bdev, b), c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				pr_debug("md/raid0:%s:  (%llu) is smallest!.\n",
					 mdname(mddev),
					 (unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		sectors = (smallest->sectors - zone->dev_start) * c;
		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
			 mdname(mddev),
			 zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		pr_debug("md/raid0:%s: current zone start: %llu\n",
			 mdname(mddev),
			 (unsigned long long)smallest->sectors);
	}
	mddev->queue->backing_dev_info.congested_fn = raid0_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;

	/*
	 * now since we have the hard sector sizes, we can make sure
	 * chunk size is a multiple of that sector size
	 */
	if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
		printk(KERN_ERR "md/raid0:%s: chunk_size of %d not valid\n",
		       mdname(mddev),
		       mddev->chunk_sectors << 9);
		goto abort;
	}

	blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
	blk_queue_io_opt(mddev->queue,
			 (mddev->chunk_sectors << 9) * mddev->raid_disks);

	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
	*private_conf = conf;

	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	*private_conf = NULL;
	return err;
}

/* Find the zone which holds a particular offset
 * Update *sectorp to be an offset in that zone
 */
static struct strip_zone *find_zone(struct r0conf *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}

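/*
 * Worked example (illustrative): with two zones whose zone_end values
 * are 1000 and 1600 sectors, array sector 1200 falls in zone 1
 * (1200 < 1600) and *sectorp is rewritten to the offset inside that
 * zone: 1200 - 1000 = 200.
 */
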
/*
 * remaps the bio to the target device. we separate two flows:
 * a power-of-2 flow and a general flow, for the sake of performance
*/
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
				sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	unsigned int chunk_sects = mddev->chunk_sectors;

	if (is_power_of_2(chunk_sects)) {
		int chunksect_bits = ffz(~chunk_sects);
		/* find the sector offset inside the chunk */
		sect_in_chunk  = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;
		/* chunk in zone */
		chunk = *sector_offset;
		/* quotient is the chunk in real device*/
		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else{
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}
	/*
	*  position the bio over the real device
	*  real sector = chunk in device + starting of zone
	*	+ the position in the chunk
	*/
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	return conf->devlist[(zone - conf->strip_zone)*raid_disks
			     + sector_div(sector, zone->nb_dev)];
}

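/*
 * Worked example (illustrative) for the power-of-2 path, with
 * chunk_sects = 128 and a 3-device zone, mapping array sector 1000
 * (zone offset also 1000, i.e. the first zone):
 *   sect_in_chunk  = 1000 & 127      = 104  (offset inside the chunk)
 *   chunk number   = 1000 >> 7       = 7    (7 % 3 = 1 -> device 1)
 *   chunk in dev   = 1000 / (3 << 7) = 2    (stripe row on that device)
 *   *sector_offset = 2 * 128 + 104   = 360  (sector within that device)
 */
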
/**
 *	raid0_mergeable_bvec -- tell bio layer if two requests can be merged
 *	@q: request queue
 *	@bvm: properties of new bio
 *	@biovec: the request that could be merged to it.
 *
 *	Return amount of bytes we can accept at this offset
 */
static int raid0_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	struct mddev *mddev = q->queuedata;
	struct r0conf *conf = mddev->private;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	sector_t sector_offset = sector;
	int max;
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bvm->bi_size >> 9;
	struct strip_zone *zone;
	struct md_rdev *rdev;
	struct request_queue *subq;

	if (is_power_of_2(chunk_sectors))
		max =  (chunk_sectors - ((sector & (chunk_sectors-1))
						+ bio_sectors)) << 9;
	else
		max =  (chunk_sectors - (sector_div(sector, chunk_sectors)
						+ bio_sectors)) << 9;
	if (max < 0)
		max = 0; /* bio_add cannot handle a negative return */
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	if (max < biovec->bv_len)
		/* too small already, no need to check further */
		return max;
	if (!conf->has_merge_bvec)
		return max;

	/* May need to check subordinate device */
	sector = sector_offset;
	zone = find_zone(mddev->private, &sector_offset);
	rdev = map_sector(mddev, zone, sector, &sector_offset);
	subq = bdev_get_queue(rdev->bdev);
	if (subq->merge_bvec_fn) {
		bvm->bi_bdev = rdev->bdev;
		bvm->bi_sector = sector_offset + zone->dev_start +
			rdev->data_offset;
		return min(max, subq->merge_bvec_fn(subq, bvm, biovec));
	} else
		return max;
}

static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	struct md_rdev *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	rdev_for_each(rdev, mddev)
		array_sectors += rdev->sectors;

	return array_sectors;
}

static int raid0_stop(struct mddev *mddev);

static int raid0_run(struct mddev *mddev)
{
	struct r0conf *conf;
	int ret;

	if (mddev->chunk_sectors == 0) {
		printk(KERN_ERR "md/raid0:%s: chunk size must be set.\n",
		       mdname(mddev));
		return -EINVAL;
	}
	if (md_check_no_bitmap(mddev))
		return -EINVAL;
	blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);

	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
		ret = create_strip_zones(mddev, &conf);
		if (ret < 0)
			return ret;
		mddev->private = conf;
	}
	conf = mddev->private;

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n",
	       mdname(mddev),
	       (unsigned long long)mddev->array_sectors);
	/* calculate the max read-ahead size.
	 * For read-ahead of large files to be effective, we need to
	 * readahead at least twice a whole stripe. i.e. number of devices
	 * multiplied by chunk size times 2.
	 * If an individual device has an ra_pages greater than the
	 * chunk size, then we will not drive that device as hard as it
	 * wants.  We consider this a configuration error: a larger
	 * chunksize should be used in that case.
	 */
	{
		int stripe = mddev->raid_disks *
			(mddev->chunk_sectors << 9) / PAGE_SIZE;
		if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
			mddev->queue->backing_dev_info.ra_pages = 2* stripe;
	}

	blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
	dump_zones(mddev);

	ret = md_integrity_register(mddev);
	if (ret)
		raid0_stop(mddev);

	return ret;
}

static int raid0_stop(struct mddev *mddev)
{
	struct r0conf *conf = mddev->private;

	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}

/*
 * Is the IO distributed over 1 or more chunks?
*/
static inline int is_io_in_chunk_boundary(struct mddev *mddev,
			unsigned int chunk_sects, struct bio *bio)
{
	if (likely(is_power_of_2(chunk_sects))) {
		return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
					+ (bio->bi_size >> 9));
	} else{
		sector_t sector = bio->bi_sector;
		return chunk_sects >= (sector_div(sector, chunk_sects)
						+ (bio->bi_size >> 9));
	}
}

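/*
 * Example (illustrative): with chunk_sects = 128, a 32-sector bio at
 * sector 64 fits in one chunk (64 + 32 <= 128), while the same bio at
 * sector 100 crosses a chunk boundary (100 + 32 > 128) and has to be
 * split by the caller.
 */
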
static void raid0_make_request(struct mddev *mddev, struct bio *bio)
{
	unsigned int chunk_sects;
	sector_t sector_offset;
	struct strip_zone *zone;
	struct md_rdev *tmp_dev;

	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
		md_flush_request(mddev, bio);
		return;
	}

	chunk_sects = mddev->chunk_sectors;
	if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
		sector_t sector = bio->bi_sector;
		struct bio_pair *bp;
		/* Sanity check -- queue functions should prevent this happening */
		if (bio->bi_vcnt != 1 ||
		    bio->bi_idx != 0)
			goto bad_map;
		/* This is a one page bio that upper layers
		 * refuse to split for us, so we need to split it.
		 */
		if (likely(is_power_of_2(chunk_sects)))
			bp = bio_split(bio, chunk_sects - (sector &
							   (chunk_sects-1)));
		else
			bp = bio_split(bio, chunk_sects -
				       sector_div(sector, chunk_sects));
		raid0_make_request(mddev, &bp->bio1);
		raid0_make_request(mddev, &bp->bio2);
		bio_pair_release(bp);
		return;
	}

	sector_offset = bio->bi_sector;
	zone = find_zone(mddev->private, &sector_offset);
	tmp_dev = map_sector(mddev, zone, bio->bi_sector,
			     &sector_offset);
	bio->bi_bdev = tmp_dev->bdev;
	bio->bi_sector = sector_offset + zone->dev_start +
		tmp_dev->data_offset;

	generic_make_request(bio);
	return;

bad_map:
	printk("md/raid0:%s: make_request bug: can't convert block across chunks"
	       " or bigger than %dk %llu %d\n",
	       mdname(mddev), chunk_sects / 2,
	       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);

	bio_io_error(bio);
	return;
}

static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
	return;
}

static void *raid0_takeover_raid45(struct mddev *mddev)
{
	struct md_rdev *rdev;
	struct r0conf *priv_conf;

	if (mddev->degraded != 1) {
		printk(KERN_ERR "md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
		       mdname(mddev),
		       mddev->degraded);
		return ERR_PTR(-EINVAL);
	}

	rdev_for_each(rdev, mddev) {
		/* check slot number for a disk */
		if (rdev->raid_disk == mddev->raid_disks-1) {
			printk(KERN_ERR "md/raid0:%s: raid5 must have missing parity disk!\n",
			       mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks--;
	mddev->delta_disks = -1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover_raid10(struct mddev *mddev)
{
	struct r0conf *priv_conf;

	/* Check layout:
	 *  - far_copies must be 1
	 *  - near_copies must be 2
	 *  - disks number must be even
	 *  - all mirrors must be already degraded
	 */
	if (mddev->layout != ((1 << 8) + 2)) {
		printk(KERN_ERR "md/raid0:%s:: Raid0 cannot takeover layout: 0x%x\n",
		       mdname(mddev),
		       mddev->layout);
		return ERR_PTR(-EINVAL);
	}
	if (mddev->raid_disks & 1) {
		printk(KERN_ERR "md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}
	if (mddev->degraded != (mddev->raid_disks>>1)) {
		printk(KERN_ERR "md/raid0:%s: All mirrors must be already degraded!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = - mddev->raid_disks / 2;
	mddev->raid_disks += mddev->delta_disks;
	mddev->degraded = 0;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover_raid1(struct mddev *mddev)
{
	struct r0conf *priv_conf;
	int chunksect;

	/* Check layout:
	 *  - (N - 1) mirror drives must be already faulty
	 */
	if ((mddev->raid_disks - 1) != mddev->degraded) {
		printk(KERN_ERR "md/raid0:%s: (N - 1) mirror drives must be already faulty!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/*
	 * a raid1 doesn't have the notion of chunk size, so
	 * figure out the largest suitable size we can use.
	 */
	chunksect = 64 * 2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
		chunksect >>= 1;

	if ((chunksect << 9) < PAGE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = chunksect;
	mddev->chunk_sectors = chunksect;
	mddev->delta_disks = 1 - mddev->raid_disks;
	mddev->raid_disks = 1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

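/*
 * Example (illustrative): chunksect starts at 128 sectors (64K).  For
 * an array of 1048640 sectors, 1048640 & 127 == 64, so chunksect is
 * halved once to 64 (32K), which divides the array size exactly and
 * still satisfies (64 << 9) >= PAGE_SIZE on 4K-page systems.
 */
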
static void *raid0_takeover(struct mddev *mddev)
{
	/* raid0 can take over:
	 *  raid4 - if all data disks are active.
	 *  raid5 - providing it is Raid4 layout and one disk is faulty
	 *  raid10 - assuming we have all necessary active disks
	 *  raid1 - with (N -1) mirror drives faulty
	 */
	if (mddev->level == 4)
		return raid0_takeover_raid45(mddev);

	if (mddev->level == 5) {
		if (mddev->layout == ALGORITHM_PARITY_N)
			return raid0_takeover_raid45(mddev);

		printk(KERN_ERR "md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
		       mdname(mddev), ALGORITHM_PARITY_N);
	}

	if (mddev->level == 10)
		return raid0_takeover_raid10(mddev);

	if (mddev->level == 1)
		return raid0_takeover_raid1(mddev);

	printk(KERN_ERR "Takeover from raid%i to raid0 not supported\n",
		mddev->level);

	return ERR_PTR(-EINVAL);
}

static void raid0_quiesce(struct mddev *mddev, int state)
{
}

static struct md_personality raid0_personality=
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.stop		= raid0_stop,
	.status		= raid0_status,
	.size		= raid0_size,
	.takeover	= raid0_takeover,
	.quiesce	= raid0_quiesce,
};

static int __init raid0_init (void)
{
	return register_md_personality (&raid0_personality);
}

static void raid0_exit (void)
{
	unregister_md_personality (&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");
v6.8
// SPDX-License-Identifier: GPL-2.0-or-later
/*
   raid0.c : Multiple Devices driver for Linux
	     Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
	     Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <trace/events/block.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"

static int default_layout = 0;
module_param(default_layout, int, 0644);

#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\
	 (1L << MD_FAILFAST_SUPPORTED) |\
	 (1L << MD_HAS_PPL) |		\
	 (1L << MD_HAS_MULTIPLE_PPLS))

/*
 * inform the user of the raid configuration
*/
static void dump_zones(struct mddev *mddev)
{
	int j, k;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
		 mdname(mddev),
		 conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
	for (j = 0; j < conf->nr_strip_zones; j++) {
		char line[200];
		int len = 0;

		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			len += scnprintf(line+len, 200-len, "%s%pg", k?"/":"",
				conf->devlist[j * raid_disks + k]->bdev);
		pr_debug("md: zone%d=[%s]\n", j, line);

		zone_size  = conf->strip_zone[j].zone_end - zone_start;
		pr_debug("      zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
			(unsigned long long)zone_start>>1,
			(unsigned long long)conf->strip_zone[j].dev_start>>1,
			(unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
}

static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
	int i, c, err;
	sector_t curr_zone_end, sectors;
	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	unsigned blksize = 512;

	*private_conf = ERR_PTR(-ENOMEM);
	if (!conf)
		return -ENOMEM;
	rdev_for_each(rdev1, mddev) {
		pr_debug("md/raid0:%s: looking at %pg\n",
			 mdname(mddev),
			 rdev1->bdev);
		c = 0;

		/* round size to chunk_size */
		sectors = rdev1->sectors;
		sector_div(sectors, mddev->chunk_sectors);
		rdev1->sectors = sectors * mddev->chunk_sectors;

		blksize = max(blksize, queue_logical_block_size(
				      rdev1->bdev->bd_disk->queue));

		rdev_for_each(rdev2, mddev) {
			pr_debug("md/raid0:%s:   comparing %pg(%llu)"
				 " with %pg(%llu)\n",
				 mdname(mddev),
				 rdev1->bdev,
				 (unsigned long long)rdev1->sectors,
				 rdev2->bdev,
				 (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				pr_debug("md/raid0:%s:   END\n",
					 mdname(mddev));
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				pr_debug("md/raid0:%s:   EQUAL\n",
					 mdname(mddev));
				c = 1;
				break;
			}
			pr_debug("md/raid0:%s:   NOT EQUAL\n",
				 mdname(mddev));
		}
		if (!c) {
			pr_debug("md/raid0:%s:   ==> UNIQUE\n",
				 mdname(mddev));
			conf->nr_strip_zones++;
			pr_debug("md/raid0:%s: %d zones\n",
				 mdname(mddev), conf->nr_strip_zones);
		}
	}
	pr_debug("md/raid0:%s: FINAL %d zones\n",
		 mdname(mddev), conf->nr_strip_zones);

	/*
	 * now since we have the hard sector sizes, we can make sure
	 * chunk size is a multiple of that sector size
	 */
	if ((mddev->chunk_sectors << 9) % blksize) {
		pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
			mdname(mddev),
			mddev->chunk_sectors << 9, blksize);
		err = -EINVAL;
		goto abort;
	}

	err = -ENOMEM;
	conf->strip_zone = kcalloc(conf->nr_strip_zones,
				   sizeof(struct strip_zone),
				   GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	conf->devlist = kzalloc(array3_size(sizeof(struct md_rdev *),
					    conf->nr_strip_zones,
					    mddev->raid_disks),
				GFP_KERNEL);
	if (!conf->devlist)
		goto abort;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	rdev_for_each(rdev1, mddev) {
		int j = rdev1->raid_disk;

		if (mddev->level == 10) {
			/* taking over a raid10-n2 array */
			j /= 2;
			rdev1->new_raid_disk = j;
		}

		if (mddev->level == 1) {
			/* taking over a raid1 array -
			 * we have only one active disk
			 */
			j = 0;
			rdev1->new_raid_disk = j;
		}

		if (j < 0) {
			pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
				mdname(mddev));
			goto abort;
		}
		if (j >= mddev->raid_disks) {
			pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		if (dev[j]) {
			pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		dev[j] = rdev1;

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
			mdname(mddev), cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->zone_end = smallest->sectors * cnt;

	curr_zone_end = zone->zone_end;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		int j;

		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j=0; j<cnt; j++) {
			rdev = conf->devlist[j];
			if (rdev->sectors <= zone->dev_start) {
				pr_debug("md/raid0:%s: checking %pg ... nope\n",
					 mdname(mddev),
					 rdev->bdev);
				continue;
			}
			pr_debug("md/raid0:%s: checking %pg ..."
				 " contained as device %d\n",
				 mdname(mddev),
				 rdev->bdev, c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				pr_debug("md/raid0:%s:  (%llu) is smallest!.\n",
					 mdname(mddev),
					 (unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		sectors = (smallest->sectors - zone->dev_start) * c;
		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
			 mdname(mddev),
			 zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		pr_debug("md/raid0:%s: current zone start: %llu\n",
			 mdname(mddev),
			 (unsigned long long)smallest->sectors);
	}

	if (conf->nr_strip_zones == 1 || conf->strip_zone[1].nb_dev == 1) {
		conf->layout = RAID0_ORIG_LAYOUT;
	} else if (mddev->layout == RAID0_ORIG_LAYOUT ||
		   mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
		conf->layout = mddev->layout;
	} else if (default_layout == RAID0_ORIG_LAYOUT ||
		   default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
		conf->layout = default_layout;
	} else {
		pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
		       mdname(mddev));
		pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
		err = -EOPNOTSUPP;
		goto abort;
	}

	if (conf->layout == RAID0_ORIG_LAYOUT) {
		for (i = 1; i < conf->nr_strip_zones; i++) {
			sector_t first_sector = conf->strip_zone[i-1].zone_end;

			sector_div(first_sector, mddev->chunk_sectors);
			zone = conf->strip_zone + i;
			/* disk_shift is first disk index used in the zone */
			zone->disk_shift = sector_div(first_sector,
						      zone->nb_dev);
		}
	}

	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
	*private_conf = conf;

	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	*private_conf = ERR_PTR(err);
	return err;
}

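/*
 * Illustrative geometry (sizes invented): two members of 1000 and 3000
 * chunks produce two zones.  Zone 0 stripes across both devices for
 * 2 * 1000 chunks; zone 1 covers the remaining 2000 chunks of the
 * larger device alone.  Because zone 1 then has nb_dev == 1, the
 * original and alternate multi-zone layouts coincide and the array is
 * assembled above without consulting raid0.default_layout.
 */
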
/* Find the zone which holds a particular offset
 * Update *sectorp to be an offset in that zone
 */
static struct strip_zone *find_zone(struct r0conf *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}

/*
 * remaps the bio to the target device. we separate two flows:
 * a power-of-2 flow and a general flow, for the sake of performance
*/
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
				sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	unsigned int chunk_sects = mddev->chunk_sectors;

	if (is_power_of_2(chunk_sects)) {
		int chunksect_bits = ffz(~chunk_sects);
		/* find the sector offset inside the chunk */
		sect_in_chunk  = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;
		/* chunk in zone */
		chunk = *sector_offset;
		/* quotient is the chunk in real device*/
		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else{
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}
	/*
	*  position the bio over the real device
	*  real sector = chunk in device + starting of zone
	*	+ the position in the chunk
	*/
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	return conf->devlist[(zone - conf->strip_zone)*raid_disks
			     + sector_div(sector, zone->nb_dev)];
}

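/*
 * Note: sector_div(x, y) divides the 64-bit sector_t x by y in place
 * (x becomes the quotient) and returns the remainder; it is used here
 * instead of plain '/' and '%' so the 64-bit arithmetic also works on
 * 32-bit platforms.
 */
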
static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	struct md_rdev *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	rdev_for_each(rdev, mddev)
		array_sectors += (rdev->sectors &
				  ~(sector_t)(mddev->chunk_sectors-1));

	return array_sectors;
}

static void free_conf(struct mddev *mddev, struct r0conf *conf)
{
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
}

static void raid0_free(struct mddev *mddev, void *priv)
{
	struct r0conf *conf = priv;

	free_conf(mddev, conf);
}

static int raid0_run(struct mddev *mddev)
{
	struct r0conf *conf;
	int ret;

	if (mddev->chunk_sectors == 0) {
		pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
		return -EINVAL;
	}
	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
		ret = create_strip_zones(mddev, &conf);
		if (ret < 0)
			return ret;
		mddev->private = conf;
	}
	conf = mddev->private;
	if (mddev->queue) {
		struct md_rdev *rdev;

		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);

		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
		blk_queue_io_opt(mddev->queue,
				 (mddev->chunk_sectors << 9) * mddev->raid_disks);

		rdev_for_each(rdev, mddev) {
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
		}
	}

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
		 mdname(mddev),
		 (unsigned long long)mddev->array_sectors);

	dump_zones(mddev);

	ret = md_integrity_register(mddev);
	if (ret)
		free_conf(mddev, conf);

	return ret;
}

/*
 * Convert disk_index to the disk order in which it is read/written.
 *  For example, if we have 4 disks, they are numbered 0,1,2,3. If we
 *  write the disks starting at disk 3, then the read/write order would
 *  be disk 3, then 0, then 1, and then disk 2 and we want map_disk_shift()
 *  to map the disks as follows 0,1,2,3 => 1,2,3,0. So disk 0 would map
 *  to 1, 1 to 2, 2 to 3, and 3 to 0. That way we can compare disks in
 *  that 'output' space to understand the read/write disk ordering.
 */
static int map_disk_shift(int disk_index, int num_disks, int disk_shift)
{
	return ((disk_index + num_disks - disk_shift) % num_disks);
}

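/*
 * Worked example (illustrative): with num_disks = 4 and disk_shift = 3,
 * map_disk_shift() maps disk 3 to (3 + 4 - 3) % 4 = 0 and disks 0, 1, 2
 * to 1, 2, 3, matching the 0,1,2,3 => 1,2,3,0 ordering described above.
 */
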
static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	sector_t start = bio->bi_iter.bi_sector;
	sector_t end;
	unsigned int stripe_size;
	sector_t first_stripe_index, last_stripe_index;
	sector_t start_disk_offset;
	unsigned int start_disk_index;
	sector_t end_disk_offset;
	unsigned int end_disk_index;
	unsigned int disk;
	sector_t orig_start, orig_end;

	orig_start = start;
	zone = find_zone(conf, &start);

	if (bio_end_sector(bio) > zone->zone_end) {
		struct bio *split = bio_split(bio,
			zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
			&mddev->bio_set);
		bio_chain(split, bio);
		submit_bio_noacct(bio);
		bio = split;
		end = zone->zone_end;
	} else
		end = bio_end_sector(bio);

	orig_end = end;
	if (zone != conf->strip_zone)
		end = end - zone[-1].zone_end;

	/* Now start and end is the offset in zone */
	stripe_size = zone->nb_dev * mddev->chunk_sectors;

	first_stripe_index = start;
	sector_div(first_stripe_index, stripe_size);
	last_stripe_index = end;
	sector_div(last_stripe_index, stripe_size);

	/* In the first zone the original and alternate layouts are the same */
	if ((conf->layout == RAID0_ORIG_LAYOUT) && (zone != conf->strip_zone)) {
		sector_div(orig_start, mddev->chunk_sectors);
		start_disk_index = sector_div(orig_start, zone->nb_dev);
		start_disk_index = map_disk_shift(start_disk_index,
						  zone->nb_dev,
						  zone->disk_shift);
		sector_div(orig_end, mddev->chunk_sectors);
		end_disk_index = sector_div(orig_end, zone->nb_dev);
		end_disk_index = map_disk_shift(end_disk_index,
						zone->nb_dev, zone->disk_shift);
	} else {
		start_disk_index = (int)(start - first_stripe_index * stripe_size) /
			mddev->chunk_sectors;
		end_disk_index = (int)(end - last_stripe_index * stripe_size) /
			mddev->chunk_sectors;
	}
	start_disk_offset = ((int)(start - first_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		first_stripe_index * mddev->chunk_sectors;
	end_disk_offset = ((int)(end - last_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		last_stripe_index * mddev->chunk_sectors;

	for (disk = 0; disk < zone->nb_dev; disk++) {
		sector_t dev_start, dev_end;
		struct md_rdev *rdev;
		int compare_disk;

		compare_disk = map_disk_shift(disk, zone->nb_dev,
					      zone->disk_shift);

		if (compare_disk < start_disk_index)
			dev_start = (first_stripe_index + 1) *
				mddev->chunk_sectors;
		else if (compare_disk > start_disk_index)
			dev_start = first_stripe_index * mddev->chunk_sectors;
		else
			dev_start = start_disk_offset;

		if (compare_disk < end_disk_index)
			dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
		else if (compare_disk > end_disk_index)
			dev_end = last_stripe_index * mddev->chunk_sectors;
		else
			dev_end = end_disk_offset;

		if (dev_end <= dev_start)
			continue;

		rdev = conf->devlist[(zone - conf->strip_zone) *
			conf->strip_zone[0].nb_dev + disk];
		md_submit_discard_bio(mddev, rdev, bio,
			dev_start + zone->dev_start + rdev->data_offset,
			dev_end - dev_start);
	}
	bio_endio(bio);
}

static void raid0_map_submit_bio(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	struct md_rdev *tmp_dev;
	sector_t bio_sector = bio->bi_iter.bi_sector;
	sector_t sector = bio_sector;

	md_account_bio(mddev, &bio);

	zone = find_zone(mddev->private, &sector);
	switch (conf->layout) {
	case RAID0_ORIG_LAYOUT:
		tmp_dev = map_sector(mddev, zone, bio_sector, &sector);
		break;
	case RAID0_ALT_MULTIZONE_LAYOUT:
		tmp_dev = map_sector(mddev, zone, sector, &sector);
		break;
	default:
		WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
		bio_io_error(bio);
		return;
	}

	if (unlikely(is_rdev_broken(tmp_dev))) {
		bio_io_error(bio);
		md_error(mddev, tmp_dev);
		return;
	}

	bio_set_dev(bio, tmp_dev->bdev);
	bio->bi_iter.bi_sector = sector + zone->dev_start +
		tmp_dev->data_offset;

	if (mddev->gendisk)
		trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
				      bio_sector);
	mddev_check_write_zeroes(mddev, bio);
	submit_bio_noacct(bio);
}

static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
{
	sector_t sector;
	unsigned chunk_sects;
	unsigned sectors;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
	    && md_flush_request(mddev, bio))
		return true;

	if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
		raid0_handle_discard(mddev, bio);
		return true;
	}

	sector = bio->bi_iter.bi_sector;
	chunk_sects = mddev->chunk_sectors;

	sectors = chunk_sects -
		(likely(is_power_of_2(chunk_sects))
		 ? (sector & (chunk_sects-1))
		 : sector_div(sector, chunk_sects));

	if (sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, sectors, GFP_NOIO,
					      &mddev->bio_set);
		bio_chain(split, bio);
		raid0_map_submit_bio(mddev, bio);
		bio = split;
	}

	raid0_map_submit_bio(mddev, bio);
	return true;
}

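/*
 * Example (illustrative): with chunk_sects = 128, a 64-sector bio at
 * sector 100 has only 128 - (100 & 127) = 28 sectors left in its chunk,
 * so it is split: the chunk-aligned 36-sector remainder is mapped and
 * submitted first, then the 28-sector head follows through the same
 * raid0_map_submit_bio() path.
 */
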
static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
	return;
}

static void raid0_error(struct mddev *mddev, struct md_rdev *rdev)
{
	if (!test_and_set_bit(MD_BROKEN, &mddev->flags)) {
		char *md_name = mdname(mddev);

		pr_crit("md/raid0%s: Disk failure on %pg detected, failing array.\n",
			md_name, rdev->bdev);
	}
}

static void *raid0_takeover_raid45(struct mddev *mddev)
{
	struct md_rdev *rdev;
	struct r0conf *priv_conf;

	if (mddev->degraded != 1) {
		pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
			mdname(mddev),
			mddev->degraded);
		return ERR_PTR(-EINVAL);
	}

	rdev_for_each(rdev, mddev) {
		/* check slot number for a disk */
		if (rdev->raid_disk == mddev->raid_disks-1) {
			pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
				mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
		rdev->sectors = mddev->dev_sectors;
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks--;
	mddev->delta_disks = -1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);

	return priv_conf;
}

static void *raid0_takeover_raid10(struct mddev *mddev)
{
	struct r0conf *priv_conf;

	/* Check layout:
	 *  - far_copies must be 1
	 *  - near_copies must be 2
	 *  - disks number must be even
	 *  - all mirrors must be already degraded
	 */
	if (mddev->layout != ((1 << 8) + 2)) {
		pr_warn("md/raid0:%s:: Raid0 cannot takeover layout: 0x%x\n",
			mdname(mddev),
			mddev->layout);
		return ERR_PTR(-EINVAL);
	}
	if (mddev->raid_disks & 1) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}
	if (mddev->degraded != (mddev->raid_disks>>1)) {
		pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = - mddev->raid_disks / 2;
	mddev->raid_disks += mddev->delta_disks;
	mddev->degraded = 0;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover_raid1(struct mddev *mddev)
{
	struct r0conf *priv_conf;
	int chunksect;

	/* Check layout:
	 *  - (N - 1) mirror drives must be already faulty
	 */
	if ((mddev->raid_disks - 1) != mddev->degraded) {
		pr_err("md/raid0:%s: (N - 1) mirror drives must be already faulty!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/*
	 * a raid1 doesn't have the notion of chunk size, so
	 * figure out the largest suitable size we can use.
	 */
	chunksect = 64 * 2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
		chunksect >>= 1;

	if ((chunksect << 9) < PAGE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = chunksect;
	mddev->chunk_sectors = chunksect;
	mddev->delta_disks = 1 - mddev->raid_disks;
	mddev->raid_disks = 1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover(struct mddev *mddev)
{
	/* raid0 can take over:
	 *  raid4 - if all data disks are active.
	 *  raid5 - providing it is Raid4 layout and one disk is faulty
	 *  raid10 - assuming we have all necessary active disks
	 *  raid1 - with (N -1) mirror drives faulty
	 */

	if (mddev->bitmap) {
		pr_warn("md/raid0: %s: cannot takeover array with bitmap\n",
			mdname(mddev));
		return ERR_PTR(-EBUSY);
	}
	if (mddev->level == 4)
		return raid0_takeover_raid45(mddev);

	if (mddev->level == 5) {
		if (mddev->layout == ALGORITHM_PARITY_N)
			return raid0_takeover_raid45(mddev);

		pr_warn("md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
			mdname(mddev), ALGORITHM_PARITY_N);
	}

	if (mddev->level == 10)
		return raid0_takeover_raid10(mddev);

	if (mddev->level == 1)
		return raid0_takeover_raid1(mddev);

	pr_warn("Takeover from raid%i to raid0 not supported\n",
		mddev->level);

	return ERR_PTR(-EINVAL);
}

static void raid0_quiesce(struct mddev *mddev, int quiesce)
{
}

static struct md_personality raid0_personality=
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.free		= raid0_free,
	.status		= raid0_status,
	.size		= raid0_size,
	.takeover	= raid0_takeover,
	.quiesce	= raid0_quiesce,
	.error_handler	= raid0_error,
};

static int __init raid0_init (void)
{
	return register_md_personality (&raid0_personality);
}

static void raid0_exit (void)
{
	unregister_md_personality (&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");