v5.9
// SPDX-License-Identifier: GPL-2.0-or-later
/*
   raid0.c : Multiple Devices driver for Linux
	     Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
	     Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <trace/events/block.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"

static int default_layout = 0;
module_param(default_layout, int, 0644);

#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\
	 (1L << MD_FAILFAST_SUPPORTED) |\
	 (1L << MD_HAS_PPL) |		\
	 (1L << MD_HAS_MULTIPLE_PPLS))

/*
 * inform the user of the raid configuration
*/
static void dump_zones(struct mddev *mddev)
{
	int j, k;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	char b[BDEVNAME_SIZE];
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
		 mdname(mddev),
		 conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
	for (j = 0; j < conf->nr_strip_zones; j++) {
		char line[200];
		int len = 0;

		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			len += snprintf(line+len, 200-len, "%s%s", k?"/":"",
					bdevname(conf->devlist[j*raid_disks
							       + k]->bdev, b));
		pr_debug("md: zone%d=[%s]\n", j, line);

		zone_size  = conf->strip_zone[j].zone_end - zone_start;
		pr_debug("      zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
			(unsigned long long)zone_start>>1,
			(unsigned long long)conf->strip_zone[j].dev_start>>1,
			(unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
}

static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
	int i, c, err;
	sector_t curr_zone_end, sectors;
	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];
	char b2[BDEVNAME_SIZE];
	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	unsigned blksize = 512;

	*private_conf = ERR_PTR(-ENOMEM);
	if (!conf)
		return -ENOMEM;
	rdev_for_each(rdev1, mddev) {
		pr_debug("md/raid0:%s: looking at %s\n",
			 mdname(mddev),
			 bdevname(rdev1->bdev, b));
		c = 0;

		/* round size to chunk_size */
		sectors = rdev1->sectors;
		sector_div(sectors, mddev->chunk_sectors);
		rdev1->sectors = sectors * mddev->chunk_sectors;

		blksize = max(blksize, queue_logical_block_size(
				      rdev1->bdev->bd_disk->queue));

		rdev_for_each(rdev2, mddev) {
			pr_debug("md/raid0:%s:   comparing %s(%llu)"
				 " with %s(%llu)\n",
				 mdname(mddev),
				 bdevname(rdev1->bdev,b),
				 (unsigned long long)rdev1->sectors,
				 bdevname(rdev2->bdev,b2),
				 (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				pr_debug("md/raid0:%s:   END\n",
					 mdname(mddev));
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				pr_debug("md/raid0:%s:   EQUAL\n",
					 mdname(mddev));
				c = 1;
				break;
			}
			pr_debug("md/raid0:%s:   NOT EQUAL\n",
				 mdname(mddev));
		}
		if (!c) {
			pr_debug("md/raid0:%s:   ==> UNIQUE\n",
				 mdname(mddev));
			conf->nr_strip_zones++;
			pr_debug("md/raid0:%s: %d zones\n",
				 mdname(mddev), conf->nr_strip_zones);
		}
	}
	pr_debug("md/raid0:%s: FINAL %d zones\n",
		 mdname(mddev), conf->nr_strip_zones);

	if (conf->nr_strip_zones == 1) {
		conf->layout = RAID0_ORIG_LAYOUT;
	} else if (mddev->layout == RAID0_ORIG_LAYOUT ||
		   mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
		conf->layout = mddev->layout;
	} else if (default_layout == RAID0_ORIG_LAYOUT ||
		   default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
		conf->layout = default_layout;
	} else {
		pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
		       mdname(mddev));
		pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
		err = -ENOTSUPP;
		goto abort;
	}
	/*
	 * now since we have the hard sector sizes, we can make sure
	 * chunk size is a multiple of that sector size
	 */
	if ((mddev->chunk_sectors << 9) % blksize) {
		pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
			mdname(mddev),
			mddev->chunk_sectors << 9, blksize);
		err = -EINVAL;
		goto abort;
	}

	err = -ENOMEM;
	conf->strip_zone = kcalloc(conf->nr_strip_zones,
				   sizeof(struct strip_zone),
				   GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	conf->devlist = kzalloc(array3_size(sizeof(struct md_rdev *),
					    conf->nr_strip_zones,
					    mddev->raid_disks),
				GFP_KERNEL);
	if (!conf->devlist)
		goto abort;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	rdev_for_each(rdev1, mddev) {
		int j = rdev1->raid_disk;

		if (mddev->level == 10) {
			/* taking over a raid10-n2 array */
			j /= 2;
			rdev1->new_raid_disk = j;
		}

		if (mddev->level == 1) {
			/* taking over a raid1 array -
			 * we have only one active disk
			 */
			j = 0;
			rdev1->new_raid_disk = j;
		}

		if (j < 0) {
			pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
				mdname(mddev));
			goto abort;
		}
		if (j >= mddev->raid_disks) {
			pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		if (dev[j]) {
			pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		dev[j] = rdev1;

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
			mdname(mddev), cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->zone_end = smallest->sectors * cnt;

	curr_zone_end = zone->zone_end;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		int j;

		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j=0; j<cnt; j++) {
			rdev = conf->devlist[j];
			if (rdev->sectors <= zone->dev_start) {
				pr_debug("md/raid0:%s: checking %s ... nope\n",
					 mdname(mddev),
					 bdevname(rdev->bdev, b));
				continue;
			}
			pr_debug("md/raid0:%s: checking %s ..."
				 " contained as device %d\n",
				 mdname(mddev),
				 bdevname(rdev->bdev, b), c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				pr_debug("md/raid0:%s:  (%llu) is smallest!\n",
					 mdname(mddev),
					 (unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		sectors = (smallest->sectors - zone->dev_start) * c;
		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
			 mdname(mddev),
			 zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		pr_debug("md/raid0:%s: current zone start: %llu\n",
			 mdname(mddev),
			 (unsigned long long)smallest->sectors);
	}

	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
	*private_conf = conf;

	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	*private_conf = ERR_PTR(err);
	return err;
}

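/*
 * Editor's sketch (not driver code): how the zones built above stack up
 * for an assumed set of member sizes, listed ascending. With members of
 * 100, 250 and 250 chunks, zone 0 spans all three devices up to the
 * smallest (ending at 300), and zone 1 spans the remaining 150 chunks
 * of the two larger devices (ending at 600). Compile separately.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dev_size[] = { 100, 250, 250 };	/* assumed, ascending */
	uint64_t zone_end = 0, dev_start = 0;
	int i, n = 3;

	for (i = 0; i < n; i++) {
		int nb_dev = n - i;	/* devices extending past dev_start */

		if (dev_size[i] == dev_start)
			continue;	/* equal-sized devices share a zone */
		zone_end += (dev_size[i] - dev_start) * nb_dev;
		printf("zone: %d devices, zone_end=%llu\n", nb_dev,
		       (unsigned long long)zone_end);
		dev_start = dev_size[i];
	}
	return 0;
}
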
/* Find the zone which holds a particular offset
 * Update *sectorp to be an offset in that zone
 */
static struct strip_zone *find_zone(struct r0conf *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}

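/*
 * Editor's illustration (assumed values, not driver code): zone_end is
 * cumulative across the array, so the lookup scans for the first zone
 * whose zone_end exceeds the sector and subtracts the previous boundary
 * to get the in-zone offset.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t zone_end[] = { 1000, 1600, 1900 };	/* assumed zones */
	uint64_t sector = 1200;				/* array sector */
	int i;

	for (i = 0; i < 3; i++)
		if (sector < zone_end[i]) {
			uint64_t off = i ? sector - zone_end[i - 1] : sector;

			printf("zone %d, offset %llu\n", i,
			       (unsigned long long)off);
			break;
		}
	return 0;	/* prints: zone 1, offset 200 */
}
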
/*
 * remap the bio to the target device. We separate two flows,
 * a power-of-2 flow and a general flow, for the sake of performance.
*/
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
				sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	unsigned int chunk_sects = mddev->chunk_sectors;

	if (is_power_of_2(chunk_sects)) {
		int chunksect_bits = ffz(~chunk_sects);
		/* find the sector offset inside the chunk */
		sect_in_chunk  = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;
		/* chunk in zone */
		chunk = *sector_offset;
		/* quotient is the chunk in real device */
		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else {
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}
	/*
	 * position the bio over the real device
	 * real sector = chunk in device + starting of zone
	 *	+ the position in the chunk
	 */
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	return conf->devlist[(zone - conf->strip_zone)*raid_disks
			     + sector_div(sector, zone->nb_dev)];
}

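/*
 * Editor's worked example of the power-of-2 path above (assumed
 * geometry, single zone so sector == *sector_offset): chunk_sects = 128,
 * nb_dev = 3. Zone sector 1000 sits 104 sectors into chunk 7; chunk 7 is
 * the second chunk (7 / 3 = 2) on device 7 % 3 = 1, so the bio lands on
 * device 1 at sector 2 * 128 + 104 = 360.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned chunk_sects = 128, nb_dev = 3;	/* assumed */
	uint64_t sector = 1000;			/* offset within the zone */
	unsigned sect_in_chunk = sector & (chunk_sects - 1);
	uint64_t chunk_in_zone = sector >> __builtin_ctz(chunk_sects);
	unsigned dev = chunk_in_zone % nb_dev;
	uint64_t dev_sector = (chunk_in_zone / nb_dev) * chunk_sects +
			      sect_in_chunk;

	printf("dev %u, sector %llu\n", dev,
	       (unsigned long long)dev_sector);
	return 0;	/* prints: dev 1, sector 360 */
}
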
static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	struct md_rdev *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	rdev_for_each(rdev, mddev)
		array_sectors += (rdev->sectors &
				  ~(sector_t)(mddev->chunk_sectors-1));

	return array_sectors;
}

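/*
 * Editor's note (assumed values): each member contributes only whole
 * chunks to the array size. With chunk_sectors = 128, a 1000-sector
 * device contributes 1000 & ~127 = 896 sectors.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long dev_sectors = 1000, chunk_sectors = 128;

	printf("%llu\n", dev_sectors & ~(chunk_sectors - 1));	/* 896 */
	return 0;
}
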
static void raid0_free(struct mddev *mddev, void *priv);

static int raid0_run(struct mddev *mddev)
{
	struct r0conf *conf;
	int ret;

	if (mddev->chunk_sectors == 0) {
		pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
		return -EINVAL;
	}
	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
		ret = create_strip_zones(mddev, &conf);
		if (ret < 0)
			return ret;
		mddev->private = conf;
	}
	conf = mddev->private;
	if (mddev->queue) {
		struct md_rdev *rdev;
		bool discard_supported = false;

		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_discard_sectors(mddev->queue, UINT_MAX);

		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
		blk_queue_io_opt(mddev->queue,
				 (mddev->chunk_sectors << 9) * mddev->raid_disks);

		rdev_for_each(rdev, mddev) {
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
			if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
				discard_supported = true;
		}
		if (!discard_supported)
			blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
		else
			blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
	}

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
		 mdname(mddev),
		 (unsigned long long)mddev->array_sectors);

	if (mddev->queue) {
		/* calculate the max read-ahead size.
		 * For read-ahead of large files to be effective, we need to
		 * read ahead at least twice a whole stripe, i.e. the number
		 * of devices multiplied by chunk size times 2.
		 * If an individual device has an ra_pages greater than the
		 * chunk size, then we will not drive that device as hard as it
		 * wants.  We consider this a configuration error: a larger
		 * chunksize should be used in that case.
		 */
		int stripe = mddev->raid_disks *
			(mddev->chunk_sectors << 9) / PAGE_SIZE;
		if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
	}

	dump_zones(mddev);

	ret = md_integrity_register(mddev);

	return ret;
}

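/*
 * Editor's worked example of the read-ahead sizing above (assumed
 * geometry): 4 disks, 512 KiB chunks (chunk_sectors = 1024) and 4 KiB
 * pages give a stripe of 512 pages, so ra_pages is raised to at least
 * 1024 pages (4 MiB), i.e. two full stripes.
 */
#include <stdio.h>

int main(void)
{
	unsigned raid_disks = 4, chunk_sectors = 1024, page_size = 4096;
	unsigned stripe = raid_disks * (chunk_sectors << 9) / page_size;

	printf("stripe = %u pages, ra_pages >= %u\n", stripe, 2 * stripe);
	return 0;	/* stripe = 512 pages, ra_pages >= 1024 */
}
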
static void raid0_free(struct mddev *mddev, void *priv)
{
	struct r0conf *conf = priv;

	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
}

/*
 * Is the IO distributed over one or more chunks?
*/
static inline int is_io_in_chunk_boundary(struct mddev *mddev,
			unsigned int chunk_sects, struct bio *bio)
{
	if (likely(is_power_of_2(chunk_sects))) {
		return chunk_sects >=
			((bio->bi_iter.bi_sector & (chunk_sects-1))
					+ bio_sectors(bio));
	} else {
		sector_t sector = bio->bi_iter.bi_sector;
		return chunk_sects >= (sector_div(sector, chunk_sects)
						+ bio_sectors(bio));
	}
}

static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	sector_t start = bio->bi_iter.bi_sector;
	sector_t end;
	unsigned int stripe_size;
	sector_t first_stripe_index, last_stripe_index;
	sector_t start_disk_offset;
	unsigned int start_disk_index;
	sector_t end_disk_offset;
	unsigned int end_disk_index;
	unsigned int disk;

	zone = find_zone(conf, &start);

	if (bio_end_sector(bio) > zone->zone_end) {
		struct bio *split = bio_split(bio,
			zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
			&mddev->bio_set);
		bio_chain(split, bio);
		submit_bio_noacct(bio);
		bio = split;
		end = zone->zone_end;
	} else
		end = bio_end_sector(bio);

	if (zone != conf->strip_zone)
		end = end - zone[-1].zone_end;

	/* Now start and end are offsets within the zone */
	stripe_size = zone->nb_dev * mddev->chunk_sectors;

	first_stripe_index = start;
	sector_div(first_stripe_index, stripe_size);
	last_stripe_index = end;
	sector_div(last_stripe_index, stripe_size);

	start_disk_index = (int)(start - first_stripe_index * stripe_size) /
		mddev->chunk_sectors;
	start_disk_offset = ((int)(start - first_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		first_stripe_index * mddev->chunk_sectors;
	end_disk_index = (int)(end - last_stripe_index * stripe_size) /
		mddev->chunk_sectors;
	end_disk_offset = ((int)(end - last_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		last_stripe_index * mddev->chunk_sectors;

	for (disk = 0; disk < zone->nb_dev; disk++) {
		sector_t dev_start, dev_end;
		struct bio *discard_bio = NULL;
		struct md_rdev *rdev;

		if (disk < start_disk_index)
			dev_start = (first_stripe_index + 1) *
				mddev->chunk_sectors;
		else if (disk > start_disk_index)
			dev_start = first_stripe_index * mddev->chunk_sectors;
		else
			dev_start = start_disk_offset;

		if (disk < end_disk_index)
			dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
		else if (disk > end_disk_index)
			dev_end = last_stripe_index * mddev->chunk_sectors;
		else
			dev_end = end_disk_offset;

		if (dev_end <= dev_start)
			continue;

		rdev = conf->devlist[(zone - conf->strip_zone) *
			conf->strip_zone[0].nb_dev + disk];
		if (__blkdev_issue_discard(rdev->bdev,
			dev_start + zone->dev_start + rdev->data_offset,
			dev_end - dev_start, GFP_NOIO, 0, &discard_bio) ||
		    !discard_bio)
			continue;
		bio_chain(discard_bio, bio);
		bio_clone_blkg_association(discard_bio, bio);
		if (mddev->gendisk)
			trace_block_bio_remap(bdev_get_queue(rdev->bdev),
				discard_bio, disk_devt(mddev->gendisk),
				bio->bi_iter.bi_sector);
		submit_bio_noacct(discard_bio);
	}
	bio_endio(bio);
}

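/*
 * Editor's worked example of the per-disk discard ranges above (assumed
 * geometry, single zone): chunk = 128, nb_dev = 2, stripe = 256. A
 * discard of zone offsets [300, 700) starts in stripe 1 on disk 0 and
 * ends in stripe 2 on disk 1, yielding [172, 384) on disk 0 and
 * [128, 316) on disk 1 (212 + 188 = 400 sectors, matching 700 - 300).
 */
#include <stdio.h>

int main(void)
{
	unsigned chunk = 128, nb_dev = 2, stripe = chunk * nb_dev;
	unsigned long long start = 300, end = 700;	/* zone offsets */
	unsigned long long fsi = start / stripe, lsi = end / stripe;
	unsigned sdi = (start - fsi * stripe) / chunk;
	unsigned edi = (end - lsi * stripe) / chunk;
	unsigned long long sdo = (start - fsi * stripe) % chunk + fsi * chunk;
	unsigned long long edo = (end - lsi * stripe) % chunk + lsi * chunk;
	unsigned disk;

	for (disk = 0; disk < nb_dev; disk++) {
		unsigned long long ds = disk < sdi ? (fsi + 1) * chunk :
			disk > sdi ? fsi * chunk : sdo;
		unsigned long long de = disk < edi ? (lsi + 1) * chunk :
			disk > edi ? lsi * chunk : edo;

		if (de > ds)
			printf("disk %u: [%llu, %llu)\n", disk, ds, de);
	}
	return 0;	/* disk 0: [172, 384), disk 1: [128, 316) */
}
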
static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	struct md_rdev *tmp_dev;
	sector_t bio_sector;
	sector_t sector;
	sector_t orig_sector;
	unsigned chunk_sects;
	unsigned sectors;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
	    && md_flush_request(mddev, bio))
		return true;

	if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
		raid0_handle_discard(mddev, bio);
		return true;
	}

	bio_sector = bio->bi_iter.bi_sector;
	sector = bio_sector;
	chunk_sects = mddev->chunk_sectors;

	sectors = chunk_sects -
		(likely(is_power_of_2(chunk_sects))
		 ? (sector & (chunk_sects-1))
		 : sector_div(sector, chunk_sects));

	/* Restore due to sector_div */
	sector = bio_sector;

	if (sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, sectors, GFP_NOIO,
					      &mddev->bio_set);
		bio_chain(split, bio);
		submit_bio_noacct(bio);
		bio = split;
	}

	orig_sector = sector;
	zone = find_zone(mddev->private, &sector);
	switch (conf->layout) {
	case RAID0_ORIG_LAYOUT:
		tmp_dev = map_sector(mddev, zone, orig_sector, &sector);
		break;
	case RAID0_ALT_MULTIZONE_LAYOUT:
		tmp_dev = map_sector(mddev, zone, sector, &sector);
		break;
	default:
		WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
		bio_io_error(bio);
		return true;
	}

	if (unlikely(is_mddev_broken(tmp_dev, "raid0"))) {
		bio_io_error(bio);
		return true;
	}

	bio_set_dev(bio, tmp_dev->bdev);
	bio->bi_iter.bi_sector = sector + zone->dev_start +
		tmp_dev->data_offset;

	if (mddev->gendisk)
		trace_block_bio_remap(bio->bi_disk->queue, bio,
				disk_devt(mddev->gendisk), bio_sector);
	mddev_check_writesame(mddev, bio);
	mddev_check_write_zeroes(mddev, bio);
	submit_bio_noacct(bio);
	return true;
}

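/*
 * Editor's sketch of the split arithmetic above (assumed values): with
 * chunk_sects = 128, a bio starting at sector 300 already has
 * 300 & 127 = 44 sectors of its chunk behind it, so at most
 * 128 - 44 = 84 sectors can go to one device; a longer bio is split.
 */
#include <stdio.h>

int main(void)
{
	unsigned chunk_sects = 128;		/* power of two, assumed */
	unsigned long long bi_sector = 300;	/* bio start, assumed */
	unsigned bio_len = 200;			/* bio length in sectors */
	unsigned sectors = chunk_sects -
		(unsigned)(bi_sector & (chunk_sects - 1));

	if (sectors < bio_len)
		printf("split: %u sectors now, %u chained\n",
		       sectors, bio_len - sectors);
	return 0;	/* split: 84 sectors now, 116 chained */
}
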
static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
	return;
}

static void *raid0_takeover_raid45(struct mddev *mddev)
{
	struct md_rdev *rdev;
	struct r0conf *priv_conf;

	if (mddev->degraded != 1) {
		pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
			mdname(mddev),
			mddev->degraded);
		return ERR_PTR(-EINVAL);
	}

	rdev_for_each(rdev, mddev) {
		/* check slot number for a disk */
		if (rdev->raid_disk == mddev->raid_disks-1) {
			pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
				mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
		rdev->sectors = mddev->dev_sectors;
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks--;
	mddev->delta_disks = -1;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);

	return priv_conf;
}

static void *raid0_takeover_raid10(struct mddev *mddev)
{
	struct r0conf *priv_conf;

	/* Check layout:
	 *  - far_copies must be 1
	 *  - near_copies must be 2
	 *  - disks number must be even
	 *  - all mirrors must be already degraded
	 */
	if (mddev->layout != ((1 << 8) + 2)) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover layout: 0x%x\n",
			mdname(mddev),
			mddev->layout);
		return ERR_PTR(-EINVAL);
	}
	if (mddev->raid_disks & 1) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}
	if (mddev->degraded != (mddev->raid_disks>>1)) {
		pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = - mddev->raid_disks / 2;
	mddev->raid_disks += mddev->delta_disks;
	mddev->degraded = 0;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover_raid1(struct mddev *mddev)
{
	struct r0conf *priv_conf;
	int chunksect;

	/* Check layout:
	 *  - (N - 1) mirror drives must be already faulty
	 */
	if ((mddev->raid_disks - 1) != mddev->degraded) {
		pr_err("md/raid0:%s: (N - 1) mirror drives must be already faulty!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/*
	 * a raid1 doesn't have the notion of chunk size, so
	 * figure out the largest suitable size we can use.
	 */
	chunksect = 64 * 2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
		chunksect >>= 1;

	if ((chunksect << 9) < PAGE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = chunksect;
	mddev->chunk_sectors = chunksect;
	mddev->delta_disks = 1 - mddev->raid_disks;
	mddev->raid_disks = 1;
745	/* make sure it will be not marked as dirty */
746	mddev->recovery_cp = MaxSector;
747	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
748
749	create_strip_zones(mddev, &priv_conf);
750	return priv_conf;
751}
752
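/*
 * Editor's worked example of the chunk-size search above (assumed array
 * size): starting from 128 sectors (64 KiB), halve until the array size
 * is an exact multiple. 1000032 sectors is divisible by 32 but not 64,
 * so the loop settles on a 16 KiB chunk, which still clears PAGE_SIZE.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long array_sectors = 1000032;	/* assumed */
	int chunksect = 64 * 2;				/* 64 KiB default */

	while (chunksect && (array_sectors & (chunksect - 1)))
		chunksect >>= 1;
	printf("chunk = %d sectors (%d KiB)\n", chunksect, chunksect / 2);
	return 0;	/* chunk = 32 sectors (16 KiB) */
}
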
static void *raid0_takeover(struct mddev *mddev)
{
	/* raid0 can take over:
	 *  raid4 - if all data disks are active.
	 *  raid5 - providing it is Raid4 layout and one disk is faulty
	 *  raid10 - assuming we have all necessary active disks
	 *  raid1 - with (N - 1) mirror drives faulty
	 */

	if (mddev->bitmap) {
		pr_warn("md/raid0: %s: cannot takeover array with bitmap\n",
			mdname(mddev));
		return ERR_PTR(-EBUSY);
	}
	if (mddev->level == 4)
		return raid0_takeover_raid45(mddev);

	if (mddev->level == 5) {
		if (mddev->layout == ALGORITHM_PARITY_N)
			return raid0_takeover_raid45(mddev);

		pr_warn("md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
			mdname(mddev), ALGORITHM_PARITY_N);
	}

	if (mddev->level == 10)
		return raid0_takeover_raid10(mddev);

	if (mddev->level == 1)
		return raid0_takeover_raid1(mddev);

	pr_warn("Takeover from raid%i to raid0 not supported\n",
		mddev->level);

	return ERR_PTR(-EINVAL);
}

static void raid0_quiesce(struct mddev *mddev, int quiesce)
{
}

static struct md_personality raid0_personality =
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.free		= raid0_free,
	.status		= raid0_status,
	.size		= raid0_size,
	.takeover	= raid0_takeover,
	.quiesce	= raid0_quiesce,
};

static int __init raid0_init (void)
{
	return register_md_personality (&raid0_personality);
}

static void raid0_exit (void)
{
	unregister_md_personality (&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");