v6.2
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3   raid0.c : Multiple Devices driver for Linux
  4	     Copyright (C) 1994-96 Marc ZYNGIER
  5	     <zyngier@ufr-info-p7.ibp.fr> or
  6	     <maz@gloups.fdn.fr>
  7	     Copyright (C) 1999, 2000 Ingo Molnar, Red Hat
  8
  9   RAID-0 management functions.
 10
 11*/
 12
 13#include <linux/blkdev.h>
 14#include <linux/seq_file.h>
 15#include <linux/module.h>
 16#include <linux/slab.h>
 17#include <trace/events/block.h>
 18#include "md.h"
 19#include "raid0.h"
 20#include "raid5.h"
 21
 22static int default_layout = 0;
 23module_param(default_layout, int, 0644);
 24
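The default_layout parameter above selects the sector mapping for multi-zone arrays whose superblock does not record a layout; create_strip_zones() below accepts 1 (RAID0_ORIG_LAYOUT) or 2 (RAID0_ALT_MULTIZONE_LAYOUT). Because module_param() exposes it with mode 0644, the knob can be set at load time or later through sysfs, for example (assuming raid0 is built as a module; a built-in driver takes raid0.default_layout=2 on the kernel command line instead):

	modprobe raid0 default_layout=2
	echo 2 > /sys/module/raid0/parameters/default_layout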
 25#define UNSUPPORTED_MDDEV_FLAGS		\
 26	((1L << MD_HAS_JOURNAL) |	\
 27	 (1L << MD_JOURNAL_CLEAN) |	\
 28	 (1L << MD_FAILFAST_SUPPORTED) |\
 29	 (1L << MD_HAS_PPL) |		\
 30	 (1L << MD_HAS_MULTIPLE_PPLS))
 31
 32/*
 33 * inform the user of the raid configuration
 34*/
 35static void dump_zones(struct mddev *mddev)
 36{
 37	int j, k;
 38	sector_t zone_size = 0;
 39	sector_t zone_start = 0;
 40	struct r0conf *conf = mddev->private;
 41	int raid_disks = conf->strip_zone[0].nb_dev;
 42	pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
 43		 mdname(mddev),
 44		 conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
 45	for (j = 0; j < conf->nr_strip_zones; j++) {
 46		char line[200];
 47		int len = 0;
 48
 49		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
 50			len += scnprintf(line+len, 200-len, "%s%pg", k?"/":"",
 51				conf->devlist[j * raid_disks + k]->bdev);
 52		pr_debug("md: zone%d=[%s]\n", j, line);
 53
 54		zone_size  = conf->strip_zone[j].zone_end - zone_start;
 55		pr_debug("      zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
 56			(unsigned long long)zone_start>>1,
 57			(unsigned long long)conf->strip_zone[j].dev_start>>1,
 58			(unsigned long long)zone_size>>1);
 59		zone_start = conf->strip_zone[j].zone_end;
 60	}
 61}
 62
 63static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
 64{
 65	int i, c, err;
 66	sector_t curr_zone_end, sectors;
 67	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
 68	struct strip_zone *zone;
 69	int cnt;
 70	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
 71	unsigned blksize = 512;
 72
 73	*private_conf = ERR_PTR(-ENOMEM);
 74	if (!conf)
 75		return -ENOMEM;
 76	rdev_for_each(rdev1, mddev) {
 77		pr_debug("md/raid0:%s: looking at %pg\n",
 78			 mdname(mddev),
 79			 rdev1->bdev);
 80		c = 0;
 81
 82		/* round size to chunk_size */
 83		sectors = rdev1->sectors;
 84		sector_div(sectors, mddev->chunk_sectors);
 85		rdev1->sectors = sectors * mddev->chunk_sectors;
 86
 87		blksize = max(blksize, queue_logical_block_size(
 88				      rdev1->bdev->bd_disk->queue));
 89
 90		rdev_for_each(rdev2, mddev) {
 91			pr_debug("md/raid0:%s:   comparing %pg(%llu)"
 92				 " with %pg(%llu)\n",
 93				 mdname(mddev),
 94				 rdev1->bdev,
 95				 (unsigned long long)rdev1->sectors,
 96				 rdev2->bdev,
 97				 (unsigned long long)rdev2->sectors);
 98			if (rdev2 == rdev1) {
 99				pr_debug("md/raid0:%s:   END\n",
100					 mdname(mddev));
101				break;
102			}
103			if (rdev2->sectors == rdev1->sectors) {
104				/*
105				 * Not unique, don't count it as a new
106				 * group
107				 */
108				pr_debug("md/raid0:%s:   EQUAL\n",
109					 mdname(mddev));
110				c = 1;
111				break;
112			}
113			pr_debug("md/raid0:%s:   NOT EQUAL\n",
114				 mdname(mddev));
115		}
116		if (!c) {
117			pr_debug("md/raid0:%s:   ==> UNIQUE\n",
118				 mdname(mddev));
119			conf->nr_strip_zones++;
120			pr_debug("md/raid0:%s: %d zones\n",
121				 mdname(mddev), conf->nr_strip_zones);
122		}
123	}
124	pr_debug("md/raid0:%s: FINAL %d zones\n",
125		 mdname(mddev), conf->nr_strip_zones);
126
127	/*
128	 * now since we have the hard sector sizes, we can make sure
129	 * chunk size is a multiple of that sector size
130	 */
131	if ((mddev->chunk_sectors << 9) % blksize) {
132		pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
133			mdname(mddev),
134			mddev->chunk_sectors << 9, blksize);
135		err = -EINVAL;
136		goto abort;
137	}
138
139	err = -ENOMEM;
140	conf->strip_zone = kcalloc(conf->nr_strip_zones,
141				   sizeof(struct strip_zone),
142				   GFP_KERNEL);
143	if (!conf->strip_zone)
144		goto abort;
145	conf->devlist = kzalloc(array3_size(sizeof(struct md_rdev *),
146					    conf->nr_strip_zones,
147					    mddev->raid_disks),
148				GFP_KERNEL);
149	if (!conf->devlist)
150		goto abort;
151
152	/* The first zone must contain all devices, so here we check that
153	 * there is a proper alignment of slots to devices and find them all
154	 */
155	zone = &conf->strip_zone[0];
156	cnt = 0;
157	smallest = NULL;
158	dev = conf->devlist;
159	err = -EINVAL;
160	rdev_for_each(rdev1, mddev) {
161		int j = rdev1->raid_disk;
162
163		if (mddev->level == 10) {
164			/* taking over a raid10-n2 array */
165			j /= 2;
166			rdev1->new_raid_disk = j;
167		}
168
169		if (mddev->level == 1) {
 170			/* taking over a raid1 array -
171			 * we have only one active disk
172			 */
173			j = 0;
174			rdev1->new_raid_disk = j;
175		}
176
177		if (j < 0) {
178			pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
179				mdname(mddev));
180			goto abort;
181		}
182		if (j >= mddev->raid_disks) {
183			pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
184				mdname(mddev), j);
185			goto abort;
186		}
187		if (dev[j]) {
188			pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
189				mdname(mddev), j);
190			goto abort;
191		}
192		dev[j] = rdev1;
193
194		if (!smallest || (rdev1->sectors < smallest->sectors))
195			smallest = rdev1;
196		cnt++;
197	}
198	if (cnt != mddev->raid_disks) {
199		pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
200			mdname(mddev), cnt, mddev->raid_disks);
201		goto abort;
202	}
203	zone->nb_dev = cnt;
204	zone->zone_end = smallest->sectors * cnt;
205
206	curr_zone_end = zone->zone_end;
207
208	/* now do the other zones */
209	for (i = 1; i < conf->nr_strip_zones; i++)
210	{
211		int j;
212
213		zone = conf->strip_zone + i;
214		dev = conf->devlist + i * mddev->raid_disks;
215
216		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
217		zone->dev_start = smallest->sectors;
218		smallest = NULL;
219		c = 0;
220
221		for (j=0; j<cnt; j++) {
222			rdev = conf->devlist[j];
223			if (rdev->sectors <= zone->dev_start) {
224				pr_debug("md/raid0:%s: checking %pg ... nope\n",
225					 mdname(mddev),
226					 rdev->bdev);
227				continue;
228			}
229			pr_debug("md/raid0:%s: checking %pg ..."
230				 " contained as device %d\n",
231				 mdname(mddev),
232				 rdev->bdev, c);
233			dev[c] = rdev;
234			c++;
235			if (!smallest || rdev->sectors < smallest->sectors) {
236				smallest = rdev;
 237				pr_debug("md/raid0:%s:  (%llu) is smallest!\n",
238					 mdname(mddev),
239					 (unsigned long long)rdev->sectors);
240			}
241		}
242
243		zone->nb_dev = c;
244		sectors = (smallest->sectors - zone->dev_start) * c;
245		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
246			 mdname(mddev),
247			 zone->nb_dev, (unsigned long long)sectors);
248
249		curr_zone_end += sectors;
250		zone->zone_end = curr_zone_end;
251
252		pr_debug("md/raid0:%s: current zone start: %llu\n",
253			 mdname(mddev),
254			 (unsigned long long)smallest->sectors);
255	}
256
257	if (conf->nr_strip_zones == 1 || conf->strip_zone[1].nb_dev == 1) {
258		conf->layout = RAID0_ORIG_LAYOUT;
259	} else if (mddev->layout == RAID0_ORIG_LAYOUT ||
260		   mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
261		conf->layout = mddev->layout;
262	} else if (default_layout == RAID0_ORIG_LAYOUT ||
263		   default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
264		conf->layout = default_layout;
265	} else {
266		pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
267		       mdname(mddev));
268		pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
269		err = -EOPNOTSUPP;
270		goto abort;
271	}
272
273	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
274	*private_conf = conf;
275
276	return 0;
277abort:
278	kfree(conf->strip_zone);
279	kfree(conf->devlist);
280	kfree(conf);
281	*private_conf = ERR_PTR(err);
282	return err;
283}
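To make the zone bookkeeping above concrete, here is a minimal userspace sketch, not kernel code, using three invented member sizes of 100, 300 and 300 sectors (already rounded to the chunk size). It reproduces the same arithmetic: zone 0 stripes all three members over array sectors [0..300), zone 1 stripes the two larger members over [300..700).

	/* toy_zones.c - standalone sketch of the create_strip_zones() math */
	#include <stdio.h>

	int main(void)
	{
		unsigned long long size[] = { 100, 300, 300 }; /* invented */
		int ndev = 3;
		unsigned long long zone_start = 0, dev_start = 0;

		for (;;) {
			unsigned long long smallest = 0;
			int nb_dev = 0;

			/* members with capacity left beyond dev_start */
			for (int i = 0; i < ndev; i++) {
				if (size[i] <= dev_start)
					continue;
				nb_dev++;
				if (!smallest || size[i] < smallest)
					smallest = size[i];
			}
			if (!nb_dev)
				break;
			printf("zone: dev_start=%llu nb_dev=%d spans [%llu..%llu)\n",
			       dev_start, nb_dev, zone_start,
			       zone_start + (smallest - dev_start) * nb_dev);
			zone_start += (smallest - dev_start) * nb_dev;
			dev_start = smallest;
		}
		return 0;
	}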
284
285/* Find the zone which holds a particular offset
286 * Update *sectorp to be an offset in that zone
287 */
288static struct strip_zone *find_zone(struct r0conf *conf,
289				    sector_t *sectorp)
290{
291	int i;
292	struct strip_zone *z = conf->strip_zone;
293	sector_t sector = *sectorp;
294
295	for (i = 0; i < conf->nr_strip_zones; i++)
296		if (sector < z[i].zone_end) {
297			if (i)
298				*sectorp = sector - z[i-1].zone_end;
299			return z + i;
300		}
301	BUG();
302}
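Continuing the invented 100/300/300 example, the zone ends are 300 and 700: calling find_zone() with *sectorp == 450 selects zone 1 and rewrites *sectorp to 450 - 300 = 150, the offset within that zone; a sector at or beyond 700 would trip the BUG().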
303
 304/*
 305 * remap the bio to the target device. two flows are kept separate
 306 * for performance: a power-of-2 chunk-size flow and a general one
 307*/
308static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
309				sector_t sector, sector_t *sector_offset)
310{
311	unsigned int sect_in_chunk;
312	sector_t chunk;
313	struct r0conf *conf = mddev->private;
314	int raid_disks = conf->strip_zone[0].nb_dev;
315	unsigned int chunk_sects = mddev->chunk_sectors;
316
317	if (is_power_of_2(chunk_sects)) {
318		int chunksect_bits = ffz(~chunk_sects);
319		/* find the sector offset inside the chunk */
320		sect_in_chunk  = sector & (chunk_sects - 1);
321		sector >>= chunksect_bits;
322		/* chunk in zone */
323		chunk = *sector_offset;
324		/* quotient is the chunk in real device*/
325		sector_div(chunk, zone->nb_dev << chunksect_bits);
326	} else{
327		sect_in_chunk = sector_div(sector, chunk_sects);
328		chunk = *sector_offset;
329		sector_div(chunk, chunk_sects * zone->nb_dev);
330	}
331	/*
332	*  position the bio over the real device
333	*  real sector = chunk in device + starting of zone
334	*	+ the position in the chunk
335	*/
336	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
337	return conf->devlist[(zone - conf->strip_zone)*raid_disks
338			     + sector_div(sector, zone->nb_dev)];
339}
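A worked instance of the power-of-2 path above, restricted to zone 0 where the absolute sector and the zone offset coincide (all values invented for illustration): with chunk_sects = 8 and nb_dev = 2, offset 25 gives sect_in_chunk = 1, global chunk 3, hence device 3 % 2 = 1, chunk row 25 / 16 = 1 and a device sector of 1 * 8 + 1 = 9. The same arithmetic in standalone form:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long sector = 25, offset = 25; /* zone 0: equal */
		unsigned int chunk_sects = 8, nb_dev = 2;
		unsigned int sect_in_chunk = sector & (chunk_sects - 1);
		unsigned long long row = offset / (nb_dev * chunk_sects);
		unsigned int dev = (sector / chunk_sects) % nb_dev;

		printf("dev=%u dev_sector=%llu\n",
		       dev, row * chunk_sects + sect_in_chunk);
		return 0;
	}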
340
341static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
342{
343	sector_t array_sectors = 0;
344	struct md_rdev *rdev;
345
346	WARN_ONCE(sectors || raid_disks,
347		  "%s does not support generic reshape\n", __func__);
348
349	rdev_for_each(rdev, mddev)
350		array_sectors += (rdev->sectors &
351				  ~(sector_t)(mddev->chunk_sectors-1));
352
353	return array_sectors;
354}
355
356static void free_conf(struct mddev *mddev, struct r0conf *conf)
357{
358	kfree(conf->strip_zone);
359	kfree(conf->devlist);
360	kfree(conf);
361}
362
363static void raid0_free(struct mddev *mddev, void *priv)
364{
365	struct r0conf *conf = priv;
366
367	free_conf(mddev, conf);
368	acct_bioset_exit(mddev);
369}
370
371static int raid0_run(struct mddev *mddev)
372{
373	struct r0conf *conf;
374	int ret;
375
376	if (mddev->chunk_sectors == 0) {
377		pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
378		return -EINVAL;
379	}
380	if (md_check_no_bitmap(mddev))
381		return -EINVAL;
382
383	if (acct_bioset_init(mddev)) {
384		pr_err("md/raid0:%s: alloc acct bioset failed.\n", mdname(mddev));
385		return -ENOMEM;
386	}
387
388	/* if private is not null, we are here after takeover */
389	if (mddev->private == NULL) {
390		ret = create_strip_zones(mddev, &conf);
391		if (ret < 0)
392			goto exit_acct_set;
393		mddev->private = conf;
394	}
395	conf = mddev->private;
396	if (mddev->queue) {
397		struct md_rdev *rdev;
398
399		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
400		blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
401
402		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
403		blk_queue_io_opt(mddev->queue,
404				 (mddev->chunk_sectors << 9) * mddev->raid_disks);
405
406		rdev_for_each(rdev, mddev) {
407			disk_stack_limits(mddev->gendisk, rdev->bdev,
408					  rdev->data_offset << 9);
409		}
410	}
411
412	/* calculate array device size */
413	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
414
415	pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
416		 mdname(mddev),
417		 (unsigned long long)mddev->array_sectors);
418
419	dump_zones(mddev);
420
421	ret = md_integrity_register(mddev);
422	if (ret)
423		goto free;
424
425	return ret;
426
427free:
428	free_conf(mddev, conf);
429exit_acct_set:
430	acct_bioset_exit(mddev);
431	return ret;
432}
433
434static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
435{
436	struct r0conf *conf = mddev->private;
437	struct strip_zone *zone;
438	sector_t start = bio->bi_iter.bi_sector;
439	sector_t end;
440	unsigned int stripe_size;
441	sector_t first_stripe_index, last_stripe_index;
442	sector_t start_disk_offset;
443	unsigned int start_disk_index;
444	sector_t end_disk_offset;
445	unsigned int end_disk_index;
446	unsigned int disk;
447
448	zone = find_zone(conf, &start);
449
450	if (bio_end_sector(bio) > zone->zone_end) {
451		struct bio *split = bio_split(bio,
452			zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
453			&mddev->bio_set);
454		bio_chain(split, bio);
455		submit_bio_noacct(bio);
456		bio = split;
457		end = zone->zone_end;
458	} else
459		end = bio_end_sector(bio);
460
461	if (zone != conf->strip_zone)
462		end = end - zone[-1].zone_end;
463
 464	/* Now start and end are offsets within the zone */
465	stripe_size = zone->nb_dev * mddev->chunk_sectors;
466
467	first_stripe_index = start;
468	sector_div(first_stripe_index, stripe_size);
469	last_stripe_index = end;
470	sector_div(last_stripe_index, stripe_size);
471
472	start_disk_index = (int)(start - first_stripe_index * stripe_size) /
473		mddev->chunk_sectors;
474	start_disk_offset = ((int)(start - first_stripe_index * stripe_size) %
475		mddev->chunk_sectors) +
476		first_stripe_index * mddev->chunk_sectors;
477	end_disk_index = (int)(end - last_stripe_index * stripe_size) /
478		mddev->chunk_sectors;
479	end_disk_offset = ((int)(end - last_stripe_index * stripe_size) %
480		mddev->chunk_sectors) +
481		last_stripe_index * mddev->chunk_sectors;
482
483	for (disk = 0; disk < zone->nb_dev; disk++) {
484		sector_t dev_start, dev_end;
485		struct md_rdev *rdev;
486
487		if (disk < start_disk_index)
488			dev_start = (first_stripe_index + 1) *
489				mddev->chunk_sectors;
490		else if (disk > start_disk_index)
491			dev_start = first_stripe_index * mddev->chunk_sectors;
492		else
493			dev_start = start_disk_offset;
494
495		if (disk < end_disk_index)
496			dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
497		else if (disk > end_disk_index)
498			dev_end = last_stripe_index * mddev->chunk_sectors;
499		else
500			dev_end = end_disk_offset;
501
502		if (dev_end <= dev_start)
503			continue;
504
505		rdev = conf->devlist[(zone - conf->strip_zone) *
506			conf->strip_zone[0].nb_dev + disk];
507		md_submit_discard_bio(mddev, rdev, bio,
508			dev_start + zone->dev_start + rdev->data_offset,
509			dev_end - dev_start);
510	}
511	bio_endio(bio);
512}
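A worked example of the per-member trim arithmetic, with invented numbers: in a zone with nb_dev = 2 and chunk_sectors = 8 (so stripe_size = 16), a discard over zone-relative sectors [4, 28) yields first_stripe_index = 0 and last_stripe_index = 1; disk 0 is trimmed over [4, 16) and disk 1 over [0, 12), together covering exactly the 24 requested sectors.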
513
514static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
515{
516	struct r0conf *conf = mddev->private;
517	struct strip_zone *zone;
518	struct md_rdev *tmp_dev;
519	sector_t bio_sector;
520	sector_t sector;
521	sector_t orig_sector;
522	unsigned chunk_sects;
523	unsigned sectors;
524
525	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
526	    && md_flush_request(mddev, bio))
527		return true;
528
529	if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
530		raid0_handle_discard(mddev, bio);
531		return true;
532	}
533
534	bio_sector = bio->bi_iter.bi_sector;
535	sector = bio_sector;
536	chunk_sects = mddev->chunk_sectors;
537
538	sectors = chunk_sects -
539		(likely(is_power_of_2(chunk_sects))
540		 ? (sector & (chunk_sects-1))
541		 : sector_div(sector, chunk_sects));
542
543	/* Restore due to sector_div */
544	sector = bio_sector;
545
546	if (sectors < bio_sectors(bio)) {
547		struct bio *split = bio_split(bio, sectors, GFP_NOIO,
548					      &mddev->bio_set);
549		bio_chain(split, bio);
550		submit_bio_noacct(bio);
551		bio = split;
552	}
553
554	if (bio->bi_pool != &mddev->bio_set)
555		md_account_bio(mddev, &bio);
556
557	orig_sector = sector;
558	zone = find_zone(mddev->private, &sector);
559	switch (conf->layout) {
560	case RAID0_ORIG_LAYOUT:
561		tmp_dev = map_sector(mddev, zone, orig_sector, &sector);
562		break;
563	case RAID0_ALT_MULTIZONE_LAYOUT:
564		tmp_dev = map_sector(mddev, zone, sector, &sector);
565		break;
566	default:
567		WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
568		bio_io_error(bio);
569		return true;
570	}
571
572	if (unlikely(is_mddev_broken(tmp_dev, "raid0"))) {
573		bio_io_error(bio);
574		return true;
575	}
576
577	bio_set_dev(bio, tmp_dev->bdev);
578	bio->bi_iter.bi_sector = sector + zone->dev_start +
579		tmp_dev->data_offset;
580
581	if (mddev->gendisk)
582		trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
583				      bio_sector);
584	mddev_check_write_zeroes(mddev, bio);
585	submit_bio_noacct(bio);
586	return true;
587}
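The split above keeps each bio within a single chunk. For example (numbers invented), with chunk_sects = 8 a 12-sector bio starting at sector 10 has 10 & 7 = 2 sectors of its chunk already consumed, so sectors = 6: the first 6 sectors are mapped and submitted now, while the 6-sector remainder is chained and resubmitted through submit_bio_noacct() to be handled on its own, starting on the next chunk boundary.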
588
589static void raid0_status(struct seq_file *seq, struct mddev *mddev)
590{
591	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
592	return;
593}
594
595static void *raid0_takeover_raid45(struct mddev *mddev)
596{
597	struct md_rdev *rdev;
598	struct r0conf *priv_conf;
599
600	if (mddev->degraded != 1) {
601		pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
602			mdname(mddev),
603			mddev->degraded);
604		return ERR_PTR(-EINVAL);
605	}
606
607	rdev_for_each(rdev, mddev) {
608		/* check slot number for a disk */
609		if (rdev->raid_disk == mddev->raid_disks-1) {
610			pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
611				mdname(mddev));
612			return ERR_PTR(-EINVAL);
613		}
614		rdev->sectors = mddev->dev_sectors;
615	}
616
617	/* Set new parameters */
618	mddev->new_level = 0;
619	mddev->new_layout = 0;
620	mddev->new_chunk_sectors = mddev->chunk_sectors;
621	mddev->raid_disks--;
622	mddev->delta_disks = -1;
623	/* make sure it will be not marked as dirty */
624	mddev->recovery_cp = MaxSector;
625	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
626
627	create_strip_zones(mddev, &priv_conf);
628
629	return priv_conf;
630}
631
632static void *raid0_takeover_raid10(struct mddev *mddev)
633{
634	struct r0conf *priv_conf;
635
636	/* Check layout:
637	 *  - far_copies must be 1
638	 *  - near_copies must be 2
639	 *  - disks number must be even
640	 *  - all mirrors must be already degraded
641	 */
642	if (mddev->layout != ((1 << 8) + 2)) {
 643		pr_warn("md/raid0:%s: Raid0 cannot takeover layout: 0x%x\n",
644			mdname(mddev),
645			mddev->layout);
646		return ERR_PTR(-EINVAL);
647	}
648	if (mddev->raid_disks & 1) {
649		pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
650			mdname(mddev));
651		return ERR_PTR(-EINVAL);
652	}
653	if (mddev->degraded != (mddev->raid_disks>>1)) {
654		pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
655			mdname(mddev));
656		return ERR_PTR(-EINVAL);
657	}
658
659	/* Set new parameters */
660	mddev->new_level = 0;
661	mddev->new_layout = 0;
662	mddev->new_chunk_sectors = mddev->chunk_sectors;
663	mddev->delta_disks = - mddev->raid_disks / 2;
664	mddev->raid_disks += mddev->delta_disks;
665	mddev->degraded = 0;
666	/* make sure it will be not marked as dirty */
667	mddev->recovery_cp = MaxSector;
668	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
669
670	create_strip_zones(mddev, &priv_conf);
671	return priv_conf;
672}
673
674static void *raid0_takeover_raid1(struct mddev *mddev)
675{
676	struct r0conf *priv_conf;
677	int chunksect;
678
679	/* Check layout:
680	 *  - (N - 1) mirror drives must be already faulty
681	 */
682	if ((mddev->raid_disks - 1) != mddev->degraded) {
 683		pr_err("md/raid0:%s: (N - 1) mirror drives must be already faulty!\n",
684		       mdname(mddev));
685		return ERR_PTR(-EINVAL);
686	}
687
688	/*
689	 * a raid1 doesn't have the notion of chunk size, so
690	 * figure out the largest suitable size we can use.
691	 */
692	chunksect = 64 * 2; /* 64K by default */
693
694	/* The array must be an exact multiple of chunksize */
695	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
696		chunksect >>= 1;
697
698	if ((chunksect << 9) < PAGE_SIZE)
699		/* array size does not allow a suitable chunk size */
700		return ERR_PTR(-EINVAL);
701
702	/* Set new parameters */
703	mddev->new_level = 0;
704	mddev->new_layout = 0;
705	mddev->new_chunk_sectors = chunksect;
706	mddev->chunk_sectors = chunksect;
707	mddev->delta_disks = 1 - mddev->raid_disks;
708	mddev->raid_disks = 1;
709	/* make sure it will be not marked as dirty */
710	mddev->recovery_cp = MaxSector;
711	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
712
713	create_strip_zones(mddev, &priv_conf);
714	return priv_conf;
715}
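A quick example of the chunk-size search above (array size invented): an array of 3000 sectors fails the power-of-two multiple test at 128, 64, 32 and 16 sectors but passes at 8, so chunksect becomes 8 (a 4 KiB chunk); since 8 << 9 equals PAGE_SIZE on 4 KiB-page systems the takeover just succeeds, while any array forcing a smaller chunk is rejected.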
716
717static void *raid0_takeover(struct mddev *mddev)
718{
 719	/* raid0 can take over:
 720	 *  raid4 - if all data disks are active.
 721	 *  raid5 - provided it uses the Raid4 layout and one disk is faulty
 722	 *  raid10 - assuming we have all necessary active disks
 723	 *  raid1 - with (N - 1) mirror drives faulty
 724	 */
725
726	if (mddev->bitmap) {
727		pr_warn("md/raid0: %s: cannot takeover array with bitmap\n",
728			mdname(mddev));
729		return ERR_PTR(-EBUSY);
730	}
731	if (mddev->level == 4)
732		return raid0_takeover_raid45(mddev);
733
734	if (mddev->level == 5) {
735		if (mddev->layout == ALGORITHM_PARITY_N)
736			return raid0_takeover_raid45(mddev);
737
 738		pr_warn("md/raid0:%s: Raid0 can only takeover Raid5 with layout: %d\n",
739			mdname(mddev), ALGORITHM_PARITY_N);
740	}
741
742	if (mddev->level == 10)
743		return raid0_takeover_raid10(mddev);
744
745	if (mddev->level == 1)
746		return raid0_takeover_raid1(mddev);
747
748	pr_warn("Takeover from raid%i to raid0 not supported\n",
749		mddev->level);
750
751	return ERR_PTR(-EINVAL);
752}
753
754static void raid0_quiesce(struct mddev *mddev, int quiesce)
755{
756}
757
758static struct md_personality raid0_personality=
759{
760	.name		= "raid0",
761	.level		= 0,
762	.owner		= THIS_MODULE,
763	.make_request	= raid0_make_request,
764	.run		= raid0_run,
765	.free		= raid0_free,
766	.status		= raid0_status,
767	.size		= raid0_size,
768	.takeover	= raid0_takeover,
769	.quiesce	= raid0_quiesce,
770};
771
772static int __init raid0_init (void)
773{
774	return register_md_personality (&raid0_personality);
775}
776
777static void raid0_exit (void)
778{
779	unregister_md_personality (&raid0_personality);
780}
781
782module_init(raid0_init);
783module_exit(raid0_exit);
784MODULE_LICENSE("GPL");
785MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
786MODULE_ALIAS("md-personality-2"); /* RAID0 */
787MODULE_ALIAS("md-raid0");
788MODULE_ALIAS("md-level-0");
v5.4
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3   raid0.c : Multiple Devices driver for Linux
  4	     Copyright (C) 1994-96 Marc ZYNGIER
  5	     <zyngier@ufr-info-p7.ibp.fr> or
  6	     <maz@gloups.fdn.fr>
  7	     Copyright (C) 1999, 2000 Ingo Molnar, Red Hat
  8
  9   RAID-0 management functions.
 10
 11*/
 12
 13#include <linux/blkdev.h>
 14#include <linux/seq_file.h>
 15#include <linux/module.h>
 16#include <linux/slab.h>
 17#include <trace/events/block.h>
 18#include "md.h"
 19#include "raid0.h"
 20#include "raid5.h"
 21
 22static int default_layout = 0;
 23module_param(default_layout, int, 0644);
 24
 25#define UNSUPPORTED_MDDEV_FLAGS		\
 26	((1L << MD_HAS_JOURNAL) |	\
 27	 (1L << MD_JOURNAL_CLEAN) |	\
 28	 (1L << MD_FAILFAST_SUPPORTED) |\
 29	 (1L << MD_HAS_PPL) |		\
 30	 (1L << MD_HAS_MULTIPLE_PPLS))
 31
 32static int raid0_congested(struct mddev *mddev, int bits)
 33{
 34	struct r0conf *conf = mddev->private;
 35	struct md_rdev **devlist = conf->devlist;
 36	int raid_disks = conf->strip_zone[0].nb_dev;
 37	int i, ret = 0;
 38
 39	for (i = 0; i < raid_disks && !ret ; i++) {
 40		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
 41
 42		ret |= bdi_congested(q->backing_dev_info, bits);
 43	}
 44	return ret;
 45}
 46
 47/*
 48 * inform the user of the raid configuration
 49*/
 50static void dump_zones(struct mddev *mddev)
 51{
 52	int j, k;
 53	sector_t zone_size = 0;
 54	sector_t zone_start = 0;
 55	char b[BDEVNAME_SIZE];
 56	struct r0conf *conf = mddev->private;
 57	int raid_disks = conf->strip_zone[0].nb_dev;
 58	pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
 59		 mdname(mddev),
 60		 conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
 61	for (j = 0; j < conf->nr_strip_zones; j++) {
 62		char line[200];
 63		int len = 0;
 64
 65		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
 66			len += snprintf(line+len, 200-len, "%s%s", k?"/":"",
 67					bdevname(conf->devlist[j*raid_disks
 68							       + k]->bdev, b));
 69		pr_debug("md: zone%d=[%s]\n", j, line);
 70
 71		zone_size  = conf->strip_zone[j].zone_end - zone_start;
 72		pr_debug("      zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
 73			(unsigned long long)zone_start>>1,
 74			(unsigned long long)conf->strip_zone[j].dev_start>>1,
 75			(unsigned long long)zone_size>>1);
 76		zone_start = conf->strip_zone[j].zone_end;
 77	}
 78}
 79
 80static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
 81{
 82	int i, c, err;
 83	sector_t curr_zone_end, sectors;
 84	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
 85	struct strip_zone *zone;
 86	int cnt;
 87	char b[BDEVNAME_SIZE];
 88	char b2[BDEVNAME_SIZE];
 89	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
 90	unsigned short blksize = 512;
 91
 92	*private_conf = ERR_PTR(-ENOMEM);
 93	if (!conf)
 94		return -ENOMEM;
 95	rdev_for_each(rdev1, mddev) {
 96		pr_debug("md/raid0:%s: looking at %s\n",
 97			 mdname(mddev),
 98			 bdevname(rdev1->bdev, b));
 99		c = 0;
100
101		/* round size to chunk_size */
102		sectors = rdev1->sectors;
103		sector_div(sectors, mddev->chunk_sectors);
104		rdev1->sectors = sectors * mddev->chunk_sectors;
105
106		blksize = max(blksize, queue_logical_block_size(
107				      rdev1->bdev->bd_disk->queue));
108
109		rdev_for_each(rdev2, mddev) {
110			pr_debug("md/raid0:%s:   comparing %s(%llu)"
111				 " with %s(%llu)\n",
112				 mdname(mddev),
113				 bdevname(rdev1->bdev,b),
114				 (unsigned long long)rdev1->sectors,
115				 bdevname(rdev2->bdev,b2),
116				 (unsigned long long)rdev2->sectors);
117			if (rdev2 == rdev1) {
118				pr_debug("md/raid0:%s:   END\n",
119					 mdname(mddev));
120				break;
121			}
122			if (rdev2->sectors == rdev1->sectors) {
123				/*
124				 * Not unique, don't count it as a new
125				 * group
126				 */
127				pr_debug("md/raid0:%s:   EQUAL\n",
128					 mdname(mddev));
129				c = 1;
130				break;
131			}
132			pr_debug("md/raid0:%s:   NOT EQUAL\n",
133				 mdname(mddev));
134		}
135		if (!c) {
136			pr_debug("md/raid0:%s:   ==> UNIQUE\n",
137				 mdname(mddev));
138			conf->nr_strip_zones++;
139			pr_debug("md/raid0:%s: %d zones\n",
140				 mdname(mddev), conf->nr_strip_zones);
141		}
142	}
143	pr_debug("md/raid0:%s: FINAL %d zones\n",
144		 mdname(mddev), conf->nr_strip_zones);
145
146	if (conf->nr_strip_zones == 1) {
147		conf->layout = RAID0_ORIG_LAYOUT;
148	} else if (mddev->layout == RAID0_ORIG_LAYOUT ||
149		   mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
150		conf->layout = mddev->layout;
151	} else if (default_layout == RAID0_ORIG_LAYOUT ||
152		   default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
153		conf->layout = default_layout;
154	} else {
155		pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
156		       mdname(mddev));
157		pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
158		err = -ENOTSUPP;
159		goto abort;
160	}
161	/*
162	 * now since we have the hard sector sizes, we can make sure
163	 * chunk size is a multiple of that sector size
164	 */
165	if ((mddev->chunk_sectors << 9) % blksize) {
166		pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
167			mdname(mddev),
168			mddev->chunk_sectors << 9, blksize);
169		err = -EINVAL;
170		goto abort;
171	}
172
173	err = -ENOMEM;
174	conf->strip_zone = kcalloc(conf->nr_strip_zones,
175				   sizeof(struct strip_zone),
176				   GFP_KERNEL);
177	if (!conf->strip_zone)
178		goto abort;
179	conf->devlist = kzalloc(array3_size(sizeof(struct md_rdev *),
180					    conf->nr_strip_zones,
181					    mddev->raid_disks),
182				GFP_KERNEL);
183	if (!conf->devlist)
184		goto abort;
185
186	/* The first zone must contain all devices, so here we check that
187	 * there is a proper alignment of slots to devices and find them all
188	 */
189	zone = &conf->strip_zone[0];
190	cnt = 0;
191	smallest = NULL;
192	dev = conf->devlist;
193	err = -EINVAL;
194	rdev_for_each(rdev1, mddev) {
195		int j = rdev1->raid_disk;
196
197		if (mddev->level == 10) {
198			/* taking over a raid10-n2 array */
199			j /= 2;
200			rdev1->new_raid_disk = j;
201		}
202
203		if (mddev->level == 1) {
 204			/* taking over a raid1 array -
205			 * we have only one active disk
206			 */
207			j = 0;
208			rdev1->new_raid_disk = j;
209		}
210
211		if (j < 0) {
212			pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
213				mdname(mddev));
214			goto abort;
215		}
216		if (j >= mddev->raid_disks) {
217			pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
218				mdname(mddev), j);
219			goto abort;
220		}
221		if (dev[j]) {
222			pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
223				mdname(mddev), j);
224			goto abort;
225		}
226		dev[j] = rdev1;
227
228		if (!smallest || (rdev1->sectors < smallest->sectors))
229			smallest = rdev1;
230		cnt++;
231	}
232	if (cnt != mddev->raid_disks) {
233		pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
234			mdname(mddev), cnt, mddev->raid_disks);
235		goto abort;
236	}
237	zone->nb_dev = cnt;
238	zone->zone_end = smallest->sectors * cnt;
239
240	curr_zone_end = zone->zone_end;
241
242	/* now do the other zones */
243	for (i = 1; i < conf->nr_strip_zones; i++)
244	{
245		int j;
246
247		zone = conf->strip_zone + i;
248		dev = conf->devlist + i * mddev->raid_disks;
249
250		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
251		zone->dev_start = smallest->sectors;
252		smallest = NULL;
253		c = 0;
254
255		for (j=0; j<cnt; j++) {
256			rdev = conf->devlist[j];
257			if (rdev->sectors <= zone->dev_start) {
258				pr_debug("md/raid0:%s: checking %s ... nope\n",
259					 mdname(mddev),
260					 bdevname(rdev->bdev, b));
261				continue;
262			}
263			pr_debug("md/raid0:%s: checking %s ..."
264				 " contained as device %d\n",
265				 mdname(mddev),
266				 bdevname(rdev->bdev, b), c);
267			dev[c] = rdev;
268			c++;
269			if (!smallest || rdev->sectors < smallest->sectors) {
270				smallest = rdev;
 271				pr_debug("md/raid0:%s:  (%llu) is smallest!\n",
272					 mdname(mddev),
273					 (unsigned long long)rdev->sectors);
274			}
275		}
276
277		zone->nb_dev = c;
278		sectors = (smallest->sectors - zone->dev_start) * c;
279		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
280			 mdname(mddev),
281			 zone->nb_dev, (unsigned long long)sectors);
282
283		curr_zone_end += sectors;
284		zone->zone_end = curr_zone_end;
285
286		pr_debug("md/raid0:%s: current zone start: %llu\n",
287			 mdname(mddev),
288			 (unsigned long long)smallest->sectors);
289	}
290
291	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
292	*private_conf = conf;
293
294	return 0;
295abort:
296	kfree(conf->strip_zone);
297	kfree(conf->devlist);
298	kfree(conf);
299	*private_conf = ERR_PTR(err);
300	return err;
301}
302
303/* Find the zone which holds a particular offset
304 * Update *sectorp to be an offset in that zone
305 */
306static struct strip_zone *find_zone(struct r0conf *conf,
307				    sector_t *sectorp)
308{
309	int i;
310	struct strip_zone *z = conf->strip_zone;
311	sector_t sector = *sectorp;
312
313	for (i = 0; i < conf->nr_strip_zones; i++)
314		if (sector < z[i].zone_end) {
315			if (i)
316				*sectorp = sector - z[i-1].zone_end;
317			return z + i;
318		}
319	BUG();
320}
321
 322/*
 323 * remap the bio to the target device. two flows are kept separate
 324 * for performance: a power-of-2 chunk-size flow and a general one
 325*/
326static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
327				sector_t sector, sector_t *sector_offset)
328{
329	unsigned int sect_in_chunk;
330	sector_t chunk;
331	struct r0conf *conf = mddev->private;
332	int raid_disks = conf->strip_zone[0].nb_dev;
333	unsigned int chunk_sects = mddev->chunk_sectors;
334
335	if (is_power_of_2(chunk_sects)) {
336		int chunksect_bits = ffz(~chunk_sects);
337		/* find the sector offset inside the chunk */
338		sect_in_chunk  = sector & (chunk_sects - 1);
339		sector >>= chunksect_bits;
340		/* chunk in zone */
341		chunk = *sector_offset;
342		/* quotient is the chunk in real device*/
343		sector_div(chunk, zone->nb_dev << chunksect_bits);
344	} else{
345		sect_in_chunk = sector_div(sector, chunk_sects);
346		chunk = *sector_offset;
347		sector_div(chunk, chunk_sects * zone->nb_dev);
348	}
349	/*
350	*  position the bio over the real device
351	*  real sector = chunk in device + starting of zone
352	*	+ the position in the chunk
353	*/
354	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
355	return conf->devlist[(zone - conf->strip_zone)*raid_disks
356			     + sector_div(sector, zone->nb_dev)];
357}
358
359static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
360{
361	sector_t array_sectors = 0;
362	struct md_rdev *rdev;
363
364	WARN_ONCE(sectors || raid_disks,
365		  "%s does not support generic reshape\n", __func__);
366
367	rdev_for_each(rdev, mddev)
368		array_sectors += (rdev->sectors &
369				  ~(sector_t)(mddev->chunk_sectors-1));
370
371	return array_sectors;
372}
373
374static void raid0_free(struct mddev *mddev, void *priv);
375
376static int raid0_run(struct mddev *mddev)
377{
378	struct r0conf *conf;
379	int ret;
380
381	if (mddev->chunk_sectors == 0) {
382		pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
383		return -EINVAL;
384	}
385	if (md_check_no_bitmap(mddev))
386		return -EINVAL;
387
388	/* if private is not null, we are here after takeover */
389	if (mddev->private == NULL) {
390		ret = create_strip_zones(mddev, &conf);
391		if (ret < 0)
392			return ret;
393		mddev->private = conf;
394	}
395	conf = mddev->private;
396	if (mddev->queue) {
397		struct md_rdev *rdev;
398		bool discard_supported = false;
399
400		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
401		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
402		blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
403		blk_queue_max_discard_sectors(mddev->queue, UINT_MAX);
404
405		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
406		blk_queue_io_opt(mddev->queue,
407				 (mddev->chunk_sectors << 9) * mddev->raid_disks);
408
409		rdev_for_each(rdev, mddev) {
410			disk_stack_limits(mddev->gendisk, rdev->bdev,
411					  rdev->data_offset << 9);
412			if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
413				discard_supported = true;
414		}
415		if (!discard_supported)
416			blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
417		else
418			blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
419	}
420
421	/* calculate array device size */
422	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
423
424	pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
425		 mdname(mddev),
426		 (unsigned long long)mddev->array_sectors);
427
428	if (mddev->queue) {
429		/* calculate the max read-ahead size.
430		 * For read-ahead of large files to be effective, we need to
431		 * readahead at least twice a whole stripe. i.e. number of devices
432		 * multiplied by chunk size times 2.
433		 * If an individual device has an ra_pages greater than the
434		 * chunk size, then we will not drive that device as hard as it
435		 * wants.  We consider this a configuration error: a larger
436		 * chunksize should be used in that case.
437		 */
438		int stripe = mddev->raid_disks *
439			(mddev->chunk_sectors << 9) / PAGE_SIZE;
440		if (mddev->queue->backing_dev_info->ra_pages < 2* stripe)
441			mddev->queue->backing_dev_info->ra_pages = 2* stripe;
442	}
443
444	dump_zones(mddev);
445
446	ret = md_integrity_register(mddev);
447
448	return ret;
449}
450
451static void raid0_free(struct mddev *mddev, void *priv)
452{
453	struct r0conf *conf = priv;
454
455	kfree(conf->strip_zone);
456	kfree(conf->devlist);
457	kfree(conf);
458}
459
 460/*
 461 * Is the IO distributed over one or more chunks?
 462*/
463static inline int is_io_in_chunk_boundary(struct mddev *mddev,
464			unsigned int chunk_sects, struct bio *bio)
465{
466	if (likely(is_power_of_2(chunk_sects))) {
467		return chunk_sects >=
468			((bio->bi_iter.bi_sector & (chunk_sects-1))
469					+ bio_sectors(bio));
470	} else{
471		sector_t sector = bio->bi_iter.bi_sector;
472		return chunk_sects >= (sector_div(sector, chunk_sects)
473						+ bio_sectors(bio));
474	}
475}
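For example (values invented): with chunk_sects = 8, a 4-sector bio at sector 10 occupies (10 & 7) + 4 = 6 sectors of its chunk, so the helper reports it as contained in one chunk; a 7-sector bio at the same position would need 9 and thus spans a chunk boundary.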
476
477static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
478{
479	struct r0conf *conf = mddev->private;
480	struct strip_zone *zone;
481	sector_t start = bio->bi_iter.bi_sector;
482	sector_t end;
483	unsigned int stripe_size;
484	sector_t first_stripe_index, last_stripe_index;
485	sector_t start_disk_offset;
486	unsigned int start_disk_index;
487	sector_t end_disk_offset;
488	unsigned int end_disk_index;
489	unsigned int disk;
490
491	zone = find_zone(conf, &start);
492
493	if (bio_end_sector(bio) > zone->zone_end) {
494		struct bio *split = bio_split(bio,
495			zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
496			&mddev->bio_set);
497		bio_chain(split, bio);
498		generic_make_request(bio);
499		bio = split;
500		end = zone->zone_end;
501	} else
502		end = bio_end_sector(bio);
503
504	if (zone != conf->strip_zone)
505		end = end - zone[-1].zone_end;
506
 507	/* Now start and end are offsets within the zone */
508	stripe_size = zone->nb_dev * mddev->chunk_sectors;
509
510	first_stripe_index = start;
511	sector_div(first_stripe_index, stripe_size);
512	last_stripe_index = end;
513	sector_div(last_stripe_index, stripe_size);
514
515	start_disk_index = (int)(start - first_stripe_index * stripe_size) /
516		mddev->chunk_sectors;
517	start_disk_offset = ((int)(start - first_stripe_index * stripe_size) %
518		mddev->chunk_sectors) +
519		first_stripe_index * mddev->chunk_sectors;
520	end_disk_index = (int)(end - last_stripe_index * stripe_size) /
521		mddev->chunk_sectors;
522	end_disk_offset = ((int)(end - last_stripe_index * stripe_size) %
523		mddev->chunk_sectors) +
524		last_stripe_index * mddev->chunk_sectors;
525
526	for (disk = 0; disk < zone->nb_dev; disk++) {
527		sector_t dev_start, dev_end;
528		struct bio *discard_bio = NULL;
529		struct md_rdev *rdev;
530
531		if (disk < start_disk_index)
532			dev_start = (first_stripe_index + 1) *
533				mddev->chunk_sectors;
534		else if (disk > start_disk_index)
535			dev_start = first_stripe_index * mddev->chunk_sectors;
536		else
537			dev_start = start_disk_offset;
538
539		if (disk < end_disk_index)
540			dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
541		else if (disk > end_disk_index)
542			dev_end = last_stripe_index * mddev->chunk_sectors;
543		else
544			dev_end = end_disk_offset;
545
546		if (dev_end <= dev_start)
547			continue;
548
549		rdev = conf->devlist[(zone - conf->strip_zone) *
550			conf->strip_zone[0].nb_dev + disk];
551		if (__blkdev_issue_discard(rdev->bdev,
552			dev_start + zone->dev_start + rdev->data_offset,
553			dev_end - dev_start, GFP_NOIO, 0, &discard_bio) ||
554		    !discard_bio)
555			continue;
556		bio_chain(discard_bio, bio);
557		bio_clone_blkg_association(discard_bio, bio);
558		if (mddev->gendisk)
559			trace_block_bio_remap(bdev_get_queue(rdev->bdev),
560				discard_bio, disk_devt(mddev->gendisk),
561				bio->bi_iter.bi_sector);
562		generic_make_request(discard_bio);
563	}
564	bio_endio(bio);
565}
566
567static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
568{
569	struct r0conf *conf = mddev->private;
570	struct strip_zone *zone;
571	struct md_rdev *tmp_dev;
572	sector_t bio_sector;
573	sector_t sector;
574	sector_t orig_sector;
575	unsigned chunk_sects;
576	unsigned sectors;
577
578	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
579		md_flush_request(mddev, bio);
580		return true;
581	}
582
583	if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
584		raid0_handle_discard(mddev, bio);
585		return true;
586	}
587
588	bio_sector = bio->bi_iter.bi_sector;
589	sector = bio_sector;
590	chunk_sects = mddev->chunk_sectors;
591
592	sectors = chunk_sects -
593		(likely(is_power_of_2(chunk_sects))
594		 ? (sector & (chunk_sects-1))
595		 : sector_div(sector, chunk_sects));
596
597	/* Restore due to sector_div */
598	sector = bio_sector;
599
600	if (sectors < bio_sectors(bio)) {
601		struct bio *split = bio_split(bio, sectors, GFP_NOIO,
602					      &mddev->bio_set);
603		bio_chain(split, bio);
604		generic_make_request(bio);
605		bio = split;
606	}
607
608	orig_sector = sector;
609	zone = find_zone(mddev->private, &sector);
610	switch (conf->layout) {
611	case RAID0_ORIG_LAYOUT:
612		tmp_dev = map_sector(mddev, zone, orig_sector, &sector);
613		break;
614	case RAID0_ALT_MULTIZONE_LAYOUT:
615		tmp_dev = map_sector(mddev, zone, sector, &sector);
616		break;
617	default:
 618		WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
619		bio_io_error(bio);
620		return true;
621	}
622
623	if (unlikely(is_mddev_broken(tmp_dev, "raid0"))) {
624		bio_io_error(bio);
625		return true;
626	}
627
628	bio_set_dev(bio, tmp_dev->bdev);
629	bio->bi_iter.bi_sector = sector + zone->dev_start +
630		tmp_dev->data_offset;
631
632	if (mddev->gendisk)
633		trace_block_bio_remap(bio->bi_disk->queue, bio,
634				disk_devt(mddev->gendisk), bio_sector);
635	mddev_check_writesame(mddev, bio);
636	mddev_check_write_zeroes(mddev, bio);
637	generic_make_request(bio);
638	return true;
639}
640
641static void raid0_status(struct seq_file *seq, struct mddev *mddev)
642{
643	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
644	return;
645}
646
647static void *raid0_takeover_raid45(struct mddev *mddev)
648{
649	struct md_rdev *rdev;
650	struct r0conf *priv_conf;
651
652	if (mddev->degraded != 1) {
653		pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
654			mdname(mddev),
655			mddev->degraded);
656		return ERR_PTR(-EINVAL);
657	}
658
659	rdev_for_each(rdev, mddev) {
660		/* check slot number for a disk */
661		if (rdev->raid_disk == mddev->raid_disks-1) {
662			pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
663				mdname(mddev));
664			return ERR_PTR(-EINVAL);
665		}
666		rdev->sectors = mddev->dev_sectors;
667	}
668
669	/* Set new parameters */
670	mddev->new_level = 0;
671	mddev->new_layout = 0;
672	mddev->new_chunk_sectors = mddev->chunk_sectors;
673	mddev->raid_disks--;
674	mddev->delta_disks = -1;
675	/* make sure it will be not marked as dirty */
676	mddev->recovery_cp = MaxSector;
677	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
678
679	create_strip_zones(mddev, &priv_conf);
680
681	return priv_conf;
682}
683
684static void *raid0_takeover_raid10(struct mddev *mddev)
685{
686	struct r0conf *priv_conf;
687
688	/* Check layout:
689	 *  - far_copies must be 1
690	 *  - near_copies must be 2
691	 *  - disks number must be even
692	 *  - all mirrors must be already degraded
693	 */
694	if (mddev->layout != ((1 << 8) + 2)) {
 695		pr_warn("md/raid0:%s: Raid0 cannot takeover layout: 0x%x\n",
696			mdname(mddev),
697			mddev->layout);
698		return ERR_PTR(-EINVAL);
699	}
700	if (mddev->raid_disks & 1) {
701		pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
702			mdname(mddev));
703		return ERR_PTR(-EINVAL);
704	}
705	if (mddev->degraded != (mddev->raid_disks>>1)) {
706		pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
707			mdname(mddev));
708		return ERR_PTR(-EINVAL);
709	}
710
711	/* Set new parameters */
712	mddev->new_level = 0;
713	mddev->new_layout = 0;
714	mddev->new_chunk_sectors = mddev->chunk_sectors;
715	mddev->delta_disks = - mddev->raid_disks / 2;
716	mddev->raid_disks += mddev->delta_disks;
717	mddev->degraded = 0;
718	/* make sure it will be not marked as dirty */
719	mddev->recovery_cp = MaxSector;
720	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
721
722	create_strip_zones(mddev, &priv_conf);
723	return priv_conf;
724}
725
726static void *raid0_takeover_raid1(struct mddev *mddev)
727{
728	struct r0conf *priv_conf;
729	int chunksect;
730
731	/* Check layout:
732	 *  - (N - 1) mirror drives must be already faulty
733	 */
734	if ((mddev->raid_disks - 1) != mddev->degraded) {
 735		pr_err("md/raid0:%s: (N - 1) mirror drives must be already faulty!\n",
736		       mdname(mddev));
737		return ERR_PTR(-EINVAL);
738	}
739
740	/*
741	 * a raid1 doesn't have the notion of chunk size, so
742	 * figure out the largest suitable size we can use.
743	 */
744	chunksect = 64 * 2; /* 64K by default */
745
746	/* The array must be an exact multiple of chunksize */
747	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
748		chunksect >>= 1;
749
750	if ((chunksect << 9) < PAGE_SIZE)
751		/* array size does not allow a suitable chunk size */
752		return ERR_PTR(-EINVAL);
753
754	/* Set new parameters */
755	mddev->new_level = 0;
756	mddev->new_layout = 0;
757	mddev->new_chunk_sectors = chunksect;
758	mddev->chunk_sectors = chunksect;
759	mddev->delta_disks = 1 - mddev->raid_disks;
760	mddev->raid_disks = 1;
761	/* make sure it will be not marked as dirty */
762	mddev->recovery_cp = MaxSector;
763	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
764
765	create_strip_zones(mddev, &priv_conf);
766	return priv_conf;
767}
768
769static void *raid0_takeover(struct mddev *mddev)
770{
 771	/* raid0 can take over:
 772	 *  raid4 - if all data disks are active.
 773	 *  raid5 - provided it uses the Raid4 layout and one disk is faulty
 774	 *  raid10 - assuming we have all necessary active disks
 775	 *  raid1 - with (N - 1) mirror drives faulty
 776	 */
777
778	if (mddev->bitmap) {
779		pr_warn("md/raid0: %s: cannot takeover array with bitmap\n",
780			mdname(mddev));
781		return ERR_PTR(-EBUSY);
782	}
783	if (mddev->level == 4)
784		return raid0_takeover_raid45(mddev);
785
786	if (mddev->level == 5) {
787		if (mddev->layout == ALGORITHM_PARITY_N)
788			return raid0_takeover_raid45(mddev);
789
 790		pr_warn("md/raid0:%s: Raid0 can only takeover Raid5 with layout: %d\n",
791			mdname(mddev), ALGORITHM_PARITY_N);
792	}
793
794	if (mddev->level == 10)
795		return raid0_takeover_raid10(mddev);
796
797	if (mddev->level == 1)
798		return raid0_takeover_raid1(mddev);
799
800	pr_warn("Takeover from raid%i to raid0 not supported\n",
801		mddev->level);
802
803	return ERR_PTR(-EINVAL);
804}
805
806static void raid0_quiesce(struct mddev *mddev, int quiesce)
807{
808}
809
810static struct md_personality raid0_personality=
811{
812	.name		= "raid0",
813	.level		= 0,
814	.owner		= THIS_MODULE,
815	.make_request	= raid0_make_request,
816	.run		= raid0_run,
817	.free		= raid0_free,
818	.status		= raid0_status,
819	.size		= raid0_size,
820	.takeover	= raid0_takeover,
821	.quiesce	= raid0_quiesce,
822	.congested	= raid0_congested,
823};
824
825static int __init raid0_init (void)
826{
827	return register_md_personality (&raid0_personality);
828}
829
830static void raid0_exit (void)
831{
832	unregister_md_personality (&raid0_personality);
833}
834
835module_init(raid0_init);
836module_exit(raid0_exit);
837MODULE_LICENSE("GPL");
838MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
839MODULE_ALIAS("md-personality-2"); /* RAID0 */
840MODULE_ALIAS("md-raid0");
841MODULE_ALIAS("md-level-0");