/*
   raid0.c : Multiple Devices driver for Linux
	     Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
	     Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"
static int raid0_congested(struct mddev *mddev, int bits)
{
	struct r0conf *conf = mddev->private;
	struct md_rdev **devlist = conf->devlist;
	int raid_disks = conf->strip_zone[0].nb_dev;
	int i, ret = 0;

	for (i = 0; i < raid_disks && !ret; i++) {
		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

		ret |= bdi_congested(&q->backing_dev_info, bits);
	}
	return ret;
}

/*
 * inform the user of the raid configuration
 */
static void dump_zones(struct mddev *mddev)
{
	int j, k;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	char b[BDEVNAME_SIZE];
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;

	printk(KERN_INFO "md: RAID0 configuration for %s - %d zone%s\n",
	       mdname(mddev),
	       conf->nr_strip_zones, conf->nr_strip_zones == 1 ? "" : "s");
	for (j = 0; j < conf->nr_strip_zones; j++) {
		printk(KERN_INFO "md: zone%d=[", j);
		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			printk(KERN_CONT "%s%s", k ? "/" : "",
			       bdevname(conf->devlist[j * raid_disks
						      + k]->bdev, b));
		printk(KERN_CONT "]\n");

		zone_size = conf->strip_zone[j].zone_end - zone_start;
		printk(KERN_INFO "      zone-offset=%10lluKB, "
		       "device-offset=%10lluKB, size=%10lluKB\n",
		       (unsigned long long)zone_start >> 1,
		       (unsigned long long)conf->strip_zone[j].dev_start >> 1,
		       (unsigned long long)zone_size >> 1);
		zone_start = conf->strip_zone[j].zone_end;
	}
}

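/*
 * As an illustrative (made-up) example, a two-zone array built from
 * sda1, sdb1 and sdc1 would be reported by dump_zones() along these
 * lines (field widths approximate):
 *
 *   md: RAID0 configuration for md0 - 2 zones
 *   md: zone0=[sda1/sdb1/sdc1]
 *         zone-offset=         0KB, device-offset=         0KB, size=  31457280KB
 *   md: zone1=[sdb1/sdc1]
 *         zone-offset=  31457280KB, device-offset=  10485760KB, size=  20971520KB
 *
 * The KB figures are sector counts shifted right by one, since there
 * are two 512-byte sectors per KB.
 */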
static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
	int i, c, err;
	sector_t curr_zone_end, sectors;
	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];
	char b2[BDEVNAME_SIZE];
	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	unsigned short blksize = 512;

	*private_conf = ERR_PTR(-ENOMEM);
	if (!conf)
		return -ENOMEM;
	rdev_for_each(rdev1, mddev) {
		pr_debug("md/raid0:%s: looking at %s\n",
			 mdname(mddev),
			 bdevname(rdev1->bdev, b));
		c = 0;

		/* round size to chunk_size */
		sectors = rdev1->sectors;
		sector_div(sectors, mddev->chunk_sectors);
		rdev1->sectors = sectors * mddev->chunk_sectors;

		blksize = max(blksize, queue_logical_block_size(
				      rdev1->bdev->bd_disk->queue));

		rdev_for_each(rdev2, mddev) {
			pr_debug("md/raid0:%s: comparing %s(%llu)"
				 " with %s(%llu)\n",
				 mdname(mddev),
				 bdevname(rdev1->bdev, b),
				 (unsigned long long)rdev1->sectors,
				 bdevname(rdev2->bdev, b2),
				 (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				pr_debug("md/raid0:%s: END\n",
					 mdname(mddev));
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				pr_debug("md/raid0:%s: EQUAL\n",
					 mdname(mddev));
				c = 1;
				break;
			}
			pr_debug("md/raid0:%s: NOT EQUAL\n",
				 mdname(mddev));
		}
		if (!c) {
			pr_debug("md/raid0:%s: ==> UNIQUE\n",
				 mdname(mddev));
			conf->nr_strip_zones++;
			pr_debug("md/raid0:%s: %d zones\n",
				 mdname(mddev), conf->nr_strip_zones);
		}
	}
	pr_debug("md/raid0:%s: FINAL %d zones\n",
		 mdname(mddev), conf->nr_strip_zones);
	/*
	 * now since we have the hard sector sizes, we can make sure
	 * chunk size is a multiple of that sector size
	 */
	if ((mddev->chunk_sectors << 9) % blksize) {
		printk(KERN_ERR "md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
		       mdname(mddev),
		       mddev->chunk_sectors << 9, blksize);
		err = -EINVAL;
		goto abort;
	}

	err = -ENOMEM;
	conf->strip_zone = kzalloc(sizeof(struct strip_zone) *
				   conf->nr_strip_zones, GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	conf->devlist = kzalloc(sizeof(struct md_rdev *) *
				conf->nr_strip_zones * mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->devlist)
		goto abort;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	rdev_for_each(rdev1, mddev) {
		int j = rdev1->raid_disk;

		if (mddev->level == 10) {
			/* taking over a raid10-n2 array */
			j /= 2;
			rdev1->new_raid_disk = j;
		}

		if (mddev->level == 1) {
			/* taking over a raid1 array -
			 * we have only one active disk
			 */
			j = 0;
			rdev1->new_raid_disk = j;
		}

		if (j < 0) {
			printk(KERN_ERR
			       "md/raid0:%s: remove inactive devices before converting to RAID0\n",
			       mdname(mddev));
			goto abort;
		}
		if (j >= mddev->raid_disks) {
			printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
			       "aborting!\n", mdname(mddev), j);
			goto abort;
		}
		if (dev[j]) {
			printk(KERN_ERR "md/raid0:%s: multiple devices for %d - "
			       "aborting!\n", mdname(mddev), j);
			goto abort;
		}
		dev[j] = rdev1;

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
		       "aborting!\n", mdname(mddev), cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->zone_end = smallest->sectors * cnt;

	curr_zone_end = zone->zone_end;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++) {
		int j;

		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j = 0; j < cnt; j++) {
			rdev = conf->devlist[j];
			if (rdev->sectors <= zone->dev_start) {
				pr_debug("md/raid0:%s: checking %s ... nope\n",
					 mdname(mddev),
					 bdevname(rdev->bdev, b));
				continue;
			}
			pr_debug("md/raid0:%s: checking %s ..."
				 " contained as device %d\n",
				 mdname(mddev),
				 bdevname(rdev->bdev, b), c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				pr_debug("md/raid0:%s: (%llu) is smallest!\n",
					 mdname(mddev),
					 (unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		sectors = (smallest->sectors - zone->dev_start) * c;
		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
			 mdname(mddev),
			 zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		pr_debug("md/raid0:%s: current zone start: %llu\n",
			 mdname(mddev),
			 (unsigned long long)smallest->sectors);
	}

	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
	*private_conf = conf;

	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	*private_conf = ERR_PTR(err);
	return err;
}

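/*
 * A worked example of the zone layout (illustrative numbers): three
 * members of 40, 80 and 80 chunk-aligned sectors give two zones:
 *
 *   zone 0: dev_start = 0,  nb_dev = 3, zone_end = 40 * 3 = 120
 *   zone 1: dev_start = 40, nb_dev = 2, zone_end = 120 + (80 - 40) * 2 = 200
 *
 * zone_end is cumulative across the array, so the total capacity here
 * (200 sectors) equals the sum of the member sizes, and find_zone()
 * below can locate a zone with a simple linear scan of zone_end.
 */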
/* Find the zone which holds a particular offset
 * Update *sectorp to be an offset in that zone
 */
static struct strip_zone *find_zone(struct r0conf *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}

/*
 * Remap the bio to the target device. We separate two flows: a
 * power-of-2 flow and a general flow, for the sake of performance.
 */
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
				  sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	unsigned int chunk_sects = mddev->chunk_sectors;

	if (is_power_of_2(chunk_sects)) {
		int chunksect_bits = ffz(~chunk_sects);
		/* find the sector offset inside the chunk */
		sect_in_chunk = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;
		/* chunk in zone */
		chunk = *sector_offset;
		/* quotient is the chunk in real device */
		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else {
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}
	/*
	 * position the bio over the real device
	 * real sector = chunk in device + starting of zone
	 *	+ the position in the chunk
	 */
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	return conf->devlist[(zone - conf->strip_zone) * raid_disks
			     + sector_div(sector, zone->nb_dev)];
}
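
/*
 * Worked example of the mapping above (illustrative numbers): with
 * chunk_sects = 8 and a two-device zone, a zone-relative sector of 100
 * maps as follows:
 *
 *   sect_in_chunk  = 100 & 7        = 4  (offset inside the chunk)
 *   stripe chunk   = 100 >> 3       = 12 (12 % 2 = 0 -> first device)
 *   device chunk   = 100 / (2 << 3) = 6  (chunk index on that device)
 *   *sector_offset = 6 * 8 + 4      = 52 (sector within the device's
 *                                         portion of this zone)
 *
 * The caller then adds zone->dev_start and rdev->data_offset to obtain
 * the final sector on the member device.
 */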

static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	struct md_rdev *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	rdev_for_each(rdev, mddev)
		array_sectors += (rdev->sectors &
				  ~(sector_t)(mddev->chunk_sectors-1));

	return array_sectors;
}

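/*
 * The mask above rounds each member down to a whole number of chunks;
 * e.g. (again illustrative) with chunk_sectors = 128, a member of 1000
 * sectors contributes 1000 & ~127 = 896 sectors to the array size.
 */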
static void raid0_free(struct mddev *mddev, void *priv);

static int raid0_run(struct mddev *mddev)
{
	struct r0conf *conf;
	int ret;

	if (mddev->chunk_sectors == 0) {
		printk(KERN_ERR "md/raid0:%s: chunk size must be set.\n",
		       mdname(mddev));
		return -EINVAL;
	}
	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
		ret = create_strip_zones(mddev, &conf);
		if (ret < 0)
			return ret;
		mddev->private = conf;
	}
	conf = mddev->private;
	if (mddev->queue) {
		struct md_rdev *rdev;
		bool discard_supported = false;

		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);

		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
		blk_queue_io_opt(mddev->queue,
				 (mddev->chunk_sectors << 9) * mddev->raid_disks);

		rdev_for_each(rdev, mddev) {
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
			if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
				discard_supported = true;
		}
		if (!discard_supported)
			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
		else
			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
	}

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n",
	       mdname(mddev),
	       (unsigned long long)mddev->array_sectors);

	if (mddev->queue) {
		/* calculate the max read-ahead size.
		 * For read-ahead of large files to be effective, we need to
		 * readahead at least twice a whole stripe. i.e. number of devices
		 * multiplied by chunk size times 2.
		 * If an individual device has an ra_pages greater than the
		 * chunk size, then we will not drive that device as hard as it
		 * wants. We consider this a configuration error: a larger
		 * chunksize should be used in that case.
		 */
		int stripe = mddev->raid_disks *
			(mddev->chunk_sectors << 9) / PAGE_SIZE;
		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
	}
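
	/*
	 * For instance (illustrative numbers), four members with 512 KiB
	 * chunks and 4 KiB pages give stripe = 4 * 524288 / 4096 = 512
	 * pages, so read-ahead is raised to at least 1024 pages (4 MiB),
	 * i.e. two full stripes.
	 */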

	dump_zones(mddev);

	ret = md_integrity_register(mddev);

	return ret;
}

static void raid0_free(struct mddev *mddev, void *priv)
{
	struct r0conf *conf = priv;

	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
}

/*
 * Is the IO distributed over one or more chunks?
 */
static inline int is_io_in_chunk_boundary(struct mddev *mddev,
					  unsigned int chunk_sects, struct bio *bio)
{
	if (likely(is_power_of_2(chunk_sects))) {
		return chunk_sects >=
			((bio->bi_iter.bi_sector & (chunk_sects-1))
			 + bio_sectors(bio));
	} else {
		sector_t sector = bio->bi_iter.bi_sector;
		return chunk_sects >= (sector_div(sector, chunk_sects)
				       + bio_sectors(bio));
	}
}

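/*
 * For example (illustrative numbers), with chunk_sects = 8 a 4-sector
 * bio starting at sector 6 spans (6 & 7) + 4 = 10 > 8 sectors of a
 * chunk, so it crosses a chunk boundary and must be split below.
 */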
static void raid0_make_request(struct mddev *mddev, struct bio *bio)
{
	struct strip_zone *zone;
	struct md_rdev *tmp_dev;
	struct bio *split;

	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
		md_flush_request(mddev, bio);
		return;
	}

	do {
		sector_t sector = bio->bi_iter.bi_sector;
		unsigned chunk_sects = mddev->chunk_sectors;

		unsigned sectors = chunk_sects -
			(likely(is_power_of_2(chunk_sects))
			 ? (sector & (chunk_sects-1))
			 : sector_div(sector, chunk_sects));

		/* Restore due to sector_div */
		sector = bio->bi_iter.bi_sector;

		if (sectors < bio_sectors(bio)) {
			split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
			bio_chain(split, bio);
		} else {
			split = bio;
		}

		zone = find_zone(mddev->private, &sector);
		tmp_dev = map_sector(mddev, zone, sector, &sector);
		split->bi_bdev = tmp_dev->bdev;
		split->bi_iter.bi_sector = sector + zone->dev_start +
			tmp_dev->data_offset;

		if (unlikely((split->bi_rw & REQ_DISCARD) &&
			     !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
			/* Just ignore it */
			bio_endio(split);
		} else
			generic_make_request(split);
	} while (split != bio);
}

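/*
 * To illustrate the split loop (made-up numbers): with 8-sector chunks,
 * a 20-sector bio starting at sector 6 is issued as four chained pieces
 * of 2, 8, 8 and 2 sectors, each fully contained in one chunk; the last
 * iteration submits the remainder of the original bio itself.
 */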
static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
	return;
}

static void *raid0_takeover_raid45(struct mddev *mddev)
{
	struct md_rdev *rdev;
	struct r0conf *priv_conf;

	if (mddev->degraded != 1) {
		printk(KERN_ERR "md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
		       mdname(mddev),
		       mddev->degraded);
		return ERR_PTR(-EINVAL);
	}

	rdev_for_each(rdev, mddev) {
		/* check slot number for a disk */
		if (rdev->raid_disk == mddev->raid_disks-1) {
			printk(KERN_ERR "md/raid0:%s: raid5 must have missing parity disk!\n",
			       mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
		rdev->sectors = mddev->dev_sectors;
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks--;
	mddev->delta_disks = -1;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover_raid10(struct mddev *mddev)
{
	struct r0conf *priv_conf;

	/* Check layout:
	 *  - far_copies must be 1
	 *  - near_copies must be 2
	 *  - the number of disks must be even
	 *  - all mirrors must be already degraded
	 */
	if (mddev->layout != ((1 << 8) + 2)) {
		printk(KERN_ERR "md/raid0:%s: Raid0 cannot takeover layout: 0x%x\n",
		       mdname(mddev),
		       mddev->layout);
		return ERR_PTR(-EINVAL);
	}
	if (mddev->raid_disks & 1) {
		printk(KERN_ERR "md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}
	if (mddev->degraded != (mddev->raid_disks>>1)) {
		printk(KERN_ERR "md/raid0:%s: All mirrors must be already degraded!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = - mddev->raid_disks / 2;
	mddev->raid_disks += mddev->delta_disks;
	mddev->degraded = 0;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover_raid1(struct mddev *mddev)
{
	struct r0conf *priv_conf;
	int chunksect;

	/* Check layout:
	 *  - (N - 1) mirror drives must be already faulty
	 */
	if ((mddev->raid_disks - 1) != mddev->degraded) {
		printk(KERN_ERR "md/raid0:%s: (N - 1) mirror drives must be already faulty!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/*
	 * a raid1 doesn't have the notion of chunk size, so
	 * figure out the largest suitable size we can use.
	 */
	chunksect = 64 * 2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
		chunksect >>= 1;

	if ((chunksect << 9) < PAGE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = chunksect;
	mddev->chunk_sectors = chunksect;
	mddev->delta_disks = 1 - mddev->raid_disks;
	mddev->raid_disks = 1;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

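/*
 * Example of the chunk-size search above (illustrative): an array of
 * 1000000 sectors fails the test for chunksect = 128 (1000000 is not a
 * multiple of 128) but passes for 64, so the takeover uses 64-sector
 * (32 KiB) chunks, comfortably above PAGE_SIZE.
 */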
static void *raid0_takeover(struct mddev *mddev)
{
	/* raid0 can take over:
	 *  raid4 - if all data disks are active.
	 *  raid5 - providing it is Raid4 layout and one disk is faulty
	 *  raid10 - assuming we have all necessary active disks
	 *  raid1 - with (N - 1) mirror drives faulty
	 */

	if (mddev->bitmap) {
		printk(KERN_ERR "md/raid0: %s: cannot takeover array with bitmap\n",
		       mdname(mddev));
		return ERR_PTR(-EBUSY);
	}
	if (mddev->level == 4)
		return raid0_takeover_raid45(mddev);

	if (mddev->level == 5) {
		if (mddev->layout == ALGORITHM_PARITY_N)
			return raid0_takeover_raid45(mddev);

		printk(KERN_ERR "md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
		       mdname(mddev), ALGORITHM_PARITY_N);
	}

	if (mddev->level == 10)
		return raid0_takeover_raid10(mddev);

	if (mddev->level == 1)
		return raid0_takeover_raid1(mddev);

	printk(KERN_ERR "Takeover from raid%i to raid0 not supported\n",
	       mddev->level);

	return ERR_PTR(-EINVAL);
}

static void raid0_quiesce(struct mddev *mddev, int state)
{
}

static struct md_personality raid0_personality =
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.free		= raid0_free,
	.status		= raid0_status,
	.size		= raid0_size,
	.takeover	= raid0_takeover,
	.quiesce	= raid0_quiesce,
	.congested	= raid0_congested,
};

static int __init raid0_init(void)
{
	return register_md_personality(&raid0_personality);
}

static void raid0_exit(void)
{
	unregister_md_personality(&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");