1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2015 Shaohua Li <shli@fb.com>
4 * Copyright (C) 2016 Song Liu <songliubraving@fb.com>
5 */
6#include <linux/kernel.h>
7#include <linux/wait.h>
8#include <linux/blkdev.h>
9#include <linux/slab.h>
10#include <linux/raid/md_p.h>
11#include <linux/crc32c.h>
12#include <linux/random.h>
13#include <linux/kthread.h>
14#include <linux/types.h>
15#include "md.h"
16#include "raid5.h"
17#include "md-bitmap.h"
18#include "raid5-log.h"
19
20/*
21 * metadata/data are stored on disk in 4k units (blocks) regardless of the
22 * underlying hardware sector size. This only works with PAGE_SIZE == 4096.
23 */
24#define BLOCK_SECTORS (8)
25#define BLOCK_SECTOR_SHIFT (3)
26
27/*
28 * log->max_free_space is min(1/4 disk size, 10G reclaimable space).
29 *
30 * In write through mode, reclaim runs once every log->max_free_space.
31 * This keeps the recovery scan from taking too long.
32 */
33#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
34#define RECLAIM_MAX_FREE_SPACE_SHIFT (2)
35
36/* wake up reclaim thread periodically */
37#define R5C_RECLAIM_WAKEUP_INTERVAL (30 * HZ)
38/* start flush with these full stripes */
39#define R5C_FULL_STRIPE_FLUSH_BATCH(conf) (conf->max_nr_stripes / 4)
40/* reclaim stripes in groups */
41#define R5C_RECLAIM_STRIPE_GROUP (NR_STRIPE_HASH_LOCKS * 2)
42
43/*
44 * We only need 2 bios per I/O unit to make progress, but ensure we
45 * have a few more available to not get too tight.
46 */
47#define R5L_POOL_SIZE 4
48
49static char *r5c_journal_mode_str[] = {"write-through",
50 "write-back"};
51/*
52 * raid5 cache state machine
53 *
54 * With the RAID cache, each stripe works in two phases:
55 * - caching phase
56 * - writing-out phase
57 *
58 * These two phases are controlled by bit STRIPE_R5C_CACHING:
59 * if STRIPE_R5C_CACHING == 0, the stripe is in writing-out phase
60 * if STRIPE_R5C_CACHING == 1, the stripe is in caching phase
61 *
62 * When there is no journal, or the journal is in write-through mode,
63 * the stripe is always in writing-out phase.
64 *
65 * For write-back journal, the stripe is sent to caching phase on write
66 * (r5c_try_caching_write). r5c_make_stripe_write_out() kicks off
67 * the write-out phase by clearing STRIPE_R5C_CACHING.
68 *
69 * Stripes in caching phase do not write the raid disks. Instead, all
70 * writes are committed from the log device. Therefore, a stripe in
71 * caching phase handles writes as:
72 * - write to log device
73 * - return IO
74 *
75 * Stripes in writing-out phase handle writes as:
76 * - calculate parity
77 * - write pending data and parity to journal
78 * - write data and parity to raid disks
79 * - return IO for pending writes
80 */
81
82struct r5l_log {
83 struct md_rdev *rdev;
84
85 u32 uuid_checksum;
86
87 sector_t device_size; /* log device size, rounded to
88 * BLOCK_SECTORS */
89 sector_t max_free_space; /* reclaim runs when free space
90 * reaches this size */
91
92 sector_t last_checkpoint; /* log tail. where recovery scan
93 * starts from */
94 u64 last_cp_seq; /* log tail sequence */
95
96 sector_t log_start; /* log head. where new data appends */
97 u64 seq; /* log head sequence */
98
99 sector_t next_checkpoint;
100
101 struct mutex io_mutex;
102 struct r5l_io_unit *current_io; /* current io_unit accepting new data */
103
104 spinlock_t io_list_lock;
105 struct list_head running_ios; /* io_units which are still running,
106 * and have not yet been completely
107 * written to the log */
108 struct list_head io_end_ios; /* io_units which have been completely
109 * written to the log but not yet written
110 * to the RAID */
111 struct list_head flushing_ios; /* io_units which are waiting for log
112 * cache flush */
113 struct list_head finished_ios; /* io_units which settle down in log disk */
114 struct bio flush_bio;
115
116 struct list_head no_mem_stripes; /* pending stripes, -ENOMEM */
117
118 struct kmem_cache *io_kc;
119 mempool_t io_pool;
120 struct bio_set bs;
121 mempool_t meta_pool;
122
123 struct md_thread __rcu *reclaim_thread;
124 unsigned long reclaim_target; /* amount of space that needs to be
125 * reclaimed. if it's 0, reclaim the
126 * space used by io_units which are in
127 * IO_UNIT_STRIPE_END state (i.e. reclaim
128 * doesn't wait for a specific io_unit
129 * to switch to IO_UNIT_STRIPE_END
130 * state) */
131 wait_queue_head_t iounit_wait;
132
133 struct list_head no_space_stripes; /* pending stripes, log has no space */
134 spinlock_t no_space_stripes_lock;
135
136 bool need_cache_flush;
137
138 /* for r5c_cache */
139 enum r5c_journal_mode r5c_journal_mode;
140
141 /* all stripes in r5cache, in the order of seq at sh->log_start */
142 struct list_head stripe_in_journal_list;
143
144 spinlock_t stripe_in_journal_lock;
145 atomic_t stripe_in_journal_count;
146
147 /* to submit async io_units, to fulfill ordering of flush */
148 struct work_struct deferred_io_work;
149 /* to disable writeback while the array is degraded */
150 struct work_struct disable_writeback_work;
151
152 /* for chunk_aligned_read in writeback mode, details below */
153 spinlock_t tree_lock;
154 struct radix_tree_root big_stripe_tree;
155};
156
157/*
158 * Enable chunk_aligned_read() with write back cache.
159 *
160 * Each chunk may contain more than one stripe (for example, a 256kB
161 * chunk contains 64 4kB pages, so this chunk contains 64 stripes). For
162 * chunk_aligned_read, these stripes are grouped into one "big_stripe".
163 * For each big_stripe, we count how many stripes of this big_stripe
164 * are in the write back cache. This count is tracked in a radix tree
165 * (big_stripe_tree). We use radix_tree item pointer as the counter.
166 * r5c_tree_index() is used to calculate keys for the radix tree.
167 *
168 * chunk_aligned_read() calls r5c_big_stripe_cached() to look up
169 * big_stripe of each chunk in the tree. If this big_stripe is in the
170 * tree, chunk_aligned_read() aborts. This look up is protected by
171 * rcu_read_lock().
172 *
173 * It is necessary to remember whether a stripe is counted in
174 * big_stripe_tree. Instead of adding a new flag, we reuse existing flags:
175 * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE. If either of these
176 * two flags is set, the stripe is counted in big_stripe_tree. This
177 * requires moving set_bit(STRIPE_R5C_PARTIAL_STRIPE) to
178 * r5c_try_caching_write(); and moving clear_bit of
179 * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE to
180 * r5c_finish_stripe_write_out().
181 */
182
183/*
184 * The radix tree requires the lowest 2 bits of the data pointer to be 2b'00,
185 * so it is necessary to left shift the counter by 2 bits before using it
186 * as the data pointer of the tree.
187 */
188#define R5C_RADIX_COUNT_SHIFT 2
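/*
 * Illustrative example (not from the original source): a big_stripe counter
 * of 3 would be stored as the pointer value (3 << R5C_RADIX_COUNT_SHIFT),
 * i.e. 0xc, whose low two bits are zero as the radix tree requires; shifting
 * right by R5C_RADIX_COUNT_SHIFT recovers the count of 3.
 */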
189
190/*
191 * calculate key for big_stripe_tree
192 *
193 * sect: align_bi->bi_iter.bi_sector or sh->sector
194 */
195static inline sector_t r5c_tree_index(struct r5conf *conf,
196 sector_t sect)
197{
198 sector_div(sect, conf->chunk_sectors);
199 return sect;
200}
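/*
 * Worked example (values are illustrative): with conf->chunk_sectors == 512
 * (256kB chunks), a sector of 1234 yields the key 1234 / 512 == 2, so every
 * sector in the third chunk maps to the same big_stripe_tree entry.
 */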
201
202/*
203 * An IO range starts at a meta data block and ends at the next meta data
204 * block. The io unit's meta data block tracks the data/parity that follows
205 * it. The io unit is written to the log disk with a normal write; as we
206 * always flush the log disk first and only then start moving data to the
207 * raid disks, there is no requirement to write the io unit with FLUSH/FUA.
208 */
209struct r5l_io_unit {
210 struct r5l_log *log;
211
212 struct page *meta_page; /* store meta block */
213 int meta_offset; /* current offset in meta_page */
214
215 struct bio *current_bio;/* current_bio accepting new data */
216
217 atomic_t pending_stripe;/* how many stripes not flushed to raid */
218 u64 seq; /* seq number of the metablock */
219 sector_t log_start; /* where the io_unit starts */
220 sector_t log_end; /* where the io_unit ends */
221 struct list_head log_sibling; /* log->running_ios */
222 struct list_head stripe_list; /* stripes added to the io_unit */
223
224 int state;
225 bool need_split_bio;
226 struct bio *split_bio;
227
228 unsigned int has_flush:1; /* include flush request */
229 unsigned int has_fua:1; /* include fua request */
230 unsigned int has_null_flush:1; /* include null flush request */
231 unsigned int has_flush_payload:1; /* include flush payload */
232 /*
233 * io isn't submitted yet; a flush/fua request can only be submitted once
234 * this is the first IO in the running_ios list
235 */
236 unsigned int io_deferred:1;
237
238 struct bio_list flush_barriers; /* size == 0 flush bios */
239};
240
241/* r5l_io_unit state */
242enum r5l_io_unit_state {
243 IO_UNIT_RUNNING = 0, /* accepting new IO */
244 IO_UNIT_IO_START = 1, /* io_unit bio started writing to log,
245 * no longer accepting new bios */
246 IO_UNIT_IO_END = 2, /* io_unit bio finished writing to log */
247 IO_UNIT_STRIPE_END = 3, /* stripe data finished writing to raid */
248};
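/*
 * An io_unit only ever moves forward through these states;
 * __r5l_set_io_unit_state() below warns about and ignores any transition
 * that does not move forward.
 */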
249
250bool r5c_is_writeback(struct r5l_log *log)
251{
252 return (log != NULL &&
253 log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK);
254}
255
256static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
257{
258 start += inc;
259 if (start >= log->device_size)
260 start = start - log->device_size;
261 return start;
262}
263
264static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
265 sector_t end)
266{
267 if (end >= start)
268 return end - start;
269 else
270 return end + log->device_size - start;
271}
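/*
 * Worked example (values are illustrative): with device_size == 1000 sectors,
 * r5l_ring_distance(log, 900, 100) == 100 + 1000 - 900 == 200, i.e. the
 * distance wraps around the end of the ring.
 */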
272
273static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
274{
275 sector_t used_size;
276
277 used_size = r5l_ring_distance(log, log->last_checkpoint,
278 log->log_start);
279
280 return log->device_size > used_size + size;
281}
282
283static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
284 enum r5l_io_unit_state state)
285{
286 if (WARN_ON(io->state >= state))
287 return;
288 io->state = state;
289}
290
291static void
292r5c_return_dev_pending_writes(struct r5conf *conf, struct r5dev *dev)
293{
294 struct bio *wbi, *wbi2;
295
296 wbi = dev->written;
297 dev->written = NULL;
298 while (wbi && wbi->bi_iter.bi_sector <
299 dev->sector + RAID5_STRIPE_SECTORS(conf)) {
300 wbi2 = r5_next_bio(conf, wbi, dev->sector);
301 md_write_end(conf->mddev);
302 bio_endio(wbi);
303 wbi = wbi2;
304 }
305}
306
307void r5c_handle_cached_data_endio(struct r5conf *conf,
308 struct stripe_head *sh, int disks)
309{
310 int i;
311
312 for (i = sh->disks; i--; ) {
313 if (sh->dev[i].written) {
314 set_bit(R5_UPTODATE, &sh->dev[i].flags);
315 r5c_return_dev_pending_writes(conf, &sh->dev[i]);
316 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
317 RAID5_STRIPE_SECTORS(conf),
318 !test_bit(STRIPE_DEGRADED, &sh->state),
319 0);
320 }
321 }
322}
323
324void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
325
326/* Check whether we should flush some stripes to free up stripe cache */
327void r5c_check_stripe_cache_usage(struct r5conf *conf)
328{
329 int total_cached;
330 struct r5l_log *log = READ_ONCE(conf->log);
331
332 if (!r5c_is_writeback(log))
333 return;
334
335 total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
336 atomic_read(&conf->r5c_cached_full_stripes);
337
338 /*
339 * The following condition is true for either of the following:
340 * - stripe cache pressure high:
341 * total_cached > 3/4 min_nr_stripes ||
342 * empty_inactive_list_nr > 0
343 * - stripe cache pressure moderate:
344 * total_cached > 1/2 min_nr_stripes
345 */
346 if (total_cached > conf->min_nr_stripes * 1 / 2 ||
347 atomic_read(&conf->empty_inactive_list_nr) > 0)
348 r5l_wake_reclaim(log, 0);
349}
350
351/*
352 * flush cache when there are R5C_FULL_STRIPE_FLUSH_BATCH or more full
353 * stripes in the cache
354 */
355void r5c_check_cached_full_stripe(struct r5conf *conf)
356{
357 struct r5l_log *log = READ_ONCE(conf->log);
358
359 if (!r5c_is_writeback(log))
360 return;
361
362 /*
363 * wake up reclaim for R5C_FULL_STRIPE_FLUSH_BATCH cached stripes
364 * or a full stripe (chunk size / 4k stripes).
365 */
366 if (atomic_read(&conf->r5c_cached_full_stripes) >=
367 min(R5C_FULL_STRIPE_FLUSH_BATCH(conf),
368 conf->chunk_sectors >> RAID5_STRIPE_SHIFT(conf)))
369 r5l_wake_reclaim(log, 0);
370}
371
372/*
373 * Total log space (in sectors) needed to flush all data in cache
374 *
375 * To avoid deadlock due to log space, it is necessary to reserve log
376 * space to flush critical stripes (stripes that occupy log space near
377 * last_checkpoint). This function helps check how much log space is
378 * required to flush all cached stripes.
379 *
380 * To reduce log space requirements, two mechanisms are used to give cache
381 * flush higher priorities:
382 * 1. In handle_stripe_dirtying() and schedule_reconstruction(),
383 * stripes ALREADY in journal can be flushed w/o pending writes;
384 * 2. In r5l_write_stripe() and r5c_cache_data(), stripes NOT in journal
385 * can be delayed (r5l_add_no_space_stripe).
386 *
387 * In cache flush, the stripe goes through 1 and then 2. For a stripe that
388 * already passed 1, flushing it requires at most (conf->max_degraded + 1)
389 * pages of journal space. For stripes that have not passed 1, flushing them
390 * requires (conf->raid_disks + 1) pages of journal space. There are at
391 * most (conf->group_cnt + 1) stripes that passed 1. So the total journal space
392 * required to flush all cached stripes (in pages) is:
393 *
394 * (stripe_in_journal_count - group_cnt - 1) * (max_degraded + 1) +
395 * (group_cnt + 1) * (raid_disks + 1)
396 * or
397 * (stripe_in_journal_count) * (max_degraded + 1) +
398 * (group_cnt + 1) * (raid_disks - max_degraded)
399 */
400static sector_t r5c_log_required_to_flush_cache(struct r5conf *conf)
401{
402 struct r5l_log *log = READ_ONCE(conf->log);
403
404 if (!r5c_is_writeback(log))
405 return 0;
406
407 return BLOCK_SECTORS *
408 ((conf->max_degraded + 1) * atomic_read(&log->stripe_in_journal_count) +
409 (conf->raid_disks - conf->max_degraded) * (conf->group_cnt + 1));
410}
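/*
 * Worked example (array parameters are illustrative): for a RAID6 array with
 * max_degraded == 2, raid_disks == 6, group_cnt == 0 and 100 stripes in the
 * journal, this returns
 * BLOCK_SECTORS * ((2 + 1) * 100 + (6 - 2) * (0 + 1)) == 8 * 304 == 2432 sectors.
 */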
411
412/*
413 * evaluate log space usage and update R5C_LOG_TIGHT and R5C_LOG_CRITICAL
414 *
415 * R5C_LOG_TIGHT is set when free space on the log device is less than 3x of
416 * reclaim_required_space. R5C_LOG_CRITICAL is set when free space on the log
417 * device is less than 2x of reclaim_required_space.
418 */
419static inline void r5c_update_log_state(struct r5l_log *log)
420{
421 struct r5conf *conf = log->rdev->mddev->private;
422 sector_t free_space;
423 sector_t reclaim_space;
424 bool wake_reclaim = false;
425
426 if (!r5c_is_writeback(log))
427 return;
428
429 free_space = r5l_ring_distance(log, log->log_start,
430 log->last_checkpoint);
431 reclaim_space = r5c_log_required_to_flush_cache(conf);
432 if (free_space < 2 * reclaim_space)
433 set_bit(R5C_LOG_CRITICAL, &conf->cache_state);
434 else {
435 if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
436 wake_reclaim = true;
437 clear_bit(R5C_LOG_CRITICAL, &conf->cache_state);
438 }
439 if (free_space < 3 * reclaim_space)
440 set_bit(R5C_LOG_TIGHT, &conf->cache_state);
441 else
442 clear_bit(R5C_LOG_TIGHT, &conf->cache_state);
443
444 if (wake_reclaim)
445 r5l_wake_reclaim(log, 0);
446}
447
448/*
449 * Put the stripe into writing-out phase by clearing STRIPE_R5C_CACHING.
450 * This function should only be called in write-back mode.
451 */
452void r5c_make_stripe_write_out(struct stripe_head *sh)
453{
454 struct r5conf *conf = sh->raid_conf;
455 struct r5l_log *log = READ_ONCE(conf->log);
456
457 BUG_ON(!r5c_is_writeback(log));
458
459 WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
460 clear_bit(STRIPE_R5C_CACHING, &sh->state);
461
462 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
463 atomic_inc(&conf->preread_active_stripes);
464}
465
466static void r5c_handle_data_cached(struct stripe_head *sh)
467{
468 int i;
469
470 for (i = sh->disks; i--; )
471 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
472 set_bit(R5_InJournal, &sh->dev[i].flags);
473 clear_bit(R5_LOCKED, &sh->dev[i].flags);
474 }
475 clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
476}
477
478/*
479 * this journal write must contain full parity,
480 * it may also contain some data pages
481 */
482static void r5c_handle_parity_cached(struct stripe_head *sh)
483{
484 int i;
485
486 for (i = sh->disks; i--; )
487 if (test_bit(R5_InJournal, &sh->dev[i].flags))
488 set_bit(R5_Wantwrite, &sh->dev[i].flags);
489}
490
491/*
492 * Setting proper flags after writing (or flushing) data and/or parity to the
493 * log device. This is called from r5l_log_endio() or r5l_log_flush_endio().
494 */
495static void r5c_finish_cache_stripe(struct stripe_head *sh)
496{
497 struct r5l_log *log = READ_ONCE(sh->raid_conf->log);
498
499 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
500 BUG_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
501 /*
502 * Set R5_InJournal for parity dev[pd_idx]. This means
503 * all data AND parity are in the journal. For RAID 6, it is
504 * NOT necessary to set the flag for dev[qd_idx], as the
505 * two parities are written out together.
506 */
507 set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
508 } else if (test_bit(STRIPE_R5C_CACHING, &sh->state)) {
509 r5c_handle_data_cached(sh);
510 } else {
511 r5c_handle_parity_cached(sh);
512 set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
513 }
514}
515
516static void r5l_io_run_stripes(struct r5l_io_unit *io)
517{
518 struct stripe_head *sh, *next;
519
520 list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
521 list_del_init(&sh->log_list);
522
523 r5c_finish_cache_stripe(sh);
524
525 set_bit(STRIPE_HANDLE, &sh->state);
526 raid5_release_stripe(sh);
527 }
528}
529
530static void r5l_log_run_stripes(struct r5l_log *log)
531{
532 struct r5l_io_unit *io, *next;
533
534 lockdep_assert_held(&log->io_list_lock);
535
536 list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
537 /* don't change list order */
538 if (io->state < IO_UNIT_IO_END)
539 break;
540
541 list_move_tail(&io->log_sibling, &log->finished_ios);
542 r5l_io_run_stripes(io);
543 }
544}
545
546static void r5l_move_to_end_ios(struct r5l_log *log)
547{
548 struct r5l_io_unit *io, *next;
549
550 lockdep_assert_held(&log->io_list_lock);
551
552 list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
553 /* don't change list order */
554 if (io->state < IO_UNIT_IO_END)
555 break;
556 list_move_tail(&io->log_sibling, &log->io_end_ios);
557 }
558}
559
560static void __r5l_stripe_write_finished(struct r5l_io_unit *io);
561static void r5l_log_endio(struct bio *bio)
562{
563 struct r5l_io_unit *io = bio->bi_private;
564 struct r5l_io_unit *io_deferred;
565 struct r5l_log *log = io->log;
566 unsigned long flags;
567 bool has_null_flush;
568 bool has_flush_payload;
569
570 if (bio->bi_status)
571 md_error(log->rdev->mddev, log->rdev);
572
573 bio_put(bio);
574 mempool_free(io->meta_page, &log->meta_pool);
575
576 spin_lock_irqsave(&log->io_list_lock, flags);
577 __r5l_set_io_unit_state(io, IO_UNIT_IO_END);
578
579 /*
580 * if the io doesn't have null_flush or a flush payload,
581 * it is not safe to access it after releasing io_list_lock.
582 * Therefore, it is necessary to check the condition with
583 * the lock held.
584 */
585 has_null_flush = io->has_null_flush;
586 has_flush_payload = io->has_flush_payload;
587
588 if (log->need_cache_flush && !list_empty(&io->stripe_list))
589 r5l_move_to_end_ios(log);
590 else
591 r5l_log_run_stripes(log);
592 if (!list_empty(&log->running_ios)) {
593 /*
594 * FLUSH/FUA io_unit is deferred because of ordering, now we
595 * can dispatch it
596 */
597 io_deferred = list_first_entry(&log->running_ios,
598 struct r5l_io_unit, log_sibling);
599 if (io_deferred->io_deferred)
600 schedule_work(&log->deferred_io_work);
601 }
602
603 spin_unlock_irqrestore(&log->io_list_lock, flags);
604
605 if (log->need_cache_flush)
606 md_wakeup_thread(log->rdev->mddev->thread);
607
608 /* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */
609 if (has_null_flush) {
610 struct bio *bi;
611
612 WARN_ON(bio_list_empty(&io->flush_barriers));
613 while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) {
614 bio_endio(bi);
615 if (atomic_dec_and_test(&io->pending_stripe)) {
616 __r5l_stripe_write_finished(io);
617 return;
618 }
619 }
620 }
621 /* decrease pending_stripe for flush payload */
622 if (has_flush_payload)
623 if (atomic_dec_and_test(&io->pending_stripe))
624 __r5l_stripe_write_finished(io);
625}
626
627static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
628{
629 unsigned long flags;
630
631 spin_lock_irqsave(&log->io_list_lock, flags);
632 __r5l_set_io_unit_state(io, IO_UNIT_IO_START);
633 spin_unlock_irqrestore(&log->io_list_lock, flags);
634
635 /*
636 * In case of journal device failure, submit_bio will get an error
637 * and call endio, and then the active stripes will continue the write
638 * process. Therefore, it is not necessary to check the Faulty bit
639 * of journal device here.
640 *
641 * We can't check split_bio after current_bio is submitted. If
642 * io->split_bio is null, after current_bio is submitted, current_bio
643 * might already be completed and the io_unit is freed. We submit
644 * split_bio first to avoid the issue.
645 */
646 if (io->split_bio) {
647 if (io->has_flush)
648 io->split_bio->bi_opf |= REQ_PREFLUSH;
649 if (io->has_fua)
650 io->split_bio->bi_opf |= REQ_FUA;
651 submit_bio(io->split_bio);
652 }
653
654 if (io->has_flush)
655 io->current_bio->bi_opf |= REQ_PREFLUSH;
656 if (io->has_fua)
657 io->current_bio->bi_opf |= REQ_FUA;
658 submit_bio(io->current_bio);
659}
660
661/* deferred io_unit will be dispatched here */
662static void r5l_submit_io_async(struct work_struct *work)
663{
664 struct r5l_log *log = container_of(work, struct r5l_log,
665 deferred_io_work);
666 struct r5l_io_unit *io = NULL;
667 unsigned long flags;
668
669 spin_lock_irqsave(&log->io_list_lock, flags);
670 if (!list_empty(&log->running_ios)) {
671 io = list_first_entry(&log->running_ios, struct r5l_io_unit,
672 log_sibling);
673 if (!io->io_deferred)
674 io = NULL;
675 else
676 io->io_deferred = 0;
677 }
678 spin_unlock_irqrestore(&log->io_list_lock, flags);
679 if (io)
680 r5l_do_submit_io(log, io);
681}
682
683static void r5c_disable_writeback_async(struct work_struct *work)
684{
685 struct r5l_log *log = container_of(work, struct r5l_log,
686 disable_writeback_work);
687 struct mddev *mddev = log->rdev->mddev;
688 struct r5conf *conf = mddev->private;
689
690 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
691 return;
692 pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n",
693 mdname(mddev));
694
695 /* wait for the superblock change before suspend */
696 wait_event(mddev->sb_wait,
697 !READ_ONCE(conf->log) ||
698 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
699
700 log = READ_ONCE(conf->log);
701 if (log) {
702 mddev_suspend(mddev, false);
703 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
704 mddev_resume(mddev);
705 }
706}
707
708static void r5l_submit_current_io(struct r5l_log *log)
709{
710 struct r5l_io_unit *io = log->current_io;
711 struct r5l_meta_block *block;
712 unsigned long flags;
713 u32 crc;
714 bool do_submit = true;
715
716 if (!io)
717 return;
718
719 block = page_address(io->meta_page);
720 block->meta_size = cpu_to_le32(io->meta_offset);
721 crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
722 block->checksum = cpu_to_le32(crc);
723
724 log->current_io = NULL;
725 spin_lock_irqsave(&log->io_list_lock, flags);
726 if (io->has_flush || io->has_fua) {
727 if (io != list_first_entry(&log->running_ios,
728 struct r5l_io_unit, log_sibling)) {
729 io->io_deferred = 1;
730 do_submit = false;
731 }
732 }
733 spin_unlock_irqrestore(&log->io_list_lock, flags);
734 if (do_submit)
735 r5l_do_submit_io(log, io);
736}
737
738static struct bio *r5l_bio_alloc(struct r5l_log *log)
739{
740 struct bio *bio = bio_alloc_bioset(log->rdev->bdev, BIO_MAX_VECS,
741 REQ_OP_WRITE, GFP_NOIO, &log->bs);
742
743 bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;
744
745 return bio;
746}
747
748static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
749{
750 log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);
751
752 r5c_update_log_state(log);
753 /*
754 * If we filled up the log device, start from the beginning again,
755 * which will require a new bio.
756 *
757 * Note: for this to work properly the log size needs to be a multiple
758 * of BLOCK_SECTORS.
759 */
760 if (log->log_start == 0)
761 io->need_split_bio = true;
762
763 io->log_end = log->log_start;
764}
765
766static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
767{
768 struct r5l_io_unit *io;
769 struct r5l_meta_block *block;
770
771 io = mempool_alloc(&log->io_pool, GFP_ATOMIC);
772 if (!io)
773 return NULL;
774 memset(io, 0, sizeof(*io));
775
776 io->log = log;
777 INIT_LIST_HEAD(&io->log_sibling);
778 INIT_LIST_HEAD(&io->stripe_list);
779 bio_list_init(&io->flush_barriers);
780 io->state = IO_UNIT_RUNNING;
781
782 io->meta_page = mempool_alloc(&log->meta_pool, GFP_NOIO);
783 block = page_address(io->meta_page);
784 clear_page(block);
785 block->magic = cpu_to_le32(R5LOG_MAGIC);
786 block->version = R5LOG_VERSION;
787 block->seq = cpu_to_le64(log->seq);
788 block->position = cpu_to_le64(log->log_start);
789
790 io->log_start = log->log_start;
791 io->meta_offset = sizeof(struct r5l_meta_block);
792 io->seq = log->seq++;
793
794 io->current_bio = r5l_bio_alloc(log);
795 io->current_bio->bi_end_io = r5l_log_endio;
796 io->current_bio->bi_private = io;
797 __bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0);
798
799 r5_reserve_log_entry(log, io);
800
801 spin_lock_irq(&log->io_list_lock);
802 list_add_tail(&io->log_sibling, &log->running_ios);
803 spin_unlock_irq(&log->io_list_lock);
804
805 return io;
806}
807
808static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
809{
810 if (log->current_io &&
811 log->current_io->meta_offset + payload_size > PAGE_SIZE)
812 r5l_submit_current_io(log);
813
814 if (!log->current_io) {
815 log->current_io = r5l_new_meta(log);
816 if (!log->current_io)
817 return -ENOMEM;
818 }
819
820 return 0;
821}
822
823static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
824 sector_t location,
825 u32 checksum1, u32 checksum2,
826 bool checksum2_valid)
827{
828 struct r5l_io_unit *io = log->current_io;
829 struct r5l_payload_data_parity *payload;
830
831 payload = page_address(io->meta_page) + io->meta_offset;
832 payload->header.type = cpu_to_le16(type);
833 payload->header.flags = cpu_to_le16(0);
834 payload->size = cpu_to_le32((1 + !!checksum2_valid) <<
835 (PAGE_SHIFT - 9));
836 payload->location = cpu_to_le64(location);
837 payload->checksum[0] = cpu_to_le32(checksum1);
838 if (checksum2_valid)
839 payload->checksum[1] = cpu_to_le32(checksum2);
840
841 io->meta_offset += sizeof(struct r5l_payload_data_parity) +
842 sizeof(__le32) * (1 + !!checksum2_valid);
843}
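/*
 * For reference: each data payload carries one checksum, so it consumes
 * sizeof(struct r5l_payload_data_parity) + sizeof(__le32) bytes of the meta
 * page; a RAID6 parity payload carries two checksums and consumes
 * sizeof(__le32) more.
 */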
844
845static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
846{
847 struct r5l_io_unit *io = log->current_io;
848
849 if (io->need_split_bio) {
850 BUG_ON(io->split_bio);
851 io->split_bio = io->current_bio;
852 io->current_bio = r5l_bio_alloc(log);
853 bio_chain(io->current_bio, io->split_bio);
854 io->need_split_bio = false;
855 }
856
857 if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
858 BUG();
859
860 r5_reserve_log_entry(log, io);
861}
862
863static void r5l_append_flush_payload(struct r5l_log *log, sector_t sect)
864{
865 struct mddev *mddev = log->rdev->mddev;
866 struct r5conf *conf = mddev->private;
867 struct r5l_io_unit *io;
868 struct r5l_payload_flush *payload;
869 int meta_size;
870
871 /*
872 * payload_flush requires extra writes to the journal.
873 * To avoid handling the extra IO in quiesce, just skip
874 * flush_payload
875 */
876 if (conf->quiesce)
877 return;
878
879 mutex_lock(&log->io_mutex);
880 meta_size = sizeof(struct r5l_payload_flush) + sizeof(__le64);
881
882 if (r5l_get_meta(log, meta_size)) {
883 mutex_unlock(&log->io_mutex);
884 return;
885 }
886
887 /* current implementation is one stripe per flush payload */
888 io = log->current_io;
889 payload = page_address(io->meta_page) + io->meta_offset;
890 payload->header.type = cpu_to_le16(R5LOG_PAYLOAD_FLUSH);
891 payload->header.flags = cpu_to_le16(0);
892 payload->size = cpu_to_le32(sizeof(__le64));
893 payload->flush_stripes[0] = cpu_to_le64(sect);
894 io->meta_offset += meta_size;
895 /* multiple flush payloads count as one pending_stripe */
896 if (!io->has_flush_payload) {
897 io->has_flush_payload = 1;
898 atomic_inc(&io->pending_stripe);
899 }
900 mutex_unlock(&log->io_mutex);
901}
902
903static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
904 int data_pages, int parity_pages)
905{
906 int i;
907 int meta_size;
908 int ret;
909 struct r5l_io_unit *io;
910
911 meta_size =
912 ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
913 * data_pages) +
914 sizeof(struct r5l_payload_data_parity) +
915 sizeof(__le32) * parity_pages;
916
917 ret = r5l_get_meta(log, meta_size);
918 if (ret)
919 return ret;
920
921 io = log->current_io;
922
923 if (test_and_clear_bit(STRIPE_R5C_PREFLUSH, &sh->state))
924 io->has_flush = 1;
925
926 for (i = 0; i < sh->disks; i++) {
927 if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
928 test_bit(R5_InJournal, &sh->dev[i].flags))
929 continue;
930 if (i == sh->pd_idx || i == sh->qd_idx)
931 continue;
932 if (test_bit(R5_WantFUA, &sh->dev[i].flags) &&
933 log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) {
934 io->has_fua = 1;
935 /*
936 * we need to flush journal to make sure recovery can
937 * reach the data with fua flag
938 */
939 io->has_flush = 1;
940 }
941 r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
942 raid5_compute_blocknr(sh, i, 0),
943 sh->dev[i].log_checksum, 0, false);
944 r5l_append_payload_page(log, sh->dev[i].page);
945 }
946
947 if (parity_pages == 2) {
948 r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
949 sh->sector, sh->dev[sh->pd_idx].log_checksum,
950 sh->dev[sh->qd_idx].log_checksum, true);
951 r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
952 r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
953 } else if (parity_pages == 1) {
954 r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
955 sh->sector, sh->dev[sh->pd_idx].log_checksum,
956 0, false);
957 r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
958 } else /* Just writing data, not parity, in caching phase */
959 BUG_ON(parity_pages != 0);
960
961 list_add_tail(&sh->log_list, &io->stripe_list);
962 atomic_inc(&io->pending_stripe);
963 sh->log_io = io;
964
965 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
966 return 0;
967
968 if (sh->log_start == MaxSector) {
969 BUG_ON(!list_empty(&sh->r5c));
970 sh->log_start = io->log_start;
971 spin_lock_irq(&log->stripe_in_journal_lock);
972 list_add_tail(&sh->r5c,
973 &log->stripe_in_journal_list);
974 spin_unlock_irq(&log->stripe_in_journal_lock);
975 atomic_inc(&log->stripe_in_journal_count);
976 }
977 return 0;
978}
979
980/* add stripe to no_space_stripes, and then wake up reclaim */
981static inline void r5l_add_no_space_stripe(struct r5l_log *log,
982 struct stripe_head *sh)
983{
984 spin_lock(&log->no_space_stripes_lock);
985 list_add_tail(&sh->log_list, &log->no_space_stripes);
986 spin_unlock(&log->no_space_stripes_lock);
987}
988
989/*
990 * running in raid5d, where reclaim could wait for raid5d too (when it flushes
991 * data from log to raid disks), so we shouldn't wait for reclaim here
992 */
993int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
994{
995 struct r5conf *conf = sh->raid_conf;
996 int write_disks = 0;
997 int data_pages, parity_pages;
998 int reserve;
999 int i;
1000 int ret = 0;
1001 bool wake_reclaim = false;
1002
1003 if (!log)
1004 return -EAGAIN;
1005 /* Don't support stripe batch */
1006 if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
1007 test_bit(STRIPE_SYNCING, &sh->state)) {
1008 /* the stripe is written to log, we start writing it to raid */
1009 clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
1010 return -EAGAIN;
1011 }
1012
1013 WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
1014
1015 for (i = 0; i < sh->disks; i++) {
1016 void *addr;
1017
1018 if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
1019 test_bit(R5_InJournal, &sh->dev[i].flags))
1020 continue;
1021
1022 write_disks++;
1023 /* checksum is already calculated in last run */
1024 if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
1025 continue;
1026 addr = kmap_atomic(sh->dev[i].page);
1027 sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
1028 addr, PAGE_SIZE);
1029 kunmap_atomic(addr);
1030 }
1031 parity_pages = 1 + !!(sh->qd_idx >= 0);
1032 data_pages = write_disks - parity_pages;
1033
1034 set_bit(STRIPE_LOG_TRAPPED, &sh->state);
1035 /*
1036 * The stripe must enter state machine again to finish the write, so
1037 * don't delay.
1038 */
1039 clear_bit(STRIPE_DELAYED, &sh->state);
1040 atomic_inc(&sh->count);
1041
1042 mutex_lock(&log->io_mutex);
1043 /* meta + data */
1044 reserve = (1 + write_disks) << (PAGE_SHIFT - 9);
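	/*
	 * Illustrative example: with 4 KiB pages (PAGE_SHIFT - 9 == 3), a RAID5
	 * stripe with 3 data pages and 1 parity page has write_disks == 4, so
	 * reserve == (1 + 4) << 3 == 40 sectors.
	 */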
1045
1046 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
1047 if (!r5l_has_free_space(log, reserve)) {
1048 r5l_add_no_space_stripe(log, sh);
1049 wake_reclaim = true;
1050 } else {
1051 ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
1052 if (ret) {
1053 spin_lock_irq(&log->io_list_lock);
1054 list_add_tail(&sh->log_list,
1055 &log->no_mem_stripes);
1056 spin_unlock_irq(&log->io_list_lock);
1057 }
1058 }
1059 } else { /* R5C_JOURNAL_MODE_WRITE_BACK */
1060 /*
1061 * log space critical, do not process stripes that are
1062 * not in cache yet (sh->log_start == MaxSector).
1063 */
1064 if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
1065 sh->log_start == MaxSector) {
1066 r5l_add_no_space_stripe(log, sh);
1067 wake_reclaim = true;
1068 reserve = 0;
1069 } else if (!r5l_has_free_space(log, reserve)) {
1070 if (sh->log_start == log->last_checkpoint)
1071 BUG();
1072 else
1073 r5l_add_no_space_stripe(log, sh);
1074 } else {
1075 ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
1076 if (ret) {
1077 spin_lock_irq(&log->io_list_lock);
1078 list_add_tail(&sh->log_list,
1079 &log->no_mem_stripes);
1080 spin_unlock_irq(&log->io_list_lock);
1081 }
1082 }
1083 }
1084
1085 mutex_unlock(&log->io_mutex);
1086 if (wake_reclaim)
1087 r5l_wake_reclaim(log, reserve);
1088 return 0;
1089}
1090
1091void r5l_write_stripe_run(struct r5l_log *log)
1092{
1093 if (!log)
1094 return;
1095 mutex_lock(&log->io_mutex);
1096 r5l_submit_current_io(log);
1097 mutex_unlock(&log->io_mutex);
1098}
1099
1100int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
1101{
1102 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
1103 /*
1104 * in write through (journal only)
1105 * we flush log disk cache first, then write stripe data to
1106 * raid disks. So if bio is finished, the log disk cache is
1107 * flushed already. The recovery guarantees we can recover
1108 * the bio from the log disk, so we don't need to flush again
1109 */
1110 if (bio->bi_iter.bi_size == 0) {
1111 bio_endio(bio);
1112 return 0;
1113 }
1114 bio->bi_opf &= ~REQ_PREFLUSH;
1115 } else {
1116 /* write back (with cache) */
1117 if (bio->bi_iter.bi_size == 0) {
1118 mutex_lock(&log->io_mutex);
1119 r5l_get_meta(log, 0);
1120 bio_list_add(&log->current_io->flush_barriers, bio);
1121 log->current_io->has_flush = 1;
1122 log->current_io->has_null_flush = 1;
1123 atomic_inc(&log->current_io->pending_stripe);
1124 r5l_submit_current_io(log);
1125 mutex_unlock(&log->io_mutex);
1126 return 0;
1127 }
1128 }
1129 return -EAGAIN;
1130}
1131
1132/* This will run after log space is reclaimed */
1133static void r5l_run_no_space_stripes(struct r5l_log *log)
1134{
1135 struct stripe_head *sh;
1136
1137 spin_lock(&log->no_space_stripes_lock);
1138 while (!list_empty(&log->no_space_stripes)) {
1139 sh = list_first_entry(&log->no_space_stripes,
1140 struct stripe_head, log_list);
1141 list_del_init(&sh->log_list);
1142 set_bit(STRIPE_HANDLE, &sh->state);
1143 raid5_release_stripe(sh);
1144 }
1145 spin_unlock(&log->no_space_stripes_lock);
1146}
1147
1148/*
1149 * calculate new last_checkpoint
1150 * for write through mode, returns log->next_checkpoint
1151 * for write back, returns log_start of first sh in stripe_in_journal_list
1152 */
1153static sector_t r5c_calculate_new_cp(struct r5conf *conf)
1154{
1155 struct stripe_head *sh;
1156 struct r5l_log *log = READ_ONCE(conf->log);
1157 sector_t new_cp;
1158 unsigned long flags;
1159
1160 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
1161 return log->next_checkpoint;
1162
1163 spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
1164 if (list_empty(&log->stripe_in_journal_list)) {
1165 /* all stripes flushed */
1166 spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
1167 return log->next_checkpoint;
1168 }
1169 sh = list_first_entry(&log->stripe_in_journal_list,
1170 struct stripe_head, r5c);
1171 new_cp = sh->log_start;
1172 spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
1173 return new_cp;
1174}
1175
1176static sector_t r5l_reclaimable_space(struct r5l_log *log)
1177{
1178 struct r5conf *conf = log->rdev->mddev->private;
1179
1180 return r5l_ring_distance(log, log->last_checkpoint,
1181 r5c_calculate_new_cp(conf));
1182}
1183
1184static void r5l_run_no_mem_stripe(struct r5l_log *log)
1185{
1186 struct stripe_head *sh;
1187
1188 lockdep_assert_held(&log->io_list_lock);
1189
1190 if (!list_empty(&log->no_mem_stripes)) {
1191 sh = list_first_entry(&log->no_mem_stripes,
1192 struct stripe_head, log_list);
1193 list_del_init(&sh->log_list);
1194 set_bit(STRIPE_HANDLE, &sh->state);
1195 raid5_release_stripe(sh);
1196 }
1197}
1198
1199static bool r5l_complete_finished_ios(struct r5l_log *log)
1200{
1201 struct r5l_io_unit *io, *next;
1202 bool found = false;
1203
1204 lockdep_assert_held(&log->io_list_lock);
1205
1206 list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) {
1207 /* don't change list order */
1208 if (io->state < IO_UNIT_STRIPE_END)
1209 break;
1210
1211 log->next_checkpoint = io->log_start;
1212
1213 list_del(&io->log_sibling);
1214 mempool_free(io, &log->io_pool);
1215 r5l_run_no_mem_stripe(log);
1216
1217 found = true;
1218 }
1219
1220 return found;
1221}
1222
1223static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
1224{
1225 struct r5l_log *log = io->log;
1226 struct r5conf *conf = log->rdev->mddev->private;
1227 unsigned long flags;
1228
1229 spin_lock_irqsave(&log->io_list_lock, flags);
1230 __r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END);
1231
1232 if (!r5l_complete_finished_ios(log)) {
1233 spin_unlock_irqrestore(&log->io_list_lock, flags);
1234 return;
1235 }
1236
1237 if (r5l_reclaimable_space(log) > log->max_free_space ||
1238 test_bit(R5C_LOG_TIGHT, &conf->cache_state))
1239 r5l_wake_reclaim(log, 0);
1240
1241 spin_unlock_irqrestore(&log->io_list_lock, flags);
1242 wake_up(&log->iounit_wait);
1243}
1244
1245void r5l_stripe_write_finished(struct stripe_head *sh)
1246{
1247 struct r5l_io_unit *io;
1248
1249 io = sh->log_io;
1250 sh->log_io = NULL;
1251
1252 if (io && atomic_dec_and_test(&io->pending_stripe))
1253 __r5l_stripe_write_finished(io);
1254}
1255
1256static void r5l_log_flush_endio(struct bio *bio)
1257{
1258 struct r5l_log *log = container_of(bio, struct r5l_log,
1259 flush_bio);
1260 unsigned long flags;
1261 struct r5l_io_unit *io;
1262
1263 if (bio->bi_status)
1264 md_error(log->rdev->mddev, log->rdev);
1265 bio_uninit(bio);
1266
1267 spin_lock_irqsave(&log->io_list_lock, flags);
1268 list_for_each_entry(io, &log->flushing_ios, log_sibling)
1269 r5l_io_run_stripes(io);
1270 list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
1271 spin_unlock_irqrestore(&log->io_list_lock, flags);
1272}
1273
1274/*
1275 * Start dispatching IO to the raid disks.
1276 * The log consists of io_units (meta blocks). A broken meta block in the
1277 * middle of the log would keep recovery from finding the meta blocks at the
1278 * head of the log, so a meta block may only be relied upon once all meta
1279 * blocks before it are persistent in the log as well. A case is:
1280 *
1281 * stripe data/parity is in the log and we start writing the stripe to the
1282 * raid disks; the stripe data/parity must be persistent in the log before
1283 * we do the write to the raid disks.
1284 *
1285 * The solution is to strictly maintain io_unit list order: an io_unit's
1286 * stripes go to the raid disks only after it and all earlier io_units are in the log.
1287 */
1288void r5l_flush_stripe_to_raid(struct r5l_log *log)
1289{
1290 bool do_flush;
1291
1292 if (!log || !log->need_cache_flush)
1293 return;
1294
1295 spin_lock_irq(&log->io_list_lock);
1296 /* flush bio is running */
1297 if (!list_empty(&log->flushing_ios)) {
1298 spin_unlock_irq(&log->io_list_lock);
1299 return;
1300 }
1301 list_splice_tail_init(&log->io_end_ios, &log->flushing_ios);
1302 do_flush = !list_empty(&log->flushing_ios);
1303 spin_unlock_irq(&log->io_list_lock);
1304
1305 if (!do_flush)
1306 return;
1307 bio_init(&log->flush_bio, log->rdev->bdev, NULL, 0,
1308 REQ_OP_WRITE | REQ_PREFLUSH);
1309 log->flush_bio.bi_end_io = r5l_log_flush_endio;
1310 submit_bio(&log->flush_bio);
1311}
1312
1313static void r5l_write_super(struct r5l_log *log, sector_t cp);
1314static void r5l_write_super_and_discard_space(struct r5l_log *log,
1315 sector_t end)
1316{
1317 struct block_device *bdev = log->rdev->bdev;
1318 struct mddev *mddev;
1319
1320 r5l_write_super(log, end);
1321
1322 if (!bdev_max_discard_sectors(bdev))
1323 return;
1324
1325 mddev = log->rdev->mddev;
1326 /*
1327 * Discard could zero data, so before discard we must make sure
1328 * superblock is updated to new log tail. Updating superblock (either
1329 * directly call md_update_sb() or depend on md thread) must hold
1330 * reconfig mutex. On the other hand, raid5_quiesce is called with
1331 * the reconfig_mutex held. The first step of raid5_quiesce() is waiting
1332 * for all IO to finish, hence waiting for the reclaim thread, while the
1333 * reclaim thread is calling this function and waiting for the reconfig
1334 * mutex. So there is a deadlock. We work around this issue with a trylock.
1335 * FIXME: we could miss discard if we can't take reconfig mutex
1336 */
1337 set_mask_bits(&mddev->sb_flags, 0,
1338 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
1339 if (!mddev_trylock(mddev))
1340 return;
1341 md_update_sb(mddev, 1);
1342 mddev_unlock(mddev);
1343
1344 /* discard IO error really doesn't matter, ignore it */
1345 if (log->last_checkpoint < end) {
1346 blkdev_issue_discard(bdev,
1347 log->last_checkpoint + log->rdev->data_offset,
1348 end - log->last_checkpoint, GFP_NOIO);
1349 } else {
1350 blkdev_issue_discard(bdev,
1351 log->last_checkpoint + log->rdev->data_offset,
1352 log->device_size - log->last_checkpoint,
1353 GFP_NOIO);
1354 blkdev_issue_discard(bdev, log->rdev->data_offset, end,
1355 GFP_NOIO);
1356 }
1357}
1358
1359/*
1360 * r5c_flush_stripe moves stripe from cached list to handle_list. When called,
1361 * the stripe must be on r5c_cached_full_stripes or r5c_cached_partial_stripes.
1362 *
1363 * must hold conf->device_lock
1364 */
1365static void r5c_flush_stripe(struct r5conf *conf, struct stripe_head *sh)
1366{
1367 BUG_ON(list_empty(&sh->lru));
1368 BUG_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
1369 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
1370
1371 /*
1372 * The stripe is not ON_RELEASE_LIST, so it is safe to call
1373 * raid5_release_stripe() while holding conf->device_lock
1374 */
1375 BUG_ON(test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));
1376 lockdep_assert_held(&conf->device_lock);
1377
1378 list_del_init(&sh->lru);
1379 atomic_inc(&sh->count);
1380
1381 set_bit(STRIPE_HANDLE, &sh->state);
1382 atomic_inc(&conf->active_stripes);
1383 r5c_make_stripe_write_out(sh);
1384
1385 if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state))
1386 atomic_inc(&conf->r5c_flushing_partial_stripes);
1387 else
1388 atomic_inc(&conf->r5c_flushing_full_stripes);
1389 raid5_release_stripe(sh);
1390}
1391
1392/*
1393 * if num == 0, flush all full stripes
1394 * if num > 0, flush all full stripes. If fewer than num full stripes are
1395 * flushed, flush some partial stripes until num stripes in total are
1396 * flushed or there are no more cached stripes.
1397 */
1398void r5c_flush_cache(struct r5conf *conf, int num)
1399{
1400 int count;
1401 struct stripe_head *sh, *next;
1402
1403 lockdep_assert_held(&conf->device_lock);
1404 if (!READ_ONCE(conf->log))
1405 return;
1406
1407 count = 0;
1408 list_for_each_entry_safe(sh, next, &conf->r5c_full_stripe_list, lru) {
1409 r5c_flush_stripe(conf, sh);
1410 count++;
1411 }
1412
1413 if (count >= num)
1414 return;
1415 list_for_each_entry_safe(sh, next,
1416 &conf->r5c_partial_stripe_list, lru) {
1417 r5c_flush_stripe(conf, sh);
1418 if (++count >= num)
1419 break;
1420 }
1421}
1422
1423static void r5c_do_reclaim(struct r5conf *conf)
1424{
1425 struct r5l_log *log = READ_ONCE(conf->log);
1426 struct stripe_head *sh;
1427 int count = 0;
1428 unsigned long flags;
1429 int total_cached;
1430 int stripes_to_flush;
1431 int flushing_partial, flushing_full;
1432
1433 if (!r5c_is_writeback(log))
1434 return;
1435
1436 flushing_partial = atomic_read(&conf->r5c_flushing_partial_stripes);
1437 flushing_full = atomic_read(&conf->r5c_flushing_full_stripes);
1438 total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
1439 atomic_read(&conf->r5c_cached_full_stripes) -
1440 flushing_full - flushing_partial;
1441
1442 if (total_cached > conf->min_nr_stripes * 3 / 4 ||
1443 atomic_read(&conf->empty_inactive_list_nr) > 0)
1444 /*
1445 * if stripe cache pressure is high, flush all full stripes and
1446 * some partial stripes
1447 */
1448 stripes_to_flush = R5C_RECLAIM_STRIPE_GROUP;
1449 else if (total_cached > conf->min_nr_stripes * 1 / 2 ||
1450 atomic_read(&conf->r5c_cached_full_stripes) - flushing_full >
1451 R5C_FULL_STRIPE_FLUSH_BATCH(conf))
1452 /*
1453 * if stripe cache pressure is moderate, or if there are many full
1454 * stripes, flush all full stripes
1455 */
1456 stripes_to_flush = 0;
1457 else
1458 /* no need to flush */
1459 stripes_to_flush = -1;
1460
1461 if (stripes_to_flush >= 0) {
1462 spin_lock_irqsave(&conf->device_lock, flags);
1463 r5c_flush_cache(conf, stripes_to_flush);
1464 spin_unlock_irqrestore(&conf->device_lock, flags);
1465 }
1466
1467 /* if log space is tight, flush stripes on stripe_in_journal_list */
1468 if (test_bit(R5C_LOG_TIGHT, &conf->cache_state)) {
1469 spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
1470 spin_lock(&conf->device_lock);
1471 list_for_each_entry(sh, &log->stripe_in_journal_list, r5c) {
1472 /*
1473 * stripes on stripe_in_journal_list could be in any
1474 * state of the stripe_cache state machine. In this
1475 * case, we only want to flush stripe on
1476 * r5c_cached_full/partial_stripes. The following
1477 * condition makes sure the stripe is on one of the
1478 * two lists.
1479 */
1480 if (!list_empty(&sh->lru) &&
1481 !test_bit(STRIPE_HANDLE, &sh->state) &&
1482 atomic_read(&sh->count) == 0) {
1483 r5c_flush_stripe(conf, sh);
1484 if (count++ >= R5C_RECLAIM_STRIPE_GROUP)
1485 break;
1486 }
1487 }
1488 spin_unlock(&conf->device_lock);
1489 spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
1490 }
1491
1492 if (!test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
1493 r5l_run_no_space_stripes(log);
1494
1495 md_wakeup_thread(conf->mddev->thread);
1496}
1497
1498static void r5l_do_reclaim(struct r5l_log *log)
1499{
1500 struct r5conf *conf = log->rdev->mddev->private;
1501 sector_t reclaim_target = xchg(&log->reclaim_target, 0);
1502 sector_t reclaimable;
1503 sector_t next_checkpoint;
1504 bool write_super;
1505
1506 spin_lock_irq(&log->io_list_lock);
1507 write_super = r5l_reclaimable_space(log) > log->max_free_space ||
1508 reclaim_target != 0 || !list_empty(&log->no_space_stripes);
1509 /*
1510 * move the proper io_units to the reclaim list. We should not change the
1511 * order. reclaimable/unreclaimable io_units can be mixed in the list; we
1512 * shouldn't reuse the space of an unreclaimable io_unit
1513 */
1514 while (1) {
1515 reclaimable = r5l_reclaimable_space(log);
1516 if (reclaimable >= reclaim_target ||
1517 (list_empty(&log->running_ios) &&
1518 list_empty(&log->io_end_ios) &&
1519 list_empty(&log->flushing_ios) &&
1520 list_empty(&log->finished_ios)))
1521 break;
1522
1523 md_wakeup_thread(log->rdev->mddev->thread);
1524 wait_event_lock_irq(log->iounit_wait,
1525 r5l_reclaimable_space(log) > reclaimable,
1526 log->io_list_lock);
1527 }
1528
1529 next_checkpoint = r5c_calculate_new_cp(conf);
1530 spin_unlock_irq(&log->io_list_lock);
1531
1532 if (reclaimable == 0 || !write_super)
1533 return;
1534
1535 /*
1536 * write_super will flush cache of each raid disk. We must write super
1537 * here, because the log area might be reused soon and we don't want to
1538 * confuse recovery
1539 */
1540 r5l_write_super_and_discard_space(log, next_checkpoint);
1541
1542 mutex_lock(&log->io_mutex);
1543 log->last_checkpoint = next_checkpoint;
1544 r5c_update_log_state(log);
1545 mutex_unlock(&log->io_mutex);
1546
1547 r5l_run_no_space_stripes(log);
1548}
1549
1550static void r5l_reclaim_thread(struct md_thread *thread)
1551{
1552 struct mddev *mddev = thread->mddev;
1553 struct r5conf *conf = mddev->private;
1554 struct r5l_log *log = READ_ONCE(conf->log);
1555
1556 if (!log)
1557 return;
1558 r5c_do_reclaim(conf);
1559 r5l_do_reclaim(log);
1560}
1561
1562void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
1563{
1564 unsigned long target;
1565 unsigned long new = (unsigned long)space; /* overflow in theory */
1566
1567 if (!log)
1568 return;
1569
1570 target = READ_ONCE(log->reclaim_target);
1571 do {
1572 if (new < target)
1573 return;
1574 } while (!try_cmpxchg(&log->reclaim_target, &target, new));
1575 md_wakeup_thread(log->reclaim_thread);
1576}
1577
1578void r5l_quiesce(struct r5l_log *log, int quiesce)
1579{
1580 struct mddev *mddev = log->rdev->mddev;
1581 struct md_thread *thread = rcu_dereference_protected(
1582 log->reclaim_thread, lockdep_is_held(&mddev->reconfig_mutex));
1583
1584 if (quiesce) {
1585 /* make sure r5l_write_super_and_discard_space exits */
1586 wake_up(&mddev->sb_wait);
1587 kthread_park(thread->tsk);
1588 r5l_wake_reclaim(log, MaxSector);
1589 r5l_do_reclaim(log);
1590 } else
1591 kthread_unpark(thread->tsk);
1592}
1593
1594bool r5l_log_disk_error(struct r5conf *conf)
1595{
1596 struct r5l_log *log = READ_ONCE(conf->log);
1597
1598 /* don't allow write if journal disk is missing */
1599 if (!log)
1600 return test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
1601 else
1602 return test_bit(Faulty, &log->rdev->flags);
1603}
1604
1605#define R5L_RECOVERY_PAGE_POOL_SIZE 256
1606
1607struct r5l_recovery_ctx {
1608 struct page *meta_page; /* current meta */
1609 sector_t meta_total_blocks; /* total size of current meta and data */
1610 sector_t pos; /* recovery position */
1611 u64 seq; /* recovery position seq */
1612 int data_parity_stripes; /* number of data_parity stripes */
1613 int data_only_stripes; /* number of data_only stripes */
1614 struct list_head cached_list;
1615
1616 /*
1617 * read ahead page pool (ra_pool)
1618 * in recovery, the log is read sequentially. It is not efficient to
1619 * read every page with sync_page_io(). The read ahead page pool
1620 * reads multiple pages with one IO, so further log reads can
1621 * just copy data from the pool.
1622 */
1623 struct page *ra_pool[R5L_RECOVERY_PAGE_POOL_SIZE];
1624 struct bio_vec ra_bvec[R5L_RECOVERY_PAGE_POOL_SIZE];
1625 sector_t pool_offset; /* offset of first page in the pool */
1626 int total_pages; /* total allocated pages */
1627 int valid_pages; /* pages with valid data */
1628};
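/*
 * Illustrative example: with pool_offset == 64 and valid_pages == 256, the
 * pool caches the log blocks at sectors [64, 64 + 256 * BLOCK_SECTORS), and
 * r5l_recovery_read_page() below serves reads in that range from memory.
 */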
1629
1630static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
1631 struct r5l_recovery_ctx *ctx)
1632{
1633 struct page *page;
1634
1635 ctx->valid_pages = 0;
1636 ctx->total_pages = 0;
1637 while (ctx->total_pages < R5L_RECOVERY_PAGE_POOL_SIZE) {
1638 page = alloc_page(GFP_KERNEL);
1639
1640 if (!page)
1641 break;
1642 ctx->ra_pool[ctx->total_pages] = page;
1643 ctx->total_pages += 1;
1644 }
1645
1646 if (ctx->total_pages == 0)
1647 return -ENOMEM;
1648
1649 ctx->pool_offset = 0;
1650 return 0;
1651}
1652
1653static void r5l_recovery_free_ra_pool(struct r5l_log *log,
1654 struct r5l_recovery_ctx *ctx)
1655{
1656 int i;
1657
1658 for (i = 0; i < ctx->total_pages; ++i)
1659 put_page(ctx->ra_pool[i]);
1660}
1661
1662/*
1663 * fetch ctx->valid_pages pages from offset
1664 * In normal cases, ctx->valid_pages == ctx->total_pages after the call.
1665 * However, if the offset is close to the end of the journal device,
1666 * ctx->valid_pages could be smaller than ctx->total_pages
1667 */
1668static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
1669 struct r5l_recovery_ctx *ctx,
1670 sector_t offset)
1671{
1672 struct bio bio;
1673 int ret;
1674
1675 bio_init(&bio, log->rdev->bdev, ctx->ra_bvec,
1676 R5L_RECOVERY_PAGE_POOL_SIZE, REQ_OP_READ);
1677 bio.bi_iter.bi_sector = log->rdev->data_offset + offset;
1678
1679 ctx->valid_pages = 0;
1680 ctx->pool_offset = offset;
1681
1682 while (ctx->valid_pages < ctx->total_pages) {
1683 __bio_add_page(&bio, ctx->ra_pool[ctx->valid_pages], PAGE_SIZE,
1684 0);
1685 ctx->valid_pages += 1;
1686
1687 offset = r5l_ring_add(log, offset, BLOCK_SECTORS);
1688
1689 if (offset == 0) /* reached end of the device */
1690 break;
1691 }
1692
1693 ret = submit_bio_wait(&bio);
1694 bio_uninit(&bio);
1695 return ret;
1696}
1697
1698/*
1699 * try to read a page from the read ahead page pool; if the page is not in the
1700 * pool, call r5l_recovery_fetch_ra_pool
1701 */
1702static int r5l_recovery_read_page(struct r5l_log *log,
1703 struct r5l_recovery_ctx *ctx,
1704 struct page *page,
1705 sector_t offset)
1706{
1707 int ret;
1708
1709 if (offset < ctx->pool_offset ||
1710 offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS) {
1711 ret = r5l_recovery_fetch_ra_pool(log, ctx, offset);
1712 if (ret)
1713 return ret;
1714 }
1715
1716 BUG_ON(offset < ctx->pool_offset ||
1717 offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS);
1718
1719 memcpy(page_address(page),
1720 page_address(ctx->ra_pool[(offset - ctx->pool_offset) >>
1721 BLOCK_SECTOR_SHIFT]),
1722 PAGE_SIZE);
1723 return 0;
1724}
1725
1726static int r5l_recovery_read_meta_block(struct r5l_log *log,
1727 struct r5l_recovery_ctx *ctx)
1728{
1729 struct page *page = ctx->meta_page;
1730 struct r5l_meta_block *mb;
1731 u32 crc, stored_crc;
1732 int ret;
1733
1734 ret = r5l_recovery_read_page(log, ctx, page, ctx->pos);
1735 if (ret != 0)
1736 return ret;
1737
1738 mb = page_address(page);
1739 stored_crc = le32_to_cpu(mb->checksum);
1740 mb->checksum = 0;
1741
1742 if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
1743 le64_to_cpu(mb->seq) != ctx->seq ||
1744 mb->version != R5LOG_VERSION ||
1745 le64_to_cpu(mb->position) != ctx->pos)
1746 return -EINVAL;
1747
1748 crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
1749 if (stored_crc != crc)
1750 return -EINVAL;
1751
1752 if (le32_to_cpu(mb->meta_size) > PAGE_SIZE)
1753 return -EINVAL;
1754
1755 ctx->meta_total_blocks = BLOCK_SECTORS;
1756
1757 return 0;
1758}
1759
1760static void
1761r5l_recovery_create_empty_meta_block(struct r5l_log *log,
1762 struct page *page,
1763 sector_t pos, u64 seq)
1764{
1765 struct r5l_meta_block *mb;
1766
1767 mb = page_address(page);
1768 clear_page(mb);
1769 mb->magic = cpu_to_le32(R5LOG_MAGIC);
1770 mb->version = R5LOG_VERSION;
1771 mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
1772 mb->seq = cpu_to_le64(seq);
1773 mb->position = cpu_to_le64(pos);
1774}
1775
1776static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
1777 u64 seq)
1778{
1779 struct page *page;
1780 struct r5l_meta_block *mb;
1781
1782 page = alloc_page(GFP_KERNEL);
1783 if (!page)
1784 return -ENOMEM;
1785 r5l_recovery_create_empty_meta_block(log, page, pos, seq);
1786 mb = page_address(page);
1787 mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
1788 mb, PAGE_SIZE));
1789 if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE |
1790 REQ_SYNC | REQ_FUA, false)) {
1791 __free_page(page);
1792 return -EIO;
1793 }
1794 __free_page(page);
1795 return 0;
1796}
1797
1798/*
1799 * r5l_recovery_load_data and r5l_recovery_load_parity use the flag R5_Wantwrite
1800 * to mark valid (potentially not flushed) data in the journal.
1801 *
1802 * We already verified checksum in r5l_recovery_verify_data_checksum_for_mb,
1803 * so there should not be any mismatch here.
1804 */
1805static void r5l_recovery_load_data(struct r5l_log *log,
1806 struct stripe_head *sh,
1807 struct r5l_recovery_ctx *ctx,
1808 struct r5l_payload_data_parity *payload,
1809 sector_t log_offset)
1810{
1811 struct mddev *mddev = log->rdev->mddev;
1812 struct r5conf *conf = mddev->private;
1813 int dd_idx;
1814
1815 raid5_compute_sector(conf,
1816 le64_to_cpu(payload->location), 0,
1817 &dd_idx, sh);
1818 r5l_recovery_read_page(log, ctx, sh->dev[dd_idx].page, log_offset);
1819 sh->dev[dd_idx].log_checksum =
1820 le32_to_cpu(payload->checksum[0]);
1821 ctx->meta_total_blocks += BLOCK_SECTORS;
1822
1823 set_bit(R5_Wantwrite, &sh->dev[dd_idx].flags);
1824 set_bit(STRIPE_R5C_CACHING, &sh->state);
1825}
1826
1827static void r5l_recovery_load_parity(struct r5l_log *log,
1828 struct stripe_head *sh,
1829 struct r5l_recovery_ctx *ctx,
1830 struct r5l_payload_data_parity *payload,
1831 sector_t log_offset)
1832{
1833 struct mddev *mddev = log->rdev->mddev;
1834 struct r5conf *conf = mddev->private;
1835
1836 ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
1837 r5l_recovery_read_page(log, ctx, sh->dev[sh->pd_idx].page, log_offset);
1838 sh->dev[sh->pd_idx].log_checksum =
1839 le32_to_cpu(payload->checksum[0]);
1840 set_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags);
1841
1842 if (sh->qd_idx >= 0) {
1843 r5l_recovery_read_page(
1844 log, ctx, sh->dev[sh->qd_idx].page,
1845 r5l_ring_add(log, log_offset, BLOCK_SECTORS));
1846 sh->dev[sh->qd_idx].log_checksum =
1847 le32_to_cpu(payload->checksum[1]);
1848 set_bit(R5_Wantwrite, &sh->dev[sh->qd_idx].flags);
1849 }
1850 clear_bit(STRIPE_R5C_CACHING, &sh->state);
1851}
1852
1853static void r5l_recovery_reset_stripe(struct stripe_head *sh)
1854{
1855 int i;
1856
1857 sh->state = 0;
1858 sh->log_start = MaxSector;
1859 for (i = sh->disks; i--; )
1860 sh->dev[i].flags = 0;
1861}
1862
1863static void
1864r5l_recovery_replay_one_stripe(struct r5conf *conf,
1865 struct stripe_head *sh,
1866 struct r5l_recovery_ctx *ctx)
1867{
1868 struct md_rdev *rdev, *rrdev;
1869 int disk_index;
1870 int data_count = 0;
1871
1872 for (disk_index = 0; disk_index < sh->disks; disk_index++) {
1873 if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
1874 continue;
1875 if (disk_index == sh->qd_idx || disk_index == sh->pd_idx)
1876 continue;
1877 data_count++;
1878 }
1879
1880	/*
1881	 * stripes that only have parity must have been flushed
1882	 * before the crash that we are now recovering from, so
1883	 * there is nothing more to recover.
1884	 */
1885 if (data_count == 0)
1886 goto out;
1887
1888 for (disk_index = 0; disk_index < sh->disks; disk_index++) {
1889 if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
1890 continue;
1891
1892 /* in case device is broken */
1893 rdev = conf->disks[disk_index].rdev;
1894 if (rdev) {
1895 atomic_inc(&rdev->nr_pending);
1896 sync_page_io(rdev, sh->sector, PAGE_SIZE,
1897 sh->dev[disk_index].page, REQ_OP_WRITE,
1898 false);
1899 rdev_dec_pending(rdev, rdev->mddev);
1900 }
1901 rrdev = conf->disks[disk_index].replacement;
1902 if (rrdev) {
1903 atomic_inc(&rrdev->nr_pending);
1904 sync_page_io(rrdev, sh->sector, PAGE_SIZE,
1905 sh->dev[disk_index].page, REQ_OP_WRITE,
1906 false);
1907 rdev_dec_pending(rrdev, rrdev->mddev);
1908 }
1909 }
1910 ctx->data_parity_stripes++;
1911out:
1912 r5l_recovery_reset_stripe(sh);
1913}
1914
1915static struct stripe_head *
1916r5c_recovery_alloc_stripe(
1917 struct r5conf *conf,
1918 sector_t stripe_sect,
1919 int noblock)
1920{
1921 struct stripe_head *sh;
1922
1923 sh = raid5_get_active_stripe(conf, NULL, stripe_sect,
1924 noblock ? R5_GAS_NOBLOCK : 0);
1925 if (!sh)
1926 return NULL; /* no more stripe available */
1927
1928 r5l_recovery_reset_stripe(sh);
1929
1930 return sh;
1931}
1932
1933static struct stripe_head *
1934r5c_recovery_lookup_stripe(struct list_head *list, sector_t sect)
1935{
1936 struct stripe_head *sh;
1937
1938 list_for_each_entry(sh, list, lru)
1939 if (sh->sector == sect)
1940 return sh;
1941 return NULL;
1942}
1943
1944static void
1945r5c_recovery_drop_stripes(struct list_head *cached_stripe_list,
1946 struct r5l_recovery_ctx *ctx)
1947{
1948 struct stripe_head *sh, *next;
1949
1950 list_for_each_entry_safe(sh, next, cached_stripe_list, lru) {
1951 r5l_recovery_reset_stripe(sh);
1952 list_del_init(&sh->lru);
1953 raid5_release_stripe(sh);
1954 }
1955}
1956
1957static void
1958r5c_recovery_replay_stripes(struct list_head *cached_stripe_list,
1959 struct r5l_recovery_ctx *ctx)
1960{
1961 struct stripe_head *sh, *next;
1962
1963 list_for_each_entry_safe(sh, next, cached_stripe_list, lru)
1964 if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
1965 r5l_recovery_replay_one_stripe(sh->raid_conf, sh, ctx);
1966 list_del_init(&sh->lru);
1967 raid5_release_stripe(sh);
1968 }
1969}
1970
1971/* if matches return 0; otherwise return -EINVAL */
1972static int
1973r5l_recovery_verify_data_checksum(struct r5l_log *log,
1974 struct r5l_recovery_ctx *ctx,
1975 struct page *page,
1976 sector_t log_offset, __le32 log_checksum)
1977{
1978 void *addr;
1979 u32 checksum;
1980
1981 r5l_recovery_read_page(log, ctx, page, log_offset);
1982 addr = kmap_atomic(page);
1983 checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
1984 kunmap_atomic(addr);
1985 return (le32_to_cpu(log_checksum) == checksum) ? 0 : -EINVAL;
1986}
1987
1988/*
1989 * Before loading data into the stripe cache, we need to verify the checksum
1990 * of every data page. If any page mismatches, we drop all data in the meta block.
1991 */
1992static int
1993r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log,
1994 struct r5l_recovery_ctx *ctx)
1995{
1996 struct mddev *mddev = log->rdev->mddev;
1997 struct r5conf *conf = mddev->private;
1998 struct r5l_meta_block *mb = page_address(ctx->meta_page);
1999 sector_t mb_offset = sizeof(struct r5l_meta_block);
2000 sector_t log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2001 struct page *page;
2002 struct r5l_payload_data_parity *payload;
2003 struct r5l_payload_flush *payload_flush;
2004
2005 page = alloc_page(GFP_KERNEL);
2006 if (!page)
2007 return -ENOMEM;
2008
2009 while (mb_offset < le32_to_cpu(mb->meta_size)) {
2010 payload = (void *)mb + mb_offset;
2011 payload_flush = (void *)mb + mb_offset;
2012
2013 if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
2014 if (r5l_recovery_verify_data_checksum(
2015 log, ctx, page, log_offset,
2016 payload->checksum[0]) < 0)
2017 goto mismatch;
2018 } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY) {
2019 if (r5l_recovery_verify_data_checksum(
2020 log, ctx, page, log_offset,
2021 payload->checksum[0]) < 0)
2022 goto mismatch;
2023 if (conf->max_degraded == 2 && /* q for RAID 6 */
2024 r5l_recovery_verify_data_checksum(
2025 log, ctx, page,
2026 r5l_ring_add(log, log_offset,
2027 BLOCK_SECTORS),
2028 payload->checksum[1]) < 0)
2029 goto mismatch;
2030 } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
2031 /* nothing to do for R5LOG_PAYLOAD_FLUSH here */
2032 } else /* not R5LOG_PAYLOAD_DATA/PARITY/FLUSH */
2033 goto mismatch;
2034
2035 if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
2036 mb_offset += sizeof(struct r5l_payload_flush) +
2037 le32_to_cpu(payload_flush->size);
2038 } else {
2039 /* DATA or PARITY payload */
2040 log_offset = r5l_ring_add(log, log_offset,
2041 le32_to_cpu(payload->size));
2042 mb_offset += sizeof(struct r5l_payload_data_parity) +
2043 sizeof(__le32) *
2044 (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
2045 }
2046
2047 }
2048
2049 put_page(page);
2050 return 0;
2051
2052mismatch:
2053 put_page(page);
2054 return -EINVAL;
2055}
2056
2057/*
2058 * Analyze all data/parity pages in one meta block
2059 * Returns:
2060 * 0 for success
2061 * -EINVAL for unknown payload type
2062 * -EAGAIN for checksum mismatch of a data page
2063 * -ENOMEM for running out of memory (alloc_page failed or no stripes left)
2064 */
2065static int
2066r5c_recovery_analyze_meta_block(struct r5l_log *log,
2067 struct r5l_recovery_ctx *ctx,
2068 struct list_head *cached_stripe_list)
2069{
2070 struct mddev *mddev = log->rdev->mddev;
2071 struct r5conf *conf = mddev->private;
2072 struct r5l_meta_block *mb;
2073 struct r5l_payload_data_parity *payload;
2074 struct r5l_payload_flush *payload_flush;
2075 int mb_offset;
2076 sector_t log_offset;
2077 sector_t stripe_sect;
2078 struct stripe_head *sh;
2079 int ret;
2080
2081	/*
2082	 * on a mismatch in the data blocks, we drop all data in this mb, but
2083	 * we still read the next mb for other data with the FLUSH flag, as
2084	 * io_units could finish out of order.
2085	 */
2086 ret = r5l_recovery_verify_data_checksum_for_mb(log, ctx);
2087 if (ret == -EINVAL)
2088 return -EAGAIN;
2089 else if (ret)
2090		return ret;	/* -ENOMEM due to alloc_page() failure */
2091
2092 mb = page_address(ctx->meta_page);
2093 mb_offset = sizeof(struct r5l_meta_block);
2094 log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2095
2096 while (mb_offset < le32_to_cpu(mb->meta_size)) {
2097 int dd;
2098
2099 payload = (void *)mb + mb_offset;
2100 payload_flush = (void *)mb + mb_offset;
2101
2102 if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
2103 int i, count;
2104
2105 count = le32_to_cpu(payload_flush->size) / sizeof(__le64);
2106 for (i = 0; i < count; ++i) {
2107 stripe_sect = le64_to_cpu(payload_flush->flush_stripes[i]);
2108 sh = r5c_recovery_lookup_stripe(cached_stripe_list,
2109 stripe_sect);
2110 if (sh) {
2111 WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
2112 r5l_recovery_reset_stripe(sh);
2113 list_del_init(&sh->lru);
2114 raid5_release_stripe(sh);
2115 }
2116 }
2117
2118 mb_offset += sizeof(struct r5l_payload_flush) +
2119 le32_to_cpu(payload_flush->size);
2120 continue;
2121 }
2122
2123 /* DATA or PARITY payload */
2124 stripe_sect = (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) ?
2125 raid5_compute_sector(
2126 conf, le64_to_cpu(payload->location), 0, &dd,
2127 NULL)
2128 : le64_to_cpu(payload->location);
2129
2130 sh = r5c_recovery_lookup_stripe(cached_stripe_list,
2131 stripe_sect);
2132
2133 if (!sh) {
2134 sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1);
2135 /*
2136 * cannot get stripe from raid5_get_active_stripe
2137 * try replay some stripes
2138 */
2139 if (!sh) {
2140 r5c_recovery_replay_stripes(
2141 cached_stripe_list, ctx);
2142 sh = r5c_recovery_alloc_stripe(
2143 conf, stripe_sect, 1);
2144 }
2145 if (!sh) {
2146 int new_size = conf->min_nr_stripes * 2;
2147				pr_debug("md/raid:%s: Increasing stripe cache size to %d to recover data from the journal.\n",
2148 mdname(mddev),
2149 new_size);
2150 ret = raid5_set_cache_size(mddev, new_size);
2151 if (conf->min_nr_stripes <= new_size / 2) {
2152 pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n",
2153 mdname(mddev),
2154 ret,
2155 new_size,
2156 conf->min_nr_stripes,
2157 conf->max_nr_stripes);
2158 return -ENOMEM;
2159 }
2160 sh = r5c_recovery_alloc_stripe(
2161 conf, stripe_sect, 0);
2162 }
2163 if (!sh) {
2164 pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
2165 mdname(mddev));
2166 return -ENOMEM;
2167 }
2168 list_add_tail(&sh->lru, cached_stripe_list);
2169 }
2170
2171 if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
2172 if (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
2173 test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) {
2174 r5l_recovery_replay_one_stripe(conf, sh, ctx);
2175 list_move_tail(&sh->lru, cached_stripe_list);
2176 }
2177 r5l_recovery_load_data(log, sh, ctx, payload,
2178 log_offset);
2179 } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
2180 r5l_recovery_load_parity(log, sh, ctx, payload,
2181 log_offset);
2182 else
2183 return -EINVAL;
2184
2185 log_offset = r5l_ring_add(log, log_offset,
2186 le32_to_cpu(payload->size));
2187
2188 mb_offset += sizeof(struct r5l_payload_data_parity) +
2189 sizeof(__le32) *
2190 (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
2191 }
2192
2193 return 0;
2194}
2195
2196/*
2197 * Load the stripe into cache. The stripe will be written out later by
2198 * the stripe cache state machine.
2199 */
2200static void r5c_recovery_load_one_stripe(struct r5l_log *log,
2201 struct stripe_head *sh)
2202{
2203 struct r5dev *dev;
2204 int i;
2205
2206 for (i = sh->disks; i--; ) {
2207 dev = sh->dev + i;
2208 if (test_and_clear_bit(R5_Wantwrite, &dev->flags)) {
2209 set_bit(R5_InJournal, &dev->flags);
2210 set_bit(R5_UPTODATE, &dev->flags);
2211 }
2212 }
2213}
2214
2215/*
2216 * Scan through the log for all to-be-flushed data
2217 *
2218 * For stripes with data and parity, namely Data-Parity stripes
2219 * (STRIPE_R5C_CACHING == 0), we simply replay all the writes.
2220 *
2221 * For stripes with only data, namely Data-Only stripes
2222 * (STRIPE_R5C_CACHING == 1), we load them into the stripe cache state machine.
2223 *
2224 * For a stripe, if we see data after parity, we should discard all previous
2225 * data and parity for this stripe, as that data has already been flushed to
2226 * the array.
2227 *
2228 * At the end of the scan, we return the new journal_tail, which points to the
2229 * first data-only stripe on the journal device, or to the next invalid meta block.
2230 */
2231static int r5c_recovery_flush_log(struct r5l_log *log,
2232 struct r5l_recovery_ctx *ctx)
2233{
2234 struct stripe_head *sh;
2235 int ret = 0;
2236
2237 /* scan through the log */
2238 while (1) {
2239 if (r5l_recovery_read_meta_block(log, ctx))
2240 break;
2241
2242 ret = r5c_recovery_analyze_meta_block(log, ctx,
2243 &ctx->cached_list);
2244		/*
2245		 * -EAGAIN means a mismatch in a data block; in this case, we still
2246		 * try to scan the next meta block
2247		 */
2248 if (ret && ret != -EAGAIN)
2249 break; /* ret == -EINVAL or -ENOMEM */
2250 ctx->seq++;
2251 ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
2252 }
2253
2254 if (ret == -ENOMEM) {
2255 r5c_recovery_drop_stripes(&ctx->cached_list, ctx);
2256 return ret;
2257 }
2258
2259 /* replay data-parity stripes */
2260 r5c_recovery_replay_stripes(&ctx->cached_list, ctx);
2261
2262 /* load data-only stripes to stripe cache */
2263 list_for_each_entry(sh, &ctx->cached_list, lru) {
2264 WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
2265 r5c_recovery_load_one_stripe(log, sh);
2266 ctx->data_only_stripes++;
2267 }
2268
2269 return 0;
2270}
2271
2272/*
2273 * We did a recovery. Now ctx.pos points to an invalid meta block, and the
2274 * new log will start there. But we can't simply let the superblock keep
2275 * pointing to the last valid meta block. The log might look like:
2276 * | meta 1| meta 2| meta 3|
2277 * meta 1 is valid, meta 2 is invalid, and meta 3 could still look valid.
2278 * If the superblock kept pointing to meta 1 and we wrote a new valid meta
2279 * 2n, then after another crash the next recovery would start from meta 1.
2280 * Since meta 2n is valid, that recovery would also accept the stale meta 3,
2281 * which is wrong. The solution is to create a new meta block at meta 2's
2282 * position with seq == meta 1's seq + 10000 and let the superblock point to
2283 * it. Recovery will then reject the stale meta 3, as its seq doesn't match.
2284 */
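/*
 * Illustrative example (numbers made up): suppose meta 1 has seq 5000 and
 * the stale meta 3 left over from the old log has seq 5002. We write the
 * new meta block at meta 2's position with seq 15000 and point the
 * superblock at it. The next recovery then expects seq 15001 at meta 3's
 * position, so the stale block (seq 5002) is correctly rejected.
 */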
2285
2286/*
2287 * Before recovery, the log looks like the following
2288 *
2289 * ---------------------------------------------
2290 * | valid log | invalid log |
2291 * ---------------------------------------------
2292 * ^
2293 * |- log->last_checkpoint
2294 * |- log->last_cp_seq
2295 *
2296 * Now we scan through the log until we see invalid entry
2297 *
2298 * ---------------------------------------------
2299 * | valid log | invalid log |
2300 * ---------------------------------------------
2301 * ^ ^
2302 * |- log->last_checkpoint |- ctx->pos
2303 * |- log->last_cp_seq |- ctx->seq
2304 *
2305 * From this point, we need to increase seq number by 10 to avoid
2306 * confusing next recovery.
2307 *
2308 * ---------------------------------------------
2309 * | valid log | invalid log |
2310 * ---------------------------------------------
2311 * ^ ^
2312 * |- log->last_checkpoint |- ctx->pos+1
2313 * |- log->last_cp_seq |- ctx->seq+10001
2314 *
2315 * However, it is not safe to start the state machine yet, because the data-only
2316 * stripes are not yet secured in RAID. To save these data-only stripes, we
2317 * rewrite them to the journal starting from the increased seq (seq+10000).
2318 *
2319 * -----------------------------------------------------------------
2320 * | valid log | data only stripes | invalid log |
2321 * -----------------------------------------------------------------
2322 * ^ ^
2323 * |- log->last_checkpoint |- ctx->pos+n
2324 * |- log->last_cp_seq |- ctx->seq+10000+n
2325 *
2326 * If a failure happens again during this process, the recovery can safely
2327 * start again from log->last_checkpoint.
2328 *
2329 * Once data only stripes are rewritten to journal, we move log_tail
2330 *
2331 * -----------------------------------------------------------------
2332 * | old log | data only stripes | invalid log |
2333 * -----------------------------------------------------------------
2334 * ^ ^
2335 * |- log->last_checkpoint |- ctx->pos+n
2336 * |- log->last_cp_seq |- ctx->seq+10000+n
2337 *
2338 * Then we can safely start the state machine. If failure happens from this
2339 * point on, the recovery will start from new log->last_checkpoint.
2340 */
2341static int
2342r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
2343 struct r5l_recovery_ctx *ctx)
2344{
2345 struct stripe_head *sh;
2346 struct mddev *mddev = log->rdev->mddev;
2347 struct page *page;
2348 sector_t next_checkpoint = MaxSector;
2349
2350 page = alloc_page(GFP_KERNEL);
2351 if (!page) {
2352 pr_err("md/raid:%s: cannot allocate memory to rewrite data only stripes\n",
2353 mdname(mddev));
2354 return -ENOMEM;
2355 }
2356
2357 WARN_ON(list_empty(&ctx->cached_list));
2358
2359 list_for_each_entry(sh, &ctx->cached_list, lru) {
2360 struct r5l_meta_block *mb;
2361 int i;
2362 int offset;
2363 sector_t write_pos;
2364
2365 WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
2366 r5l_recovery_create_empty_meta_block(log, page,
2367 ctx->pos, ctx->seq);
2368 mb = page_address(page);
2369 offset = le32_to_cpu(mb->meta_size);
2370 write_pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2371
2372 for (i = sh->disks; i--; ) {
2373 struct r5dev *dev = &sh->dev[i];
2374 struct r5l_payload_data_parity *payload;
2375 void *addr;
2376
2377 if (test_bit(R5_InJournal, &dev->flags)) {
2378 payload = (void *)mb + offset;
2379 payload->header.type = cpu_to_le16(
2380 R5LOG_PAYLOAD_DATA);
2381 payload->size = cpu_to_le32(BLOCK_SECTORS);
2382 payload->location = cpu_to_le64(
2383 raid5_compute_blocknr(sh, i, 0));
2384 addr = kmap_atomic(dev->page);
2385 payload->checksum[0] = cpu_to_le32(
2386 crc32c_le(log->uuid_checksum, addr,
2387 PAGE_SIZE));
2388 kunmap_atomic(addr);
2389 sync_page_io(log->rdev, write_pos, PAGE_SIZE,
2390 dev->page, REQ_OP_WRITE, false);
2391 write_pos = r5l_ring_add(log, write_pos,
2392 BLOCK_SECTORS);
2393 offset += sizeof(__le32) +
2394 sizeof(struct r5l_payload_data_parity);
2395
2396 }
2397 }
2398 mb->meta_size = cpu_to_le32(offset);
2399 mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
2400 mb, PAGE_SIZE));
2401 sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
2402 REQ_OP_WRITE | REQ_SYNC | REQ_FUA, false);
2403 sh->log_start = ctx->pos;
2404 list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
2405 atomic_inc(&log->stripe_in_journal_count);
2406 ctx->pos = write_pos;
2407 ctx->seq += 1;
2408 next_checkpoint = sh->log_start;
2409 }
2410 log->next_checkpoint = next_checkpoint;
2411 __free_page(page);
2412 return 0;
2413}
2414
2415static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
2416 struct r5l_recovery_ctx *ctx)
2417{
2418 struct mddev *mddev = log->rdev->mddev;
2419 struct r5conf *conf = mddev->private;
2420 struct stripe_head *sh, *next;
2421 bool cleared_pending = false;
2422
2423 if (ctx->data_only_stripes == 0)
2424 return;
2425
2426 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2427 cleared_pending = true;
2428 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
2429 }
2430 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK;
2431
2432 list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
2433 r5c_make_stripe_write_out(sh);
2434 set_bit(STRIPE_HANDLE, &sh->state);
2435 list_del_init(&sh->lru);
2436 raid5_release_stripe(sh);
2437 }
2438
2439 /* reuse conf->wait_for_quiescent in recovery */
2440 wait_event(conf->wait_for_quiescent,
2441 atomic_read(&conf->active_stripes) == 0);
2442
2443 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
2444 if (cleared_pending)
2445 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
2446}
2447
2448static int r5l_recovery_log(struct r5l_log *log)
2449{
2450 struct mddev *mddev = log->rdev->mddev;
2451 struct r5l_recovery_ctx *ctx;
2452 int ret;
2453 sector_t pos;
2454
2455 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2456 if (!ctx)
2457 return -ENOMEM;
2458
2459 ctx->pos = log->last_checkpoint;
2460 ctx->seq = log->last_cp_seq;
2461 INIT_LIST_HEAD(&ctx->cached_list);
2462 ctx->meta_page = alloc_page(GFP_KERNEL);
2463
2464 if (!ctx->meta_page) {
2465 ret = -ENOMEM;
2466 goto meta_page;
2467 }
2468
2469 if (r5l_recovery_allocate_ra_pool(log, ctx) != 0) {
2470 ret = -ENOMEM;
2471 goto ra_pool;
2472 }
2473
2474 ret = r5c_recovery_flush_log(log, ctx);
2475
2476 if (ret)
2477 goto error;
2478
2479 pos = ctx->pos;
2480 ctx->seq += 10000;
2481
2482 if ((ctx->data_only_stripes == 0) && (ctx->data_parity_stripes == 0))
2483 pr_info("md/raid:%s: starting from clean shutdown\n",
2484 mdname(mddev));
2485 else
2486 pr_info("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
2487 mdname(mddev), ctx->data_only_stripes,
2488 ctx->data_parity_stripes);
2489
2490 if (ctx->data_only_stripes == 0) {
2491 log->next_checkpoint = ctx->pos;
2492 r5l_log_write_empty_meta_block(log, ctx->pos, ctx->seq++);
2493 ctx->pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2494 } else if (r5c_recovery_rewrite_data_only_stripes(log, ctx)) {
2495 pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
2496 mdname(mddev));
2497 ret = -EIO;
2498 goto error;
2499 }
2500
2501 log->log_start = ctx->pos;
2502 log->seq = ctx->seq;
2503 log->last_checkpoint = pos;
2504 r5l_write_super(log, pos);
2505
2506 r5c_recovery_flush_data_only_stripes(log, ctx);
2507 ret = 0;
2508error:
2509 r5l_recovery_free_ra_pool(log, ctx);
2510ra_pool:
2511 __free_page(ctx->meta_page);
2512meta_page:
2513 kfree(ctx);
2514 return ret;
2515}
2516
2517static void r5l_write_super(struct r5l_log *log, sector_t cp)
2518{
2519 struct mddev *mddev = log->rdev->mddev;
2520
2521 log->rdev->journal_tail = cp;
2522 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2523}
2524
2525static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
2526{
2527 struct r5conf *conf;
2528 int ret;
2529
2530 ret = mddev_lock(mddev);
2531 if (ret)
2532 return ret;
2533
2534 conf = mddev->private;
2535 if (!conf || !conf->log)
2536 goto out_unlock;
2537
2538 switch (conf->log->r5c_journal_mode) {
2539 case R5C_JOURNAL_MODE_WRITE_THROUGH:
2540 ret = snprintf(
2541 page, PAGE_SIZE, "[%s] %s\n",
2542 r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
2543 r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
2544 break;
2545 case R5C_JOURNAL_MODE_WRITE_BACK:
2546 ret = snprintf(
2547 page, PAGE_SIZE, "%s [%s]\n",
2548 r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
2549 r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
2550 break;
2551 default:
2552 ret = 0;
2553 }
2554
2555out_unlock:
2556 mddev_unlock(mddev);
2557 return ret;
2558}
2559
2560/*
2561 * Set journal cache mode on @mddev (external API initially needed by dm-raid).
2562 *
2563 * @mode as defined in 'enum r5c_journal_mode'.
2564 */
2566int r5c_journal_mode_set(struct mddev *mddev, int mode)
2567{
2568 struct r5conf *conf;
2569
2570 if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH ||
2571 mode > R5C_JOURNAL_MODE_WRITE_BACK)
2572 return -EINVAL;
2573
2574 conf = mddev->private;
2575 if (!conf || !conf->log)
2576 return -ENODEV;
2577
2578 if (raid5_calc_degraded(conf) > 0 &&
2579 mode == R5C_JOURNAL_MODE_WRITE_BACK)
2580 return -EINVAL;
2581
2582 conf->log->r5c_journal_mode = mode;
2583
2584 pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
2585 mdname(mddev), mode, r5c_journal_mode_str[mode]);
2586 return 0;
2587}
2588EXPORT_SYMBOL(r5c_journal_mode_set);
2589
2590static ssize_t r5c_journal_mode_store(struct mddev *mddev,
2591 const char *page, size_t length)
2592{
2593 int mode = ARRAY_SIZE(r5c_journal_mode_str);
2594 size_t len = length;
2595 int ret;
2596
2597 if (len < 2)
2598 return -EINVAL;
2599
2600 if (page[len - 1] == '\n')
2601 len--;
2602
2603 while (mode--)
2604 if (strlen(r5c_journal_mode_str[mode]) == len &&
2605 !strncmp(page, r5c_journal_mode_str[mode], len))
2606 break;
2607 ret = mddev_suspend_and_lock(mddev);
2608 if (ret)
2609 return ret;
2610 ret = r5c_journal_mode_set(mddev, mode);
2611 mddev_unlock_and_resume(mddev);
2612 return ret ?: length;
2613}
2614
2615struct md_sysfs_entry
2616r5c_journal_mode = __ATTR(journal_mode, 0644,
2617 r5c_journal_mode_show, r5c_journal_mode_store);
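
/*
 * Example usage (illustrative; assumes the array is md0): the attribute
 * above is exposed as /sys/block/md0/md/journal_mode, so the cache mode can
 * be inspected and switched from userspace, e.g.:
 *
 *	# cat /sys/block/md0/md/journal_mode
 *	[write-through] write-back
 *	# echo write-back > /sys/block/md0/md/journal_mode
 */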
2618
2619/*
2620 * Try to handle a write operation in the caching phase. This function should
2621 * only be called in write-back mode.
2622 *
2623 * If all outstanding writes can be handled in the caching phase, return 0.
2624 * If the writes require the write-out phase, call r5c_make_stripe_write_out()
2625 * and return -EAGAIN.
2626 */
2627int r5c_try_caching_write(struct r5conf *conf,
2628 struct stripe_head *sh,
2629 struct stripe_head_state *s,
2630 int disks)
2631{
2632 struct r5l_log *log = READ_ONCE(conf->log);
2633 int i;
2634 struct r5dev *dev;
2635 int to_cache = 0;
2636 void __rcu **pslot;
2637 sector_t tree_index;
2638 int ret;
2639 uintptr_t refcount;
2640
2641 BUG_ON(!r5c_is_writeback(log));
2642
2643 if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
2644 /*
2645 * There are two different scenarios here:
2646 * 1. The stripe has some data cached, and it is sent to
2647 * write-out phase for reclaim
2648 * 2. The stripe is clean, and this is the first write
2649 *
2650 * For 1, return -EAGAIN, so we continue with
2651 * handle_stripe_dirtying().
2652 *
2653 * For 2, set STRIPE_R5C_CACHING and continue with caching
2654 * write.
2655 */
2656
2657 /* case 1: anything injournal or anything in written */
2658 if (s->injournal > 0 || s->written > 0)
2659 return -EAGAIN;
2660 /* case 2 */
2661 set_bit(STRIPE_R5C_CACHING, &sh->state);
2662 }
2663
2664	/*
2665	 * When running in degraded mode, the array is set to write-through mode.
2666	 * This check helps drain pending writes safely during the transition to
2667	 * write-through mode.
2668	 *
2669	 * When a stripe is syncing, the write is also handled in write-through
2670	 * mode.
2671	 */
2672 if (s->failed || test_bit(STRIPE_SYNCING, &sh->state)) {
2673 r5c_make_stripe_write_out(sh);
2674 return -EAGAIN;
2675 }
2676
2677 for (i = disks; i--; ) {
2678 dev = &sh->dev[i];
2679 /* if non-overwrite, use writing-out phase */
2680 if (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags) &&
2681 !test_bit(R5_InJournal, &dev->flags)) {
2682 r5c_make_stripe_write_out(sh);
2683 return -EAGAIN;
2684 }
2685 }
2686
2687 /* if the stripe is not counted in big_stripe_tree, add it now */
2688 if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) &&
2689 !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
2690 tree_index = r5c_tree_index(conf, sh->sector);
2691 spin_lock(&log->tree_lock);
2692 pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
2693 tree_index);
2694 if (pslot) {
2695 refcount = (uintptr_t)radix_tree_deref_slot_protected(
2696 pslot, &log->tree_lock) >>
2697 R5C_RADIX_COUNT_SHIFT;
2698 radix_tree_replace_slot(
2699 &log->big_stripe_tree, pslot,
2700 (void *)((refcount + 1) << R5C_RADIX_COUNT_SHIFT));
2701 } else {
2702 /*
2703 * this radix_tree_insert can fail safely, so no
2704 * need to call radix_tree_preload()
2705 */
2706 ret = radix_tree_insert(
2707 &log->big_stripe_tree, tree_index,
2708 (void *)(1 << R5C_RADIX_COUNT_SHIFT));
2709 if (ret) {
2710 spin_unlock(&log->tree_lock);
2711 r5c_make_stripe_write_out(sh);
2712 return -EAGAIN;
2713 }
2714 }
2715 spin_unlock(&log->tree_lock);
2716
2717 /*
2718 * set STRIPE_R5C_PARTIAL_STRIPE, this shows the stripe is
2719 * counted in the radix tree
2720 */
2721 set_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state);
2722 atomic_inc(&conf->r5c_cached_partial_stripes);
2723 }
2724
2725 for (i = disks; i--; ) {
2726 dev = &sh->dev[i];
2727 if (dev->towrite) {
2728 set_bit(R5_Wantwrite, &dev->flags);
2729 set_bit(R5_Wantdrain, &dev->flags);
2730 set_bit(R5_LOCKED, &dev->flags);
2731 to_cache++;
2732 }
2733 }
2734
2735 if (to_cache) {
2736 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
2737 /*
2738 * set STRIPE_LOG_TRAPPED, which triggers r5c_cache_data()
2739 * in ops_run_io(). STRIPE_LOG_TRAPPED will be cleared in
2740 * r5c_handle_data_cached()
2741 */
2742 set_bit(STRIPE_LOG_TRAPPED, &sh->state);
2743 }
2744
2745 return 0;
2746}
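
/*
 * Note on the big_stripe_tree bookkeeping in r5c_try_caching_write() and
 * r5c_finish_stripe_write_out(): the per-chunk counter is stored directly in
 * the radix tree slot pointer, shifted left by R5C_RADIX_COUNT_SHIFT so the
 * low bits stay clear. For example, a slot holding
 * (void *)(3 << R5C_RADIX_COUNT_SHIFT) means three stripes of that chunk are
 * currently in the write-back cache.
 */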
2747
2748/*
2749 * free extra pages (orig_page) we allocated for prexor
2750 */
2751void r5c_release_extra_page(struct stripe_head *sh)
2752{
2753 struct r5conf *conf = sh->raid_conf;
2754 int i;
2755 bool using_disk_info_extra_page;
2756
2757 using_disk_info_extra_page =
2758 sh->dev[0].orig_page == conf->disks[0].extra_page;
2759
2760 for (i = sh->disks; i--; )
2761 if (sh->dev[i].page != sh->dev[i].orig_page) {
2762 struct page *p = sh->dev[i].orig_page;
2763
2764 sh->dev[i].orig_page = sh->dev[i].page;
2765 clear_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags);
2766
2767 if (!using_disk_info_extra_page)
2768 put_page(p);
2769 }
2770
2771 if (using_disk_info_extra_page) {
2772 clear_bit(R5C_EXTRA_PAGE_IN_USE, &conf->cache_state);
2773 md_wakeup_thread(conf->mddev->thread);
2774 }
2775}
2776
2777void r5c_use_extra_page(struct stripe_head *sh)
2778{
2779 struct r5conf *conf = sh->raid_conf;
2780 int i;
2781 struct r5dev *dev;
2782
2783 for (i = sh->disks; i--; ) {
2784 dev = &sh->dev[i];
2785 if (dev->orig_page != dev->page)
2786 put_page(dev->orig_page);
2787 dev->orig_page = conf->disks[i].extra_page;
2788 }
2789}
2790
2791/*
2792 * clean up the stripe (clear R5_InJournal for dev[pd_idx] etc.) after the
2793 * stripe is committed to RAID disks.
2794 */
2795void r5c_finish_stripe_write_out(struct r5conf *conf,
2796 struct stripe_head *sh,
2797 struct stripe_head_state *s)
2798{
2799 struct r5l_log *log = READ_ONCE(conf->log);
2800 int i;
2801 int do_wakeup = 0;
2802 sector_t tree_index;
2803 void __rcu **pslot;
2804 uintptr_t refcount;
2805
2806 if (!log || !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags))
2807 return;
2808
2809 WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
2810 clear_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
2811
2812 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
2813 return;
2814
2815 for (i = sh->disks; i--; ) {
2816 clear_bit(R5_InJournal, &sh->dev[i].flags);
2817 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2818 do_wakeup = 1;
2819 }
2820
2821	/*
2822	 * analyse_stripe() runs before r5c_finish_stripe_write_out();
2823	 * since we just updated R5_InJournal, also update s->injournal.
2824	 */
2825 s->injournal = 0;
2826
2827 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
2828 if (atomic_dec_and_test(&conf->pending_full_writes))
2829 md_wakeup_thread(conf->mddev->thread);
2830
2831 if (do_wakeup)
2832 wake_up(&conf->wait_for_overlap);
2833
2834 spin_lock_irq(&log->stripe_in_journal_lock);
2835 list_del_init(&sh->r5c);
2836 spin_unlock_irq(&log->stripe_in_journal_lock);
2837 sh->log_start = MaxSector;
2838
2839 atomic_dec(&log->stripe_in_journal_count);
2840 r5c_update_log_state(log);
2841
2842 /* stop counting this stripe in big_stripe_tree */
2843 if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) ||
2844 test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
2845 tree_index = r5c_tree_index(conf, sh->sector);
2846 spin_lock(&log->tree_lock);
2847 pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
2848 tree_index);
2849 BUG_ON(pslot == NULL);
2850 refcount = (uintptr_t)radix_tree_deref_slot_protected(
2851 pslot, &log->tree_lock) >>
2852 R5C_RADIX_COUNT_SHIFT;
2853 if (refcount == 1)
2854 radix_tree_delete(&log->big_stripe_tree, tree_index);
2855 else
2856 radix_tree_replace_slot(
2857 &log->big_stripe_tree, pslot,
2858 (void *)((refcount - 1) << R5C_RADIX_COUNT_SHIFT));
2859 spin_unlock(&log->tree_lock);
2860 }
2861
2862 if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) {
2863 BUG_ON(atomic_read(&conf->r5c_cached_partial_stripes) == 0);
2864 atomic_dec(&conf->r5c_flushing_partial_stripes);
2865 atomic_dec(&conf->r5c_cached_partial_stripes);
2866 }
2867
2868 if (test_and_clear_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
2869 BUG_ON(atomic_read(&conf->r5c_cached_full_stripes) == 0);
2870 atomic_dec(&conf->r5c_flushing_full_stripes);
2871 atomic_dec(&conf->r5c_cached_full_stripes);
2872 }
2873
2874 r5l_append_flush_payload(log, sh->sector);
2875	/* stripe is flushed to raid disks, we can do resync now */
2876 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
2877 set_bit(STRIPE_HANDLE, &sh->state);
2878}
2879
2880int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh)
2881{
2882 struct r5conf *conf = sh->raid_conf;
2883 int pages = 0;
2884 int reserve;
2885 int i;
2886 int ret = 0;
2887
2888 BUG_ON(!log);
2889
2890 for (i = 0; i < sh->disks; i++) {
2891 void *addr;
2892
2893 if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
2894 continue;
2895 addr = kmap_atomic(sh->dev[i].page);
2896 sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
2897 addr, PAGE_SIZE);
2898 kunmap_atomic(addr);
2899 pages++;
2900 }
2901 WARN_ON(pages == 0);
2902
2903	/*
2904	 * The stripe must enter the state machine again to call endio, so
2905	 * don't delay.
2906	 */
2907 clear_bit(STRIPE_DELAYED, &sh->state);
2908 atomic_inc(&sh->count);
2909
2910 mutex_lock(&log->io_mutex);
2911 /* meta + data */
2912 reserve = (1 + pages) << (PAGE_SHIFT - 9);
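	/*
	 * Worked example (assumes the required 4KB PAGE_SIZE): PAGE_SHIFT - 9
	 * is 3, so each page is 8 sectors (BLOCK_SECTORS). Caching 3 data
	 * pages therefore reserves (1 + 3) << 3 = 32 sectors: one meta block
	 * plus the three data blocks.
	 */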
2913
2914 if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
2915 sh->log_start == MaxSector)
2916 r5l_add_no_space_stripe(log, sh);
2917 else if (!r5l_has_free_space(log, reserve)) {
2918 if (sh->log_start == log->last_checkpoint)
2919 BUG();
2920 else
2921 r5l_add_no_space_stripe(log, sh);
2922 } else {
2923 ret = r5l_log_stripe(log, sh, pages, 0);
2924 if (ret) {
2925 spin_lock_irq(&log->io_list_lock);
2926 list_add_tail(&sh->log_list, &log->no_mem_stripes);
2927 spin_unlock_irq(&log->io_list_lock);
2928 }
2929 }
2930
2931 mutex_unlock(&log->io_mutex);
2932 return 0;
2933}
2934
2935/* check whether this big stripe is in write back cache. */
2936bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect)
2937{
2938 struct r5l_log *log = READ_ONCE(conf->log);
2939 sector_t tree_index;
2940 void *slot;
2941
2942 if (!log)
2943 return false;
2944
2945 tree_index = r5c_tree_index(conf, sect);
2946 slot = radix_tree_lookup(&log->big_stripe_tree, tree_index);
2947 return slot != NULL;
2948}
2949
2950static int r5l_load_log(struct r5l_log *log)
2951{
2952 struct md_rdev *rdev = log->rdev;
2953 struct page *page;
2954 struct r5l_meta_block *mb;
2955 sector_t cp = log->rdev->journal_tail;
2956 u32 stored_crc, expected_crc;
2957 bool create_super = false;
2958 int ret = 0;
2959
2960 /* Make sure it's valid */
2961 if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
2962 cp = 0;
2963 page = alloc_page(GFP_KERNEL);
2964 if (!page)
2965 return -ENOMEM;
2966
2967 if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, false)) {
2968 ret = -EIO;
2969 goto ioerr;
2970 }
2971 mb = page_address(page);
2972
2973 if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
2974 mb->version != R5LOG_VERSION) {
2975 create_super = true;
2976 goto create;
2977 }
2978 stored_crc = le32_to_cpu(mb->checksum);
2979 mb->checksum = 0;
2980 expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
2981 if (stored_crc != expected_crc) {
2982 create_super = true;
2983 goto create;
2984 }
2985 if (le64_to_cpu(mb->position) != cp) {
2986 create_super = true;
2987 goto create;
2988 }
2989create:
2990 if (create_super) {
2991 log->last_cp_seq = get_random_u32();
2992 cp = 0;
2993 r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq);
2994		/*
2995		 * Make sure the superblock points to the correct address. The log
2996		 * might get data very soon. If the superblock doesn't have the
2997		 * correct log tail address, recovery can't find the log.
2998		 */
2999 r5l_write_super(log, cp);
3000 } else
3001 log->last_cp_seq = le64_to_cpu(mb->seq);
3002
3003 log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
3004 log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
3005 if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
3006 log->max_free_space = RECLAIM_MAX_FREE_SPACE;
3007 log->last_checkpoint = cp;
3008
3009 __free_page(page);
3010
3011 if (create_super) {
3012 log->log_start = r5l_ring_add(log, cp, BLOCK_SECTORS);
3013 log->seq = log->last_cp_seq + 1;
3014 log->next_checkpoint = cp;
3015 } else
3016 ret = r5l_recovery_log(log);
3017
3018 r5c_update_log_state(log);
3019 return ret;
3020ioerr:
3021 __free_page(page);
3022 return ret;
3023}
3024
3025int r5l_start(struct r5l_log *log)
3026{
3027 int ret;
3028
3029 if (!log)
3030 return 0;
3031
3032 ret = r5l_load_log(log);
3033 if (ret) {
3034 struct mddev *mddev = log->rdev->mddev;
3035 struct r5conf *conf = mddev->private;
3036
3037 r5l_exit_log(conf);
3038 }
3039 return ret;
3040}
3041
3042void r5c_update_on_rdev_error(struct mddev *mddev, struct md_rdev *rdev)
3043{
3044 struct r5conf *conf = mddev->private;
3045 struct r5l_log *log = READ_ONCE(conf->log);
3046
3047 if (!log)
3048 return;
3049
3050 if ((raid5_calc_degraded(conf) > 0 ||
3051 test_bit(Journal, &rdev->flags)) &&
3052 log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
3053 schedule_work(&log->disable_writeback_work);
3054}
3055
3056int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
3057{
3058 struct r5l_log *log;
3059 struct md_thread *thread;
3060 int ret;
3061
3062 pr_debug("md/raid:%s: using device %pg as journal\n",
3063 mdname(conf->mddev), rdev->bdev);
3064
3065 if (PAGE_SIZE != 4096)
3066 return -EINVAL;
3067
3068	/*
3069	 * The PAGE_SIZE must be big enough to hold 1 r5l_meta_block and
3070	 * raid_disks r5l_payload_data_parity entries.
3071	 *
3072	 * The write journal and cache do not work for very big arrays
3073	 * (raid_disks > 203).
3074	 */
3075 if (sizeof(struct r5l_meta_block) +
3076 ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) *
3077 conf->raid_disks) > PAGE_SIZE) {
3078 pr_err("md/raid:%s: write journal/cache doesn't work for array with %d disks\n",
3079 mdname(conf->mddev), conf->raid_disks);
3080 return -EINVAL;
3081 }
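	/*
	 * Rough arithmetic behind the 203-disk limit (assuming the usual
	 * 32-byte struct r5l_meta_block and 16-byte struct
	 * r5l_payload_data_parity): (4096 - 32) / (16 + sizeof(__le32)) = 203,
	 * i.e. one meta block plus one payload descriptor and one checksum per
	 * member disk must fit in a single 4KB page.
	 */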
3082
3083 log = kzalloc(sizeof(*log), GFP_KERNEL);
3084 if (!log)
3085 return -ENOMEM;
3086 log->rdev = rdev;
3087 log->need_cache_flush = bdev_write_cache(rdev->bdev);
3088 log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
3089 sizeof(rdev->mddev->uuid));
3090
3091 mutex_init(&log->io_mutex);
3092
3093 spin_lock_init(&log->io_list_lock);
3094 INIT_LIST_HEAD(&log->running_ios);
3095 INIT_LIST_HEAD(&log->io_end_ios);
3096 INIT_LIST_HEAD(&log->flushing_ios);
3097 INIT_LIST_HEAD(&log->finished_ios);
3098
3099 log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
3100 if (!log->io_kc)
3101 goto io_kc;
3102
3103 ret = mempool_init_slab_pool(&log->io_pool, R5L_POOL_SIZE, log->io_kc);
3104 if (ret)
3105 goto io_pool;
3106
3107 ret = bioset_init(&log->bs, R5L_POOL_SIZE, 0, BIOSET_NEED_BVECS);
3108 if (ret)
3109 goto io_bs;
3110
3111 ret = mempool_init_page_pool(&log->meta_pool, R5L_POOL_SIZE, 0);
3112 if (ret)
3113 goto out_mempool;
3114
3115 spin_lock_init(&log->tree_lock);
3116 INIT_RADIX_TREE(&log->big_stripe_tree, GFP_NOWAIT | __GFP_NOWARN);
3117
3118 thread = md_register_thread(r5l_reclaim_thread, log->rdev->mddev,
3119 "reclaim");
3120 if (!thread)
3121 goto reclaim_thread;
3122
3123 thread->timeout = R5C_RECLAIM_WAKEUP_INTERVAL;
3124 rcu_assign_pointer(log->reclaim_thread, thread);
3125
3126 init_waitqueue_head(&log->iounit_wait);
3127
3128 INIT_LIST_HEAD(&log->no_mem_stripes);
3129
3130 INIT_LIST_HEAD(&log->no_space_stripes);
3131 spin_lock_init(&log->no_space_stripes_lock);
3132
3133 INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
3134 INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async);
3135
3136 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
3137 INIT_LIST_HEAD(&log->stripe_in_journal_list);
3138 spin_lock_init(&log->stripe_in_journal_lock);
3139 atomic_set(&log->stripe_in_journal_count, 0);
3140
3141 WRITE_ONCE(conf->log, log);
3142
3143 set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
3144 return 0;
3145
3146reclaim_thread:
3147 mempool_exit(&log->meta_pool);
3148out_mempool:
3149 bioset_exit(&log->bs);
3150io_bs:
3151 mempool_exit(&log->io_pool);
3152io_pool:
3153 kmem_cache_destroy(log->io_kc);
3154io_kc:
3155 kfree(log);
3156 return -EINVAL;
3157}
3158
3159void r5l_exit_log(struct r5conf *conf)
3160{
3161 struct r5l_log *log = conf->log;
3162
3163 md_unregister_thread(conf->mddev, &log->reclaim_thread);
3164
3165	/*
3166	 * 'reconfig_mutex' is held by the caller; set 'conf->log' to NULL to
3167	 * ensure disable_writeback_work wakes up and exits.
3168	 */
3169 WRITE_ONCE(conf->log, NULL);
3170 wake_up(&conf->mddev->sb_wait);
3171 flush_work(&log->disable_writeback_work);
3172
3173 mempool_exit(&log->meta_pool);
3174 bioset_exit(&log->bs);
3175 mempool_exit(&log->io_pool);
3176 kmem_cache_destroy(log->io_kc);
3177 kfree(log);
3178}
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2015 Shaohua Li <shli@fb.com>
4 * Copyright (C) 2016 Song Liu <songliubraving@fb.com>
5 */
6#include <linux/kernel.h>
7#include <linux/wait.h>
8#include <linux/blkdev.h>
9#include <linux/slab.h>
10#include <linux/raid/md_p.h>
11#include <linux/crc32c.h>
12#include <linux/random.h>
13#include <linux/kthread.h>
14#include <linux/types.h>
15#include "md.h"
16#include "raid5.h"
17#include "md-bitmap.h"
18#include "raid5-log.h"
19
20/*
21 * metadata/data stored in disk with 4k size unit (a block) regardless
22 * underneath hardware sector size. only works with PAGE_SIZE == 4096
23 */
24#define BLOCK_SECTORS (8)
25#define BLOCK_SECTOR_SHIFT (3)
26
27/*
28 * log->max_free_space is min(1/4 disk size, 10G reclaimable space).
29 *
30 * In write through mode, the reclaim runs every log->max_free_space.
31 * This can prevent the recovery scans for too long
32 */
33#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
34#define RECLAIM_MAX_FREE_SPACE_SHIFT (2)
35
36/* wake up reclaim thread periodically */
37#define R5C_RECLAIM_WAKEUP_INTERVAL (30 * HZ)
38/* start flush with these full stripes */
39#define R5C_FULL_STRIPE_FLUSH_BATCH(conf) (conf->max_nr_stripes / 4)
40/* reclaim stripes in groups */
41#define R5C_RECLAIM_STRIPE_GROUP (NR_STRIPE_HASH_LOCKS * 2)
42
43/*
44 * We only need 2 bios per I/O unit to make progress, but ensure we
45 * have a few more available to not get too tight.
46 */
47#define R5L_POOL_SIZE 4
48
49static char *r5c_journal_mode_str[] = {"write-through",
50 "write-back"};
51/*
52 * raid5 cache state machine
53 *
54 * With the RAID cache, each stripe works in two phases:
55 * - caching phase
56 * - writing-out phase
57 *
58 * These two phases are controlled by bit STRIPE_R5C_CACHING:
59 * if STRIPE_R5C_CACHING == 0, the stripe is in writing-out phase
60 * if STRIPE_R5C_CACHING == 1, the stripe is in caching phase
61 *
62 * When there is no journal, or the journal is in write-through mode,
63 * the stripe is always in writing-out phase.
64 *
65 * For write-back journal, the stripe is sent to caching phase on write
66 * (r5c_try_caching_write). r5c_make_stripe_write_out() kicks off
67 * the write-out phase by clearing STRIPE_R5C_CACHING.
68 *
69 * Stripes in caching phase do not write the raid disks. Instead, all
70 * writes are committed from the log device. Therefore, a stripe in
71 * caching phase handles writes as:
72 * - write to log device
73 * - return IO
74 *
75 * Stripes in writing-out phase handle writes as:
76 * - calculate parity
77 * - write pending data and parity to journal
78 * - write data and parity to raid disks
79 * - return IO for pending writes
80 */
81
82struct r5l_log {
83 struct md_rdev *rdev;
84
85 u32 uuid_checksum;
86
87 sector_t device_size; /* log device size, round to
88 * BLOCK_SECTORS */
89 sector_t max_free_space; /* reclaim run if free space is at
90 * this size */
91
92 sector_t last_checkpoint; /* log tail. where recovery scan
93 * starts from */
94 u64 last_cp_seq; /* log tail sequence */
95
96 sector_t log_start; /* log head. where new data appends */
97 u64 seq; /* log head sequence */
98
99 sector_t next_checkpoint;
100
101 struct mutex io_mutex;
102 struct r5l_io_unit *current_io; /* current io_unit accepting new data */
103
104 spinlock_t io_list_lock;
105 struct list_head running_ios; /* io_units which are still running,
106 * and have not yet been completely
107 * written to the log */
108 struct list_head io_end_ios; /* io_units which have been completely
109 * written to the log but not yet written
110 * to the RAID */
111 struct list_head flushing_ios; /* io_units which are waiting for log
112 * cache flush */
113 struct list_head finished_ios; /* io_units which settle down in log disk */
114 struct bio flush_bio;
115
116 struct list_head no_mem_stripes; /* pending stripes, -ENOMEM */
117
118 struct kmem_cache *io_kc;
119 mempool_t io_pool;
120 struct bio_set bs;
121 mempool_t meta_pool;
122
123 struct md_thread __rcu *reclaim_thread;
124 unsigned long reclaim_target; /* number of space that need to be
125 * reclaimed. if it's 0, reclaim spaces
126 * used by io_units which are in
127 * IO_UNIT_STRIPE_END state (eg, reclaim
128 * doesn't wait for specific io_unit
129 * switching to IO_UNIT_STRIPE_END
130 * state) */
131 wait_queue_head_t iounit_wait;
132
133 struct list_head no_space_stripes; /* pending stripes, log has no space */
134 spinlock_t no_space_stripes_lock;
135
136 bool need_cache_flush;
137
138 /* for r5c_cache */
139 enum r5c_journal_mode r5c_journal_mode;
140
141 /* all stripes in r5cache, in the order of seq at sh->log_start */
142 struct list_head stripe_in_journal_list;
143
144 spinlock_t stripe_in_journal_lock;
145 atomic_t stripe_in_journal_count;
146
147 /* to submit async io_units, to fulfill ordering of flush */
148 struct work_struct deferred_io_work;
149 /* to disable write back during in degraded mode */
150 struct work_struct disable_writeback_work;
151
152 /* to for chunk_aligned_read in writeback mode, details below */
153 spinlock_t tree_lock;
154 struct radix_tree_root big_stripe_tree;
155};
156
157/*
158 * Enable chunk_aligned_read() with write back cache.
159 *
160 * Each chunk may contain more than one stripe (for example, a 256kB
161 * chunk contains 64 4kB-page, so this chunk contain 64 stripes). For
162 * chunk_aligned_read, these stripes are grouped into one "big_stripe".
163 * For each big_stripe, we count how many stripes of this big_stripe
164 * are in the write back cache. These data are tracked in a radix tree
165 * (big_stripe_tree). We use radix_tree item pointer as the counter.
166 * r5c_tree_index() is used to calculate keys for the radix tree.
167 *
168 * chunk_aligned_read() calls r5c_big_stripe_cached() to look up
169 * big_stripe of each chunk in the tree. If this big_stripe is in the
170 * tree, chunk_aligned_read() aborts. This look up is protected by
171 * rcu_read_lock().
172 *
173 * It is necessary to remember whether a stripe is counted in
174 * big_stripe_tree. Instead of adding new flag, we reuses existing flags:
175 * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE. If either of these
176 * two flags are set, the stripe is counted in big_stripe_tree. This
177 * requires moving set_bit(STRIPE_R5C_PARTIAL_STRIPE) to
178 * r5c_try_caching_write(); and moving clear_bit of
179 * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE to
180 * r5c_finish_stripe_write_out().
181 */
182
183/*
184 * radix tree requests lowest 2 bits of data pointer to be 2b'00.
185 * So it is necessary to left shift the counter by 2 bits before using it
186 * as data pointer of the tree.
187 */
188#define R5C_RADIX_COUNT_SHIFT 2
189
190/*
191 * calculate key for big_stripe_tree
192 *
193 * sect: align_bi->bi_iter.bi_sector or sh->sector
194 */
195static inline sector_t r5c_tree_index(struct r5conf *conf,
196 sector_t sect)
197{
198 sector_div(sect, conf->chunk_sectors);
199 return sect;
200}
201
202/*
203 * an IO range starts from a meta data block and end at the next meta data
204 * block. The io unit's the meta data block tracks data/parity followed it. io
205 * unit is written to log disk with normal write, as we always flush log disk
206 * first and then start move data to raid disks, there is no requirement to
207 * write io unit with FLUSH/FUA
208 */
209struct r5l_io_unit {
210 struct r5l_log *log;
211
212 struct page *meta_page; /* store meta block */
213 int meta_offset; /* current offset in meta_page */
214
215 struct bio *current_bio;/* current_bio accepting new data */
216
217 atomic_t pending_stripe;/* how many stripes not flushed to raid */
218 u64 seq; /* seq number of the metablock */
219 sector_t log_start; /* where the io_unit starts */
220 sector_t log_end; /* where the io_unit ends */
221 struct list_head log_sibling; /* log->running_ios */
222 struct list_head stripe_list; /* stripes added to the io_unit */
223
224 int state;
225 bool need_split_bio;
226 struct bio *split_bio;
227
228 unsigned int has_flush:1; /* include flush request */
229 unsigned int has_fua:1; /* include fua request */
230 unsigned int has_null_flush:1; /* include null flush request */
231 unsigned int has_flush_payload:1; /* include flush payload */
232 /*
233 * io isn't sent yet, flush/fua request can only be submitted till it's
234 * the first IO in running_ios list
235 */
236 unsigned int io_deferred:1;
237
238 struct bio_list flush_barriers; /* size == 0 flush bios */
239};
240
241/* r5l_io_unit state */
242enum r5l_io_unit_state {
243 IO_UNIT_RUNNING = 0, /* accepting new IO */
244 IO_UNIT_IO_START = 1, /* io_unit bio start writing to log,
245 * don't accepting new bio */
246 IO_UNIT_IO_END = 2, /* io_unit bio finish writing to log */
247 IO_UNIT_STRIPE_END = 3, /* stripes data finished writing to raid */
248};
249
250bool r5c_is_writeback(struct r5l_log *log)
251{
252 return (log != NULL &&
253 log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK);
254}
255
256static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
257{
258 start += inc;
259 if (start >= log->device_size)
260 start = start - log->device_size;
261 return start;
262}
263
264static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
265 sector_t end)
266{
267 if (end >= start)
268 return end - start;
269 else
270 return end + log->device_size - start;
271}
272
273static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
274{
275 sector_t used_size;
276
277 used_size = r5l_ring_distance(log, log->last_checkpoint,
278 log->log_start);
279
280 return log->device_size > used_size + size;
281}
282
283static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
284 enum r5l_io_unit_state state)
285{
286 if (WARN_ON(io->state >= state))
287 return;
288 io->state = state;
289}
290
291static void
292r5c_return_dev_pending_writes(struct r5conf *conf, struct r5dev *dev)
293{
294 struct bio *wbi, *wbi2;
295
296 wbi = dev->written;
297 dev->written = NULL;
298 while (wbi && wbi->bi_iter.bi_sector <
299 dev->sector + RAID5_STRIPE_SECTORS(conf)) {
300 wbi2 = r5_next_bio(conf, wbi, dev->sector);
301 md_write_end(conf->mddev);
302 bio_endio(wbi);
303 wbi = wbi2;
304 }
305}
306
307void r5c_handle_cached_data_endio(struct r5conf *conf,
308 struct stripe_head *sh, int disks)
309{
310 int i;
311
312 for (i = sh->disks; i--; ) {
313 if (sh->dev[i].written) {
314 set_bit(R5_UPTODATE, &sh->dev[i].flags);
315 r5c_return_dev_pending_writes(conf, &sh->dev[i]);
316 }
317 }
318}
319
320void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
321
322/* Check whether we should flush some stripes to free up stripe cache */
323void r5c_check_stripe_cache_usage(struct r5conf *conf)
324{
325 int total_cached;
326 struct r5l_log *log = READ_ONCE(conf->log);
327
328 if (!r5c_is_writeback(log))
329 return;
330
331 total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
332 atomic_read(&conf->r5c_cached_full_stripes);
333
334 /*
335 * The following condition is true for either of the following:
336 * - stripe cache pressure high:
337 * total_cached > 3/4 min_nr_stripes ||
338 * empty_inactive_list_nr > 0
339 * - stripe cache pressure moderate:
340 * total_cached > 1/2 min_nr_stripes
341 */
342 if (total_cached > conf->min_nr_stripes * 1 / 2 ||
343 atomic_read(&conf->empty_inactive_list_nr) > 0)
344 r5l_wake_reclaim(log, 0);
345}
346
347/*
348 * flush cache when there are R5C_FULL_STRIPE_FLUSH_BATCH or more full
349 * stripes in the cache
350 */
351void r5c_check_cached_full_stripe(struct r5conf *conf)
352{
353 struct r5l_log *log = READ_ONCE(conf->log);
354
355 if (!r5c_is_writeback(log))
356 return;
357
358 /*
359 * wake up reclaim for R5C_FULL_STRIPE_FLUSH_BATCH cached stripes
360 * or a full stripe (chunk size / 4k stripes).
361 */
362 if (atomic_read(&conf->r5c_cached_full_stripes) >=
363 min(R5C_FULL_STRIPE_FLUSH_BATCH(conf),
364 conf->chunk_sectors >> RAID5_STRIPE_SHIFT(conf)))
365 r5l_wake_reclaim(log, 0);
366}
367
368/*
369 * Total log space (in sectors) needed to flush all data in cache
370 *
371 * To avoid deadlock due to log space, it is necessary to reserve log
372 * space to flush critical stripes (stripes that occupying log space near
373 * last_checkpoint). This function helps check how much log space is
374 * required to flush all cached stripes.
375 *
376 * To reduce log space requirements, two mechanisms are used to give cache
377 * flush higher priorities:
378 * 1. In handle_stripe_dirtying() and schedule_reconstruction(),
379 * stripes ALREADY in journal can be flushed w/o pending writes;
380 * 2. In r5l_write_stripe() and r5c_cache_data(), stripes NOT in journal
381 * can be delayed (r5l_add_no_space_stripe).
382 *
383 * In cache flush, the stripe goes through 1 and then 2. For a stripe that
384 * already passed 1, flushing it requires at most (conf->max_degraded + 1)
385 * pages of journal space. For stripes that has not passed 1, flushing it
386 * requires (conf->raid_disks + 1) pages of journal space. There are at
387 * most (conf->group_cnt + 1) stripe that passed 1. So total journal space
388 * required to flush all cached stripes (in pages) is:
389 *
390 * (stripe_in_journal_count - group_cnt - 1) * (max_degraded + 1) +
391 * (group_cnt + 1) * (raid_disks + 1)
392 * or
393 * (stripe_in_journal_count) * (max_degraded + 1) +
394 * (group_cnt + 1) * (raid_disks - max_degraded)
395 */
396static sector_t r5c_log_required_to_flush_cache(struct r5conf *conf)
397{
398 struct r5l_log *log = READ_ONCE(conf->log);
399
400 if (!r5c_is_writeback(log))
401 return 0;
402
403 return BLOCK_SECTORS *
404 ((conf->max_degraded + 1) * atomic_read(&log->stripe_in_journal_count) +
405 (conf->raid_disks - conf->max_degraded) * (conf->group_cnt + 1));
406}
407
408/*
409 * evaluate log space usage and update R5C_LOG_TIGHT and R5C_LOG_CRITICAL
410 *
411 * R5C_LOG_TIGHT is set when free space on the log device is less than 3x of
412 * reclaim_required_space. R5C_LOG_CRITICAL is set when free space on the log
413 * device is less than 2x of reclaim_required_space.
414 */
415static inline void r5c_update_log_state(struct r5l_log *log)
416{
417 struct r5conf *conf = log->rdev->mddev->private;
418 sector_t free_space;
419 sector_t reclaim_space;
420 bool wake_reclaim = false;
421
422 if (!r5c_is_writeback(log))
423 return;
424
425 free_space = r5l_ring_distance(log, log->log_start,
426 log->last_checkpoint);
427 reclaim_space = r5c_log_required_to_flush_cache(conf);
428 if (free_space < 2 * reclaim_space)
429 set_bit(R5C_LOG_CRITICAL, &conf->cache_state);
430 else {
431 if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
432 wake_reclaim = true;
433 clear_bit(R5C_LOG_CRITICAL, &conf->cache_state);
434 }
435 if (free_space < 3 * reclaim_space)
436 set_bit(R5C_LOG_TIGHT, &conf->cache_state);
437 else
438 clear_bit(R5C_LOG_TIGHT, &conf->cache_state);
439
440 if (wake_reclaim)
441 r5l_wake_reclaim(log, 0);
442}
443
444/*
445 * Put the stripe into writing-out phase by clearing STRIPE_R5C_CACHING.
446 * This function should only be called in write-back mode.
447 */
448void r5c_make_stripe_write_out(struct stripe_head *sh)
449{
450 struct r5conf *conf = sh->raid_conf;
451 struct r5l_log *log = READ_ONCE(conf->log);
452
453 BUG_ON(!r5c_is_writeback(log));
454
455 WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
456 clear_bit(STRIPE_R5C_CACHING, &sh->state);
457
458 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
459 atomic_inc(&conf->preread_active_stripes);
460}
461
462static void r5c_handle_data_cached(struct stripe_head *sh)
463{
464 int i;
465
466 for (i = sh->disks; i--; )
467 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
468 set_bit(R5_InJournal, &sh->dev[i].flags);
469 clear_bit(R5_LOCKED, &sh->dev[i].flags);
470 }
471 clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
472}
473
474/*
475 * this journal write must contain full parity,
476 * it may also contain some data pages
477 */
478static void r5c_handle_parity_cached(struct stripe_head *sh)
479{
480 int i;
481
482 for (i = sh->disks; i--; )
483 if (test_bit(R5_InJournal, &sh->dev[i].flags))
484 set_bit(R5_Wantwrite, &sh->dev[i].flags);
485}
486
487/*
488 * Setting proper flags after writing (or flushing) data and/or parity to the
489 * log device. This is called from r5l_log_endio() or r5l_log_flush_endio().
490 */
491static void r5c_finish_cache_stripe(struct stripe_head *sh)
492{
493 struct r5l_log *log = READ_ONCE(sh->raid_conf->log);
494
495 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
496 BUG_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
497 /*
498 * Set R5_InJournal for parity dev[pd_idx]. This means
 * all data AND parity are in the journal. For RAID 6, it is
500 * NOT necessary to set the flag for dev[qd_idx], as the
501 * two parities are written out together.
502 */
503 set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
504 } else if (test_bit(STRIPE_R5C_CACHING, &sh->state)) {
505 r5c_handle_data_cached(sh);
506 } else {
507 r5c_handle_parity_cached(sh);
508 set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
509 }
510}
511
512static void r5l_io_run_stripes(struct r5l_io_unit *io)
513{
514 struct stripe_head *sh, *next;
515
516 list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
517 list_del_init(&sh->log_list);
518
519 r5c_finish_cache_stripe(sh);
520
521 set_bit(STRIPE_HANDLE, &sh->state);
522 raid5_release_stripe(sh);
523 }
524}
525
526static void r5l_log_run_stripes(struct r5l_log *log)
527{
528 struct r5l_io_unit *io, *next;
529
530 lockdep_assert_held(&log->io_list_lock);
531
532 list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
533 /* don't change list order */
534 if (io->state < IO_UNIT_IO_END)
535 break;
536
537 list_move_tail(&io->log_sibling, &log->finished_ios);
538 r5l_io_run_stripes(io);
539 }
540}
541
542static void r5l_move_to_end_ios(struct r5l_log *log)
543{
544 struct r5l_io_unit *io, *next;
545
546 lockdep_assert_held(&log->io_list_lock);
547
548 list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
549 /* don't change list order */
550 if (io->state < IO_UNIT_IO_END)
551 break;
552 list_move_tail(&io->log_sibling, &log->io_end_ios);
553 }
554}
555
556static void __r5l_stripe_write_finished(struct r5l_io_unit *io);
557static void r5l_log_endio(struct bio *bio)
558{
559 struct r5l_io_unit *io = bio->bi_private;
560 struct r5l_io_unit *io_deferred;
561 struct r5l_log *log = io->log;
562 unsigned long flags;
563 bool has_null_flush;
564 bool has_flush_payload;
565
566 if (bio->bi_status)
567 md_error(log->rdev->mddev, log->rdev);
568
569 bio_put(bio);
570 mempool_free(io->meta_page, &log->meta_pool);
571
572 spin_lock_irqsave(&log->io_list_lock, flags);
573 __r5l_set_io_unit_state(io, IO_UNIT_IO_END);
574
575 /*
 * if the io does not have null_flush or flush payload,
577 * it is not safe to access it after releasing io_list_lock.
578 * Therefore, it is necessary to check the condition with
579 * the lock held.
580 */
581 has_null_flush = io->has_null_flush;
582 has_flush_payload = io->has_flush_payload;
583
584 if (log->need_cache_flush && !list_empty(&io->stripe_list))
585 r5l_move_to_end_ios(log);
586 else
587 r5l_log_run_stripes(log);
588 if (!list_empty(&log->running_ios)) {
589 /*
590 * FLUSH/FUA io_unit is deferred because of ordering, now we
591 * can dispatch it
592 */
593 io_deferred = list_first_entry(&log->running_ios,
594 struct r5l_io_unit, log_sibling);
595 if (io_deferred->io_deferred)
596 schedule_work(&log->deferred_io_work);
597 }
598
599 spin_unlock_irqrestore(&log->io_list_lock, flags);
600
601 if (log->need_cache_flush)
602 md_wakeup_thread(log->rdev->mddev->thread);
603
604 /* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */
605 if (has_null_flush) {
606 struct bio *bi;
607
608 WARN_ON(bio_list_empty(&io->flush_barriers));
609 while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) {
610 bio_endio(bi);
611 if (atomic_dec_and_test(&io->pending_stripe)) {
612 __r5l_stripe_write_finished(io);
613 return;
614 }
615 }
616 }
617 /* decrease pending_stripe for flush payload */
618 if (has_flush_payload)
619 if (atomic_dec_and_test(&io->pending_stripe))
620 __r5l_stripe_write_finished(io);
621}
622
623static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
624{
625 unsigned long flags;
626
627 spin_lock_irqsave(&log->io_list_lock, flags);
628 __r5l_set_io_unit_state(io, IO_UNIT_IO_START);
629 spin_unlock_irqrestore(&log->io_list_lock, flags);
630
631 /*
 * In case of journal device failures, submit_bio will get an error and
 * call endio, and active stripes will then continue the write process.
 * Therefore, it is not necessary to check the Faulty bit of the journal
 * device here.
636 *
637 * We can't check split_bio after current_bio is submitted. If
638 * io->split_bio is null, after current_bio is submitted, current_bio
639 * might already be completed and the io_unit is freed. We submit
640 * split_bio first to avoid the issue.
641 */
642 if (io->split_bio) {
643 if (io->has_flush)
644 io->split_bio->bi_opf |= REQ_PREFLUSH;
645 if (io->has_fua)
646 io->split_bio->bi_opf |= REQ_FUA;
647 submit_bio(io->split_bio);
648 }
649
650 if (io->has_flush)
651 io->current_bio->bi_opf |= REQ_PREFLUSH;
652 if (io->has_fua)
653 io->current_bio->bi_opf |= REQ_FUA;
654 submit_bio(io->current_bio);
655}
656
657/* deferred io_unit will be dispatched here */
658static void r5l_submit_io_async(struct work_struct *work)
659{
660 struct r5l_log *log = container_of(work, struct r5l_log,
661 deferred_io_work);
662 struct r5l_io_unit *io = NULL;
663 unsigned long flags;
664
665 spin_lock_irqsave(&log->io_list_lock, flags);
666 if (!list_empty(&log->running_ios)) {
667 io = list_first_entry(&log->running_ios, struct r5l_io_unit,
668 log_sibling);
669 if (!io->io_deferred)
670 io = NULL;
671 else
672 io->io_deferred = 0;
673 }
674 spin_unlock_irqrestore(&log->io_list_lock, flags);
675 if (io)
676 r5l_do_submit_io(log, io);
677}
678
679static void r5c_disable_writeback_async(struct work_struct *work)
680{
681 struct r5l_log *log = container_of(work, struct r5l_log,
682 disable_writeback_work);
683 struct mddev *mddev = log->rdev->mddev;
684 struct r5conf *conf = mddev->private;
685
686 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
687 return;
688 pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n",
689 mdname(mddev));
690
691 /* wait superblock change before suspend */
692 wait_event(mddev->sb_wait,
693 !READ_ONCE(conf->log) ||
694 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
695
696 log = READ_ONCE(conf->log);
697 if (log) {
698 mddev_suspend(mddev, false);
699 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
700 mddev_resume(mddev);
701 }
702}
703
704static void r5l_submit_current_io(struct r5l_log *log)
705{
706 struct r5l_io_unit *io = log->current_io;
707 struct r5l_meta_block *block;
708 unsigned long flags;
709 u32 crc;
710 bool do_submit = true;
711
712 if (!io)
713 return;
714
715 block = page_address(io->meta_page);
716 block->meta_size = cpu_to_le32(io->meta_offset);
717 crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
718 block->checksum = cpu_to_le32(crc);
719
720 log->current_io = NULL;
721 spin_lock_irqsave(&log->io_list_lock, flags);
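 /*
 * FLUSH/FUA io_units must be dispatched in log order: if this io_unit is
 * not the oldest one still running, defer it until the earlier io_units
 * have completed (see r5l_log_endio).
 */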
722 if (io->has_flush || io->has_fua) {
723 if (io != list_first_entry(&log->running_ios,
724 struct r5l_io_unit, log_sibling)) {
725 io->io_deferred = 1;
726 do_submit = false;
727 }
728 }
729 spin_unlock_irqrestore(&log->io_list_lock, flags);
730 if (do_submit)
731 r5l_do_submit_io(log, io);
732}
733
734static struct bio *r5l_bio_alloc(struct r5l_log *log)
735{
736 struct bio *bio = bio_alloc_bioset(log->rdev->bdev, BIO_MAX_VECS,
737 REQ_OP_WRITE, GFP_NOIO, &log->bs);
738
739 bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;
740
741 return bio;
742}
743
744static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
745{
746 log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);
747
748 r5c_update_log_state(log);
749 /*
750 * If we filled up the log device start from the beginning again,
751 * which will require a new bio.
752 *
 * Note: for this to work properly the log size needs to be a multiple
754 * of BLOCK_SECTORS.
755 */
756 if (log->log_start == 0)
757 io->need_split_bio = true;
758
759 io->log_end = log->log_start;
760}
761
762static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
763{
764 struct r5l_io_unit *io;
765 struct r5l_meta_block *block;
766
767 io = mempool_alloc(&log->io_pool, GFP_ATOMIC);
768 if (!io)
769 return NULL;
770 memset(io, 0, sizeof(*io));
771
772 io->log = log;
773 INIT_LIST_HEAD(&io->log_sibling);
774 INIT_LIST_HEAD(&io->stripe_list);
775 bio_list_init(&io->flush_barriers);
776 io->state = IO_UNIT_RUNNING;
777
778 io->meta_page = mempool_alloc(&log->meta_pool, GFP_NOIO);
779 block = page_address(io->meta_page);
780 clear_page(block);
781 block->magic = cpu_to_le32(R5LOG_MAGIC);
782 block->version = R5LOG_VERSION;
783 block->seq = cpu_to_le64(log->seq);
784 block->position = cpu_to_le64(log->log_start);
785
786 io->log_start = log->log_start;
787 io->meta_offset = sizeof(struct r5l_meta_block);
788 io->seq = log->seq++;
789
790 io->current_bio = r5l_bio_alloc(log);
791 io->current_bio->bi_end_io = r5l_log_endio;
792 io->current_bio->bi_private = io;
793 __bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0);
794
795 r5_reserve_log_entry(log, io);
796
797 spin_lock_irq(&log->io_list_lock);
798 list_add_tail(&io->log_sibling, &log->running_ios);
799 spin_unlock_irq(&log->io_list_lock);
800
801 return io;
802}
803
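/*
 * Make sure log->current_io has room for payload_size more bytes in its
 * meta page; submit the current io_unit and start a new one if it does not.
 */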
804static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
805{
806 if (log->current_io &&
807 log->current_io->meta_offset + payload_size > PAGE_SIZE)
808 r5l_submit_current_io(log);
809
810 if (!log->current_io) {
811 log->current_io = r5l_new_meta(log);
812 if (!log->current_io)
813 return -ENOMEM;
814 }
815
816 return 0;
817}
818
819static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
820 sector_t location,
821 u32 checksum1, u32 checksum2,
822 bool checksum2_valid)
823{
824 struct r5l_io_unit *io = log->current_io;
825 struct r5l_payload_data_parity *payload;
826
827 payload = page_address(io->meta_page) + io->meta_offset;
828 payload->header.type = cpu_to_le16(type);
829 payload->header.flags = cpu_to_le16(0);
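 /* payload->size is in sectors: one 4k page (8 sectors) per checksum */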
830 payload->size = cpu_to_le32((1 + !!checksum2_valid) <<
831 (PAGE_SHIFT - 9));
832 payload->location = cpu_to_le64(location);
833 payload->checksum[0] = cpu_to_le32(checksum1);
834 if (checksum2_valid)
835 payload->checksum[1] = cpu_to_le32(checksum2);
836
837 io->meta_offset += sizeof(struct r5l_payload_data_parity) +
838 sizeof(__le32) * (1 + !!checksum2_valid);
839}
840
841static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
842{
843 struct r5l_io_unit *io = log->current_io;
844
845 if (io->need_split_bio) {
846 BUG_ON(io->split_bio);
847 io->split_bio = io->current_bio;
848 io->current_bio = r5l_bio_alloc(log);
849 bio_chain(io->current_bio, io->split_bio);
850 io->need_split_bio = false;
851 }
852
853 if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
854 BUG();
855
856 r5_reserve_log_entry(log, io);
857}
858
859static void r5l_append_flush_payload(struct r5l_log *log, sector_t sect)
860{
861 struct mddev *mddev = log->rdev->mddev;
862 struct r5conf *conf = mddev->private;
863 struct r5l_io_unit *io;
864 struct r5l_payload_flush *payload;
865 int meta_size;
866
867 /*
868 * payload_flush requires extra writes to the journal.
869 * To avoid handling the extra IO in quiesce, just skip
870 * flush_payload
871 */
872 if (conf->quiesce)
873 return;
874
875 mutex_lock(&log->io_mutex);
876 meta_size = sizeof(struct r5l_payload_flush) + sizeof(__le64);
877
878 if (r5l_get_meta(log, meta_size)) {
879 mutex_unlock(&log->io_mutex);
880 return;
881 }
882
883 /* current implementation is one stripe per flush payload */
884 io = log->current_io;
885 payload = page_address(io->meta_page) + io->meta_offset;
886 payload->header.type = cpu_to_le16(R5LOG_PAYLOAD_FLUSH);
887 payload->header.flags = cpu_to_le16(0);
888 payload->size = cpu_to_le32(sizeof(__le64));
889 payload->flush_stripes[0] = cpu_to_le64(sect);
890 io->meta_offset += meta_size;
891 /* multiple flush payloads count as one pending_stripe */
892 if (!io->has_flush_payload) {
893 io->has_flush_payload = 1;
894 atomic_inc(&io->pending_stripe);
895 }
896 mutex_unlock(&log->io_mutex);
897}
898
899static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
900 int data_pages, int parity_pages)
901{
902 int i;
903 int meta_size;
904 int ret;
905 struct r5l_io_unit *io;
906
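 /*
 * Meta space needed: one payload header plus one checksum per data page,
 * and a single payload header with one checksum per page for the parity.
 */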
907 meta_size =
908 ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
909 * data_pages) +
910 sizeof(struct r5l_payload_data_parity) +
911 sizeof(__le32) * parity_pages;
912
913 ret = r5l_get_meta(log, meta_size);
914 if (ret)
915 return ret;
916
917 io = log->current_io;
918
919 if (test_and_clear_bit(STRIPE_R5C_PREFLUSH, &sh->state))
920 io->has_flush = 1;
921
922 for (i = 0; i < sh->disks; i++) {
923 if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
924 test_bit(R5_InJournal, &sh->dev[i].flags))
925 continue;
926 if (i == sh->pd_idx || i == sh->qd_idx)
927 continue;
928 if (test_bit(R5_WantFUA, &sh->dev[i].flags) &&
929 log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) {
930 io->has_fua = 1;
931 /*
932 * we need to flush journal to make sure recovery can
933 * reach the data with fua flag
934 */
935 io->has_flush = 1;
936 }
937 r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
938 raid5_compute_blocknr(sh, i, 0),
939 sh->dev[i].log_checksum, 0, false);
940 r5l_append_payload_page(log, sh->dev[i].page);
941 }
942
943 if (parity_pages == 2) {
944 r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
945 sh->sector, sh->dev[sh->pd_idx].log_checksum,
946 sh->dev[sh->qd_idx].log_checksum, true);
947 r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
948 r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
949 } else if (parity_pages == 1) {
950 r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
951 sh->sector, sh->dev[sh->pd_idx].log_checksum,
952 0, false);
953 r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
954 } else /* Just writing data, not parity, in caching phase */
955 BUG_ON(parity_pages != 0);
956
957 list_add_tail(&sh->log_list, &io->stripe_list);
958 atomic_inc(&io->pending_stripe);
959 sh->log_io = io;
960
961 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
962 return 0;
963
964 if (sh->log_start == MaxSector) {
965 BUG_ON(!list_empty(&sh->r5c));
966 sh->log_start = io->log_start;
967 spin_lock_irq(&log->stripe_in_journal_lock);
968 list_add_tail(&sh->r5c,
969 &log->stripe_in_journal_list);
970 spin_unlock_irq(&log->stripe_in_journal_lock);
971 atomic_inc(&log->stripe_in_journal_count);
972 }
973 return 0;
974}
975
976/* add stripe to no_space_stripes, and then wake up reclaim */
977static inline void r5l_add_no_space_stripe(struct r5l_log *log,
978 struct stripe_head *sh)
979{
980 spin_lock(&log->no_space_stripes_lock);
981 list_add_tail(&sh->log_list, &log->no_space_stripes);
982 spin_unlock(&log->no_space_stripes_lock);
983}
984
985/*
986 * running in raid5d, where reclaim could wait for raid5d too (when it flushes
987 * data from log to raid disks), so we shouldn't wait for reclaim here
988 */
989int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
990{
991 struct r5conf *conf = sh->raid_conf;
992 int write_disks = 0;
993 int data_pages, parity_pages;
994 int reserve;
995 int i;
996 int ret = 0;
997 bool wake_reclaim = false;
998
999 if (!log)
1000 return -EAGAIN;
1001 /* Don't support stripe batch */
1002 if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
1003 test_bit(STRIPE_SYNCING, &sh->state)) {
1004 /* the stripe is written to log, we start writing it to raid */
1005 clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
1006 return -EAGAIN;
1007 }
1008
1009 WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
1010
1011 for (i = 0; i < sh->disks; i++) {
1012 void *addr;
1013
1014 if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
1015 test_bit(R5_InJournal, &sh->dev[i].flags))
1016 continue;
1017
1018 write_disks++;
1019 /* checksum is already calculated in last run */
1020 if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
1021 continue;
1022 addr = kmap_atomic(sh->dev[i].page);
1023 sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
1024 addr, PAGE_SIZE);
1025 kunmap_atomic(addr);
1026 }
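 /* one parity page (P) without a Q disk, two (P and Q) for raid6 */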
1027 parity_pages = 1 + !!(sh->qd_idx >= 0);
1028 data_pages = write_disks - parity_pages;
1029
1030 set_bit(STRIPE_LOG_TRAPPED, &sh->state);
1031 /*
1032 * The stripe must enter state machine again to finish the write, so
1033 * don't delay.
1034 */
1035 clear_bit(STRIPE_DELAYED, &sh->state);
1036 atomic_inc(&sh->count);
1037
1038 mutex_lock(&log->io_mutex);
1039 /* meta + data */
1040 reserve = (1 + write_disks) << (PAGE_SHIFT - 9);
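 /*
 * For example (hypothetical numbers): a full-stripe write on a 6-disk
 * raid5 has write_disks == 6, so this reserves (1 + 6) * 8 = 56 sectors:
 * one meta block plus one 4k block per written disk.
 */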
1041
1042 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
1043 if (!r5l_has_free_space(log, reserve)) {
1044 r5l_add_no_space_stripe(log, sh);
1045 wake_reclaim = true;
1046 } else {
1047 ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
1048 if (ret) {
1049 spin_lock_irq(&log->io_list_lock);
1050 list_add_tail(&sh->log_list,
1051 &log->no_mem_stripes);
1052 spin_unlock_irq(&log->io_list_lock);
1053 }
1054 }
1055 } else { /* R5C_JOURNAL_MODE_WRITE_BACK */
1056 /*
1057 * log space critical, do not process stripes that are
1058 * not in cache yet (sh->log_start == MaxSector).
1059 */
1060 if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
1061 sh->log_start == MaxSector) {
1062 r5l_add_no_space_stripe(log, sh);
1063 wake_reclaim = true;
1064 reserve = 0;
1065 } else if (!r5l_has_free_space(log, reserve)) {
1066 if (sh->log_start == log->last_checkpoint)
1067 BUG();
1068 else
1069 r5l_add_no_space_stripe(log, sh);
1070 } else {
1071 ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
1072 if (ret) {
1073 spin_lock_irq(&log->io_list_lock);
1074 list_add_tail(&sh->log_list,
1075 &log->no_mem_stripes);
1076 spin_unlock_irq(&log->io_list_lock);
1077 }
1078 }
1079 }
1080
1081 mutex_unlock(&log->io_mutex);
1082 if (wake_reclaim)
1083 r5l_wake_reclaim(log, reserve);
1084 return 0;
1085}
1086
1087void r5l_write_stripe_run(struct r5l_log *log)
1088{
1089 if (!log)
1090 return;
1091 mutex_lock(&log->io_mutex);
1092 r5l_submit_current_io(log);
1093 mutex_unlock(&log->io_mutex);
1094}
1095
1096int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
1097{
1098 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
1099 /*
1100 * in write through (journal only)
1101 * we flush log disk cache first, then write stripe data to
1102 * raid disks. So if bio is finished, the log disk cache is
 * flushed already. The recovery code guarantees we can recover
 * the bio from the log disk, so we don't need to flush again
1105 */
1106 if (bio->bi_iter.bi_size == 0) {
1107 bio_endio(bio);
1108 return 0;
1109 }
1110 bio->bi_opf &= ~REQ_PREFLUSH;
1111 } else {
1112 /* write back (with cache) */
1113 if (bio->bi_iter.bi_size == 0) {
1114 mutex_lock(&log->io_mutex);
1115 r5l_get_meta(log, 0);
1116 bio_list_add(&log->current_io->flush_barriers, bio);
1117 log->current_io->has_flush = 1;
1118 log->current_io->has_null_flush = 1;
1119 atomic_inc(&log->current_io->pending_stripe);
1120 r5l_submit_current_io(log);
1121 mutex_unlock(&log->io_mutex);
1122 return 0;
1123 }
1124 }
1125 return -EAGAIN;
1126}
1127
1128/* This will run after log space is reclaimed */
1129static void r5l_run_no_space_stripes(struct r5l_log *log)
1130{
1131 struct stripe_head *sh;
1132
1133 spin_lock(&log->no_space_stripes_lock);
1134 while (!list_empty(&log->no_space_stripes)) {
1135 sh = list_first_entry(&log->no_space_stripes,
1136 struct stripe_head, log_list);
1137 list_del_init(&sh->log_list);
1138 set_bit(STRIPE_HANDLE, &sh->state);
1139 raid5_release_stripe(sh);
1140 }
1141 spin_unlock(&log->no_space_stripes_lock);
1142}
1143
1144/*
1145 * calculate new last_checkpoint
1146 * for write through mode, returns log->next_checkpoint
1147 * for write back, returns log_start of first sh in stripe_in_journal_list
1148 */
1149static sector_t r5c_calculate_new_cp(struct r5conf *conf)
1150{
1151 struct stripe_head *sh;
1152 struct r5l_log *log = READ_ONCE(conf->log);
1153 sector_t new_cp;
1154 unsigned long flags;
1155
1156 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
1157 return log->next_checkpoint;
1158
1159 spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
1160 if (list_empty(&log->stripe_in_journal_list)) {
1161 /* all stripes flushed */
1162 spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
1163 return log->next_checkpoint;
1164 }
1165 sh = list_first_entry(&log->stripe_in_journal_list,
1166 struct stripe_head, r5c);
1167 new_cp = sh->log_start;
1168 spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
1169 return new_cp;
1170}
1171
1172static sector_t r5l_reclaimable_space(struct r5l_log *log)
1173{
1174 struct r5conf *conf = log->rdev->mddev->private;
1175
1176 return r5l_ring_distance(log, log->last_checkpoint,
1177 r5c_calculate_new_cp(conf));
1178}
1179
1180static void r5l_run_no_mem_stripe(struct r5l_log *log)
1181{
1182 struct stripe_head *sh;
1183
1184 lockdep_assert_held(&log->io_list_lock);
1185
1186 if (!list_empty(&log->no_mem_stripes)) {
1187 sh = list_first_entry(&log->no_mem_stripes,
1188 struct stripe_head, log_list);
1189 list_del_init(&sh->log_list);
1190 set_bit(STRIPE_HANDLE, &sh->state);
1191 raid5_release_stripe(sh);
1192 }
1193}
1194
1195static bool r5l_complete_finished_ios(struct r5l_log *log)
1196{
1197 struct r5l_io_unit *io, *next;
1198 bool found = false;
1199
1200 lockdep_assert_held(&log->io_list_lock);
1201
1202 list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) {
1203 /* don't change list order */
1204 if (io->state < IO_UNIT_STRIPE_END)
1205 break;
1206
1207 log->next_checkpoint = io->log_start;
1208
1209 list_del(&io->log_sibling);
1210 mempool_free(io, &log->io_pool);
1211 r5l_run_no_mem_stripe(log);
1212
1213 found = true;
1214 }
1215
1216 return found;
1217}
1218
1219static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
1220{
1221 struct r5l_log *log = io->log;
1222 struct r5conf *conf = log->rdev->mddev->private;
1223 unsigned long flags;
1224
1225 spin_lock_irqsave(&log->io_list_lock, flags);
1226 __r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END);
1227
1228 if (!r5l_complete_finished_ios(log)) {
1229 spin_unlock_irqrestore(&log->io_list_lock, flags);
1230 return;
1231 }
1232
1233 if (r5l_reclaimable_space(log) > log->max_free_space ||
1234 test_bit(R5C_LOG_TIGHT, &conf->cache_state))
1235 r5l_wake_reclaim(log, 0);
1236
1237 spin_unlock_irqrestore(&log->io_list_lock, flags);
1238 wake_up(&log->iounit_wait);
1239}
1240
1241void r5l_stripe_write_finished(struct stripe_head *sh)
1242{
1243 struct r5l_io_unit *io;
1244
1245 io = sh->log_io;
1246 sh->log_io = NULL;
1247
1248 if (io && atomic_dec_and_test(&io->pending_stripe))
1249 __r5l_stripe_write_finished(io);
1250}
1251
1252static void r5l_log_flush_endio(struct bio *bio)
1253{
1254 struct r5l_log *log = container_of(bio, struct r5l_log,
1255 flush_bio);
1256 unsigned long flags;
1257 struct r5l_io_unit *io;
1258
1259 if (bio->bi_status)
1260 md_error(log->rdev->mddev, log->rdev);
1261 bio_uninit(bio);
1262
1263 spin_lock_irqsave(&log->io_list_lock, flags);
1264 list_for_each_entry(io, &log->flushing_ios, log_sibling)
1265 r5l_io_run_stripes(io);
1266 list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
1267 spin_unlock_irqrestore(&log->io_list_lock, flags);
1268}
1269
/*
 * Start dispatching IO to the raid disks.
 *
 * The log consists of a sequence of io_units, each led by a meta block. There
 * is one situation we want to avoid: a broken meta in the middle of the log
 * prevents recovery from finding the meta blocks after it. So if an operation
 * requires a meta block to be persistent in the log, every meta block before
 * it must be persistent in the log too. A case is:
 *
 * stripe data/parity is in the log and we start writing the stripe to the
 * raid disks; the data/parity must be persistent in the log before we do the
 * write to the raid disks.
 *
 * The solution is that we strictly maintain io_unit list order. We only write
 * the stripes of an io_unit to the raid disks once that io_unit and all
 * io_units before it have their data/parity in the log.
 */
1284void r5l_flush_stripe_to_raid(struct r5l_log *log)
1285{
1286 bool do_flush;
1287
1288 if (!log || !log->need_cache_flush)
1289 return;
1290
1291 spin_lock_irq(&log->io_list_lock);
1292 /* flush bio is running */
1293 if (!list_empty(&log->flushing_ios)) {
1294 spin_unlock_irq(&log->io_list_lock);
1295 return;
1296 }
1297 list_splice_tail_init(&log->io_end_ios, &log->flushing_ios);
1298 do_flush = !list_empty(&log->flushing_ios);
1299 spin_unlock_irq(&log->io_list_lock);
1300
1301 if (!do_flush)
1302 return;
1303 bio_init(&log->flush_bio, log->rdev->bdev, NULL, 0,
1304 REQ_OP_WRITE | REQ_PREFLUSH);
1305 log->flush_bio.bi_end_io = r5l_log_flush_endio;
1306 submit_bio(&log->flush_bio);
1307}
1308
1309static void r5l_write_super(struct r5l_log *log, sector_t cp);
1310static void r5l_write_super_and_discard_space(struct r5l_log *log,
1311 sector_t end)
1312{
1313 struct block_device *bdev = log->rdev->bdev;
1314 struct mddev *mddev;
1315
1316 r5l_write_super(log, end);
1317
1318 if (!bdev_max_discard_sectors(bdev))
1319 return;
1320
1321 mddev = log->rdev->mddev;
1322 /*
1323 * Discard could zero data, so before discard we must make sure
1324 * superblock is updated to new log tail. Updating superblock (either
1325 * directly call md_update_sb() or depend on md thread) must hold
1326 * reconfig mutex. On the other hand, raid5_quiesce is called with
 * reconfig_mutex held. The first step of raid5_quiesce() is waiting
 * for all IO to finish, hence waiting for the reclaim thread, while the
 * reclaim thread is calling this function and waiting for the reconfig
 * mutex. So there is a deadlock. We work around this issue with a trylock.
1331 * FIXME: we could miss discard if we can't take reconfig mutex
1332 */
1333 set_mask_bits(&mddev->sb_flags, 0,
1334 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
1335 if (!mddev_trylock(mddev))
1336 return;
1337 md_update_sb(mddev, 1);
1338 mddev_unlock(mddev);
1339
1340 /* discard IO error really doesn't matter, ignore it */
1341 if (log->last_checkpoint < end) {
1342 blkdev_issue_discard(bdev,
1343 log->last_checkpoint + log->rdev->data_offset,
1344 end - log->last_checkpoint, GFP_NOIO);
1345 } else {
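 /*
 * The reclaimed region wraps past the end of the log device: discard
 * from the old tail to the end of the device, then from the start of
 * the device up to the new tail.
 */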
1346 blkdev_issue_discard(bdev,
1347 log->last_checkpoint + log->rdev->data_offset,
1348 log->device_size - log->last_checkpoint,
1349 GFP_NOIO);
1350 blkdev_issue_discard(bdev, log->rdev->data_offset, end,
1351 GFP_NOIO);
1352 }
1353}
1354
1355/*
1356 * r5c_flush_stripe moves stripe from cached list to handle_list. When called,
1357 * the stripe must be on r5c_cached_full_stripes or r5c_cached_partial_stripes.
1358 *
1359 * must hold conf->device_lock
1360 */
1361static void r5c_flush_stripe(struct r5conf *conf, struct stripe_head *sh)
1362{
1363 BUG_ON(list_empty(&sh->lru));
1364 BUG_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
1365 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
1366
1367 /*
1368 * The stripe is not ON_RELEASE_LIST, so it is safe to call
1369 * raid5_release_stripe() while holding conf->device_lock
1370 */
1371 BUG_ON(test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));
1372 lockdep_assert_held(&conf->device_lock);
1373
1374 list_del_init(&sh->lru);
1375 atomic_inc(&sh->count);
1376
1377 set_bit(STRIPE_HANDLE, &sh->state);
1378 atomic_inc(&conf->active_stripes);
1379 r5c_make_stripe_write_out(sh);
1380
1381 if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state))
1382 atomic_inc(&conf->r5c_flushing_partial_stripes);
1383 else
1384 atomic_inc(&conf->r5c_flushing_full_stripes);
1385 raid5_release_stripe(sh);
1386}
1387
1388/*
 * if num == 0, flush all full stripes
 * if num > 0, flush all full stripes. If fewer than num full stripes are
 * flushed, flush some partial stripes until a total of num stripes are
 * flushed or there are no more cached stripes.
1393 */
1394void r5c_flush_cache(struct r5conf *conf, int num)
1395{
1396 int count;
1397 struct stripe_head *sh, *next;
1398
1399 lockdep_assert_held(&conf->device_lock);
1400 if (!READ_ONCE(conf->log))
1401 return;
1402
1403 count = 0;
1404 list_for_each_entry_safe(sh, next, &conf->r5c_full_stripe_list, lru) {
1405 r5c_flush_stripe(conf, sh);
1406 count++;
1407 }
1408
1409 if (count >= num)
1410 return;
1411 list_for_each_entry_safe(sh, next,
1412 &conf->r5c_partial_stripe_list, lru) {
1413 r5c_flush_stripe(conf, sh);
1414 if (++count >= num)
1415 break;
1416 }
1417}
1418
1419static void r5c_do_reclaim(struct r5conf *conf)
1420{
1421 struct r5l_log *log = READ_ONCE(conf->log);
1422 struct stripe_head *sh;
1423 int count = 0;
1424 unsigned long flags;
1425 int total_cached;
1426 int stripes_to_flush;
1427 int flushing_partial, flushing_full;
1428
1429 if (!r5c_is_writeback(log))
1430 return;
1431
1432 flushing_partial = atomic_read(&conf->r5c_flushing_partial_stripes);
1433 flushing_full = atomic_read(&conf->r5c_flushing_full_stripes);
1434 total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
1435 atomic_read(&conf->r5c_cached_full_stripes) -
1436 flushing_full - flushing_partial;
1437
1438 if (total_cached > conf->min_nr_stripes * 3 / 4 ||
1439 atomic_read(&conf->empty_inactive_list_nr) > 0)
1440 /*
 * if stripe cache pressure is high, flush all full stripes and
 * some partial stripes
1443 */
1444 stripes_to_flush = R5C_RECLAIM_STRIPE_GROUP;
1445 else if (total_cached > conf->min_nr_stripes * 1 / 2 ||
1446 atomic_read(&conf->r5c_cached_full_stripes) - flushing_full >
1447 R5C_FULL_STRIPE_FLUSH_BATCH(conf))
1448 /*
 * if stripe cache pressure is moderate, or if there are many full
 * stripes, flush all full stripes
1451 */
1452 stripes_to_flush = 0;
1453 else
1454 /* no need to flush */
1455 stripes_to_flush = -1;
1456
1457 if (stripes_to_flush >= 0) {
1458 spin_lock_irqsave(&conf->device_lock, flags);
1459 r5c_flush_cache(conf, stripes_to_flush);
1460 spin_unlock_irqrestore(&conf->device_lock, flags);
1461 }
1462
1463 /* if log space is tight, flush stripes on stripe_in_journal_list */
1464 if (test_bit(R5C_LOG_TIGHT, &conf->cache_state)) {
1465 spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
1466 spin_lock(&conf->device_lock);
1467 list_for_each_entry(sh, &log->stripe_in_journal_list, r5c) {
1468 /*
1469 * stripes on stripe_in_journal_list could be in any
1470 * state of the stripe_cache state machine. In this
1471 * case, we only want to flush stripe on
1472 * r5c_cached_full/partial_stripes. The following
1473 * condition makes sure the stripe is on one of the
1474 * two lists.
1475 */
1476 if (!list_empty(&sh->lru) &&
1477 !test_bit(STRIPE_HANDLE, &sh->state) &&
1478 atomic_read(&sh->count) == 0) {
1479 r5c_flush_stripe(conf, sh);
1480 if (count++ >= R5C_RECLAIM_STRIPE_GROUP)
1481 break;
1482 }
1483 }
1484 spin_unlock(&conf->device_lock);
1485 spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
1486 }
1487
1488 if (!test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
1489 r5l_run_no_space_stripes(log);
1490
1491 md_wakeup_thread(conf->mddev->thread);
1492}
1493
1494static void r5l_do_reclaim(struct r5l_log *log)
1495{
1496 struct r5conf *conf = log->rdev->mddev->private;
1497 sector_t reclaim_target = xchg(&log->reclaim_target, 0);
1498 sector_t reclaimable;
1499 sector_t next_checkpoint;
1500 bool write_super;
1501
1502 spin_lock_irq(&log->io_list_lock);
1503 write_super = r5l_reclaimable_space(log) > log->max_free_space ||
1504 reclaim_target != 0 || !list_empty(&log->no_space_stripes);
1505 /*
 * Move the proper io_units to the reclaim list. We should not change the
 * order: reclaimable and unreclaimable io_units can be mixed in the list,
 * and we must not reuse the space of an unreclaimable io_unit.
1509 */
1510 while (1) {
1511 reclaimable = r5l_reclaimable_space(log);
1512 if (reclaimable >= reclaim_target ||
1513 (list_empty(&log->running_ios) &&
1514 list_empty(&log->io_end_ios) &&
1515 list_empty(&log->flushing_ios) &&
1516 list_empty(&log->finished_ios)))
1517 break;
1518
1519 md_wakeup_thread(log->rdev->mddev->thread);
1520 wait_event_lock_irq(log->iounit_wait,
1521 r5l_reclaimable_space(log) > reclaimable,
1522 log->io_list_lock);
1523 }
1524
1525 next_checkpoint = r5c_calculate_new_cp(conf);
1526 spin_unlock_irq(&log->io_list_lock);
1527
1528 if (reclaimable == 0 || !write_super)
1529 return;
1530
1531 /*
1532 * write_super will flush cache of each raid disk. We must write super
1533 * here, because the log area might be reused soon and we don't want to
1534 * confuse recovery
1535 */
1536 r5l_write_super_and_discard_space(log, next_checkpoint);
1537
1538 mutex_lock(&log->io_mutex);
1539 log->last_checkpoint = next_checkpoint;
1540 r5c_update_log_state(log);
1541 mutex_unlock(&log->io_mutex);
1542
1543 r5l_run_no_space_stripes(log);
1544}
1545
1546static void r5l_reclaim_thread(struct md_thread *thread)
1547{
1548 struct mddev *mddev = thread->mddev;
1549 struct r5conf *conf = mddev->private;
1550 struct r5l_log *log = READ_ONCE(conf->log);
1551
1552 if (!log)
1553 return;
1554 r5c_do_reclaim(conf);
1555 r5l_do_reclaim(log);
1556}
1557
1558void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
1559{
1560 unsigned long target;
1561 unsigned long new = (unsigned long)space; /* overflow in theory */
1562
1563 if (!log)
1564 return;
1565
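 /*
 * Only replace the pending reclaim target when the new request is at
 * least as large; a smaller request is already covered by the current
 * target.
 */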
1566 target = READ_ONCE(log->reclaim_target);
1567 do {
1568 if (new < target)
1569 return;
1570 } while (!try_cmpxchg(&log->reclaim_target, &target, new));
1571 md_wakeup_thread(log->reclaim_thread);
1572}
1573
1574void r5l_quiesce(struct r5l_log *log, int quiesce)
1575{
1576 struct mddev *mddev = log->rdev->mddev;
1577 struct md_thread *thread = rcu_dereference_protected(
1578 log->reclaim_thread, lockdep_is_held(&mddev->reconfig_mutex));
1579
1580 if (quiesce) {
1581 /* make sure r5l_write_super_and_discard_space exits */
1582 wake_up(&mddev->sb_wait);
1583 kthread_park(thread->tsk);
1584 r5l_wake_reclaim(log, MaxSector);
1585 r5l_do_reclaim(log);
1586 } else
1587 kthread_unpark(thread->tsk);
1588}
1589
1590bool r5l_log_disk_error(struct r5conf *conf)
1591{
1592 struct r5l_log *log = READ_ONCE(conf->log);
1593
1594 /* don't allow write if journal disk is missing */
1595 if (!log)
1596 return test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
1597 else
1598 return test_bit(Faulty, &log->rdev->flags);
1599}
1600
1601#define R5L_RECOVERY_PAGE_POOL_SIZE 256
1602
1603struct r5l_recovery_ctx {
1604 struct page *meta_page; /* current meta */
1605 sector_t meta_total_blocks; /* total size of current meta and data */
1606 sector_t pos; /* recovery position */
1607 u64 seq; /* recovery position seq */
1608 int data_parity_stripes; /* number of data_parity stripes */
1609 int data_only_stripes; /* number of data_only stripes */
1610 struct list_head cached_list;
1611
1612 /*
1613 * read ahead page pool (ra_pool)
1614 * in recovery, log is read sequentially. It is not efficient to
1615 * read every page with sync_page_io(). The read ahead page pool
 * reads multiple pages with one IO, so further log reads can
 * just copy data from the pool.
1618 */
1619 struct page *ra_pool[R5L_RECOVERY_PAGE_POOL_SIZE];
1620 struct bio_vec ra_bvec[R5L_RECOVERY_PAGE_POOL_SIZE];
1621 sector_t pool_offset; /* offset of first page in the pool */
1622 int total_pages; /* total allocated pages */
1623 int valid_pages; /* pages with valid data */
1624};
1625
1626static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
1627 struct r5l_recovery_ctx *ctx)
1628{
1629 struct page *page;
1630
1631 ctx->valid_pages = 0;
1632 ctx->total_pages = 0;
1633 while (ctx->total_pages < R5L_RECOVERY_PAGE_POOL_SIZE) {
1634 page = alloc_page(GFP_KERNEL);
1635
1636 if (!page)
1637 break;
1638 ctx->ra_pool[ctx->total_pages] = page;
1639 ctx->total_pages += 1;
1640 }
1641
1642 if (ctx->total_pages == 0)
1643 return -ENOMEM;
1644
1645 ctx->pool_offset = 0;
1646 return 0;
1647}
1648
1649static void r5l_recovery_free_ra_pool(struct r5l_log *log,
1650 struct r5l_recovery_ctx *ctx)
1651{
1652 int i;
1653
1654 for (i = 0; i < ctx->total_pages; ++i)
1655 put_page(ctx->ra_pool[i]);
1656}
1657
1658/*
 * fetch up to ctx->total_pages pages starting at offset into the pool
1660 * In normal cases, ctx->valid_pages == ctx->total_pages after the call.
1661 * However, if the offset is close to the end of the journal device,
1662 * ctx->valid_pages could be smaller than ctx->total_pages
1663 */
1664static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
1665 struct r5l_recovery_ctx *ctx,
1666 sector_t offset)
1667{
1668 struct bio bio;
1669 int ret;
1670
1671 bio_init(&bio, log->rdev->bdev, ctx->ra_bvec,
1672 R5L_RECOVERY_PAGE_POOL_SIZE, REQ_OP_READ);
1673 bio.bi_iter.bi_sector = log->rdev->data_offset + offset;
1674
1675 ctx->valid_pages = 0;
1676 ctx->pool_offset = offset;
1677
1678 while (ctx->valid_pages < ctx->total_pages) {
1679 __bio_add_page(&bio, ctx->ra_pool[ctx->valid_pages], PAGE_SIZE,
1680 0);
1681 ctx->valid_pages += 1;
1682
1683 offset = r5l_ring_add(log, offset, BLOCK_SECTORS);
1684
1685 if (offset == 0) /* reached end of the device */
1686 break;
1687 }
1688
1689 ret = submit_bio_wait(&bio);
1690 bio_uninit(&bio);
1691 return ret;
1692}
1693
1694/*
 * try to read a page from the read ahead page pool; if the page is not in
 * the pool, call r5l_recovery_fetch_ra_pool to refill the pool first
1697 */
1698static int r5l_recovery_read_page(struct r5l_log *log,
1699 struct r5l_recovery_ctx *ctx,
1700 struct page *page,
1701 sector_t offset)
1702{
1703 int ret;
1704
1705 if (offset < ctx->pool_offset ||
1706 offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS) {
1707 ret = r5l_recovery_fetch_ra_pool(log, ctx, offset);
1708 if (ret)
1709 return ret;
1710 }
1711
1712 BUG_ON(offset < ctx->pool_offset ||
1713 offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS);
1714
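 /* convert the sector offset within the pool into a page index */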
1715 memcpy(page_address(page),
1716 page_address(ctx->ra_pool[(offset - ctx->pool_offset) >>
1717 BLOCK_SECTOR_SHIFT]),
1718 PAGE_SIZE);
1719 return 0;
1720}
1721
1722static int r5l_recovery_read_meta_block(struct r5l_log *log,
1723 struct r5l_recovery_ctx *ctx)
1724{
1725 struct page *page = ctx->meta_page;
1726 struct r5l_meta_block *mb;
1727 u32 crc, stored_crc;
1728 int ret;
1729
1730 ret = r5l_recovery_read_page(log, ctx, page, ctx->pos);
1731 if (ret != 0)
1732 return ret;
1733
1734 mb = page_address(page);
1735 stored_crc = le32_to_cpu(mb->checksum);
1736 mb->checksum = 0;
1737
1738 if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
1739 le64_to_cpu(mb->seq) != ctx->seq ||
1740 mb->version != R5LOG_VERSION ||
1741 le64_to_cpu(mb->position) != ctx->pos)
1742 return -EINVAL;
1743
1744 crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
1745 if (stored_crc != crc)
1746 return -EINVAL;
1747
1748 if (le32_to_cpu(mb->meta_size) > PAGE_SIZE)
1749 return -EINVAL;
1750
1751 ctx->meta_total_blocks = BLOCK_SECTORS;
1752
1753 return 0;
1754}
1755
1756static void
1757r5l_recovery_create_empty_meta_block(struct r5l_log *log,
1758 struct page *page,
1759 sector_t pos, u64 seq)
1760{
1761 struct r5l_meta_block *mb;
1762
1763 mb = page_address(page);
1764 clear_page(mb);
1765 mb->magic = cpu_to_le32(R5LOG_MAGIC);
1766 mb->version = R5LOG_VERSION;
1767 mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
1768 mb->seq = cpu_to_le64(seq);
1769 mb->position = cpu_to_le64(pos);
1770}
1771
1772static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
1773 u64 seq)
1774{
1775 struct page *page;
1776 struct r5l_meta_block *mb;
1777
1778 page = alloc_page(GFP_KERNEL);
1779 if (!page)
1780 return -ENOMEM;
1781 r5l_recovery_create_empty_meta_block(log, page, pos, seq);
1782 mb = page_address(page);
1783 mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
1784 mb, PAGE_SIZE));
1785 if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE |
1786 REQ_SYNC | REQ_FUA, false)) {
1787 __free_page(page);
1788 return -EIO;
1789 }
1790 __free_page(page);
1791 return 0;
1792}
1793
1794/*
 * r5l_recovery_load_data and r5l_recovery_load_parity use flag R5_Wantwrite
1796 * to mark valid (potentially not flushed) data in the journal.
1797 *
1798 * We already verified checksum in r5l_recovery_verify_data_checksum_for_mb,
1799 * so there should not be any mismatch here.
1800 */
1801static void r5l_recovery_load_data(struct r5l_log *log,
1802 struct stripe_head *sh,
1803 struct r5l_recovery_ctx *ctx,
1804 struct r5l_payload_data_parity *payload,
1805 sector_t log_offset)
1806{
1807 struct mddev *mddev = log->rdev->mddev;
1808 struct r5conf *conf = mddev->private;
1809 int dd_idx;
1810
1811 raid5_compute_sector(conf,
1812 le64_to_cpu(payload->location), 0,
1813 &dd_idx, sh);
1814 r5l_recovery_read_page(log, ctx, sh->dev[dd_idx].page, log_offset);
1815 sh->dev[dd_idx].log_checksum =
1816 le32_to_cpu(payload->checksum[0]);
1817 ctx->meta_total_blocks += BLOCK_SECTORS;
1818
1819 set_bit(R5_Wantwrite, &sh->dev[dd_idx].flags);
1820 set_bit(STRIPE_R5C_CACHING, &sh->state);
1821}
1822
1823static void r5l_recovery_load_parity(struct r5l_log *log,
1824 struct stripe_head *sh,
1825 struct r5l_recovery_ctx *ctx,
1826 struct r5l_payload_data_parity *payload,
1827 sector_t log_offset)
1828{
1829 struct mddev *mddev = log->rdev->mddev;
1830 struct r5conf *conf = mddev->private;
1831
1832 ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
1833 r5l_recovery_read_page(log, ctx, sh->dev[sh->pd_idx].page, log_offset);
1834 sh->dev[sh->pd_idx].log_checksum =
1835 le32_to_cpu(payload->checksum[0]);
1836 set_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags);
1837
1838 if (sh->qd_idx >= 0) {
1839 r5l_recovery_read_page(
1840 log, ctx, sh->dev[sh->qd_idx].page,
1841 r5l_ring_add(log, log_offset, BLOCK_SECTORS));
1842 sh->dev[sh->qd_idx].log_checksum =
1843 le32_to_cpu(payload->checksum[1]);
1844 set_bit(R5_Wantwrite, &sh->dev[sh->qd_idx].flags);
1845 }
1846 clear_bit(STRIPE_R5C_CACHING, &sh->state);
1847}
1848
1849static void r5l_recovery_reset_stripe(struct stripe_head *sh)
1850{
1851 int i;
1852
1853 sh->state = 0;
1854 sh->log_start = MaxSector;
1855 for (i = sh->disks; i--; )
1856 sh->dev[i].flags = 0;
1857}
1858
1859static void
1860r5l_recovery_replay_one_stripe(struct r5conf *conf,
1861 struct stripe_head *sh,
1862 struct r5l_recovery_ctx *ctx)
1863{
1864 struct md_rdev *rdev, *rrdev;
1865 int disk_index;
1866 int data_count = 0;
1867
1868 for (disk_index = 0; disk_index < sh->disks; disk_index++) {
1869 if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
1870 continue;
1871 if (disk_index == sh->qd_idx || disk_index == sh->pd_idx)
1872 continue;
1873 data_count++;
1874 }
1875
1876 /*
1877 * stripes that only have parity must have been flushed
1878 * before the crash that we are now recovering from, so
 * there is nothing more to recover.
1880 */
1881 if (data_count == 0)
1882 goto out;
1883
1884 for (disk_index = 0; disk_index < sh->disks; disk_index++) {
1885 if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
1886 continue;
1887
1888 /* in case device is broken */
1889 rdev = conf->disks[disk_index].rdev;
1890 if (rdev) {
1891 atomic_inc(&rdev->nr_pending);
1892 sync_page_io(rdev, sh->sector, PAGE_SIZE,
1893 sh->dev[disk_index].page, REQ_OP_WRITE,
1894 false);
1895 rdev_dec_pending(rdev, rdev->mddev);
1896 }
1897 rrdev = conf->disks[disk_index].replacement;
1898 if (rrdev) {
1899 atomic_inc(&rrdev->nr_pending);
1900 sync_page_io(rrdev, sh->sector, PAGE_SIZE,
1901 sh->dev[disk_index].page, REQ_OP_WRITE,
1902 false);
1903 rdev_dec_pending(rrdev, rrdev->mddev);
1904 }
1905 }
1906 ctx->data_parity_stripes++;
1907out:
1908 r5l_recovery_reset_stripe(sh);
1909}
1910
1911static struct stripe_head *
1912r5c_recovery_alloc_stripe(
1913 struct r5conf *conf,
1914 sector_t stripe_sect,
1915 int noblock)
1916{
1917 struct stripe_head *sh;
1918
1919 sh = raid5_get_active_stripe(conf, NULL, stripe_sect,
1920 noblock ? R5_GAS_NOBLOCK : 0);
1921 if (!sh)
1922 return NULL; /* no more stripe available */
1923
1924 r5l_recovery_reset_stripe(sh);
1925
1926 return sh;
1927}
1928
1929static struct stripe_head *
1930r5c_recovery_lookup_stripe(struct list_head *list, sector_t sect)
1931{
1932 struct stripe_head *sh;
1933
1934 list_for_each_entry(sh, list, lru)
1935 if (sh->sector == sect)
1936 return sh;
1937 return NULL;
1938}
1939
1940static void
1941r5c_recovery_drop_stripes(struct list_head *cached_stripe_list,
1942 struct r5l_recovery_ctx *ctx)
1943{
1944 struct stripe_head *sh, *next;
1945
1946 list_for_each_entry_safe(sh, next, cached_stripe_list, lru) {
1947 r5l_recovery_reset_stripe(sh);
1948 list_del_init(&sh->lru);
1949 raid5_release_stripe(sh);
1950 }
1951}
1952
1953static void
1954r5c_recovery_replay_stripes(struct list_head *cached_stripe_list,
1955 struct r5l_recovery_ctx *ctx)
1956{
1957 struct stripe_head *sh, *next;
1958
1959 list_for_each_entry_safe(sh, next, cached_stripe_list, lru)
1960 if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
1961 r5l_recovery_replay_one_stripe(sh->raid_conf, sh, ctx);
1962 list_del_init(&sh->lru);
1963 raid5_release_stripe(sh);
1964 }
1965}
1966
/* if the checksum matches, return 0; otherwise return -EINVAL */
1968static int
1969r5l_recovery_verify_data_checksum(struct r5l_log *log,
1970 struct r5l_recovery_ctx *ctx,
1971 struct page *page,
1972 sector_t log_offset, __le32 log_checksum)
1973{
1974 void *addr;
1975 u32 checksum;
1976
1977 r5l_recovery_read_page(log, ctx, page, log_offset);
1978 addr = kmap_atomic(page);
1979 checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
1980 kunmap_atomic(addr);
1981 return (le32_to_cpu(log_checksum) == checksum) ? 0 : -EINVAL;
1982}
1983
1984/*
 * before loading data into the stripe cache, we need to verify the checksum
 * of all data; if there is a mismatch for any data page, we drop all data in
 * the meta block
1987 */
1988static int
1989r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log,
1990 struct r5l_recovery_ctx *ctx)
1991{
1992 struct mddev *mddev = log->rdev->mddev;
1993 struct r5conf *conf = mddev->private;
1994 struct r5l_meta_block *mb = page_address(ctx->meta_page);
1995 sector_t mb_offset = sizeof(struct r5l_meta_block);
1996 sector_t log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
1997 struct page *page;
1998 struct r5l_payload_data_parity *payload;
1999 struct r5l_payload_flush *payload_flush;
2000
2001 page = alloc_page(GFP_KERNEL);
2002 if (!page)
2003 return -ENOMEM;
2004
2005 while (mb_offset < le32_to_cpu(mb->meta_size)) {
2006 payload = (void *)mb + mb_offset;
2007 payload_flush = (void *)mb + mb_offset;
2008
2009 if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
2010 if (r5l_recovery_verify_data_checksum(
2011 log, ctx, page, log_offset,
2012 payload->checksum[0]) < 0)
2013 goto mismatch;
2014 } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY) {
2015 if (r5l_recovery_verify_data_checksum(
2016 log, ctx, page, log_offset,
2017 payload->checksum[0]) < 0)
2018 goto mismatch;
2019 if (conf->max_degraded == 2 && /* q for RAID 6 */
2020 r5l_recovery_verify_data_checksum(
2021 log, ctx, page,
2022 r5l_ring_add(log, log_offset,
2023 BLOCK_SECTORS),
2024 payload->checksum[1]) < 0)
2025 goto mismatch;
2026 } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
2027 /* nothing to do for R5LOG_PAYLOAD_FLUSH here */
2028 } else /* not R5LOG_PAYLOAD_DATA/PARITY/FLUSH */
2029 goto mismatch;
2030
2031 if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
2032 mb_offset += sizeof(struct r5l_payload_flush) +
2033 le32_to_cpu(payload_flush->size);
2034 } else {
2035 /* DATA or PARITY payload */
2036 log_offset = r5l_ring_add(log, log_offset,
2037 le32_to_cpu(payload->size));
2038 mb_offset += sizeof(struct r5l_payload_data_parity) +
2039 sizeof(__le32) *
2040 (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
2041 }
2042
2043 }
2044
2045 put_page(page);
2046 return 0;
2047
2048mismatch:
2049 put_page(page);
2050 return -EINVAL;
2051}
2052
2053/*
2054 * Analyze all data/parity pages in one meta block
2055 * Returns:
2056 * 0 for success
 * -EINVAL for unknown payload type
 * -EAGAIN for checksum mismatch of a data page
 * -ENOMEM for running out of memory (alloc_page failed or out of stripes)
2060 */
2061static int
2062r5c_recovery_analyze_meta_block(struct r5l_log *log,
2063 struct r5l_recovery_ctx *ctx,
2064 struct list_head *cached_stripe_list)
2065{
2066 struct mddev *mddev = log->rdev->mddev;
2067 struct r5conf *conf = mddev->private;
2068 struct r5l_meta_block *mb;
2069 struct r5l_payload_data_parity *payload;
2070 struct r5l_payload_flush *payload_flush;
2071 int mb_offset;
2072 sector_t log_offset;
2073 sector_t stripe_sect;
2074 struct stripe_head *sh;
2075 int ret;
2076
2077 /*
2078 * for mismatch in data blocks, we will drop all data in this mb, but
 * we will still read the next mb for other data with FLUSH flag, as
2080 * io_unit could finish out of order.
2081 */
2082 ret = r5l_recovery_verify_data_checksum_for_mb(log, ctx);
2083 if (ret == -EINVAL)
2084 return -EAGAIN;
2085 else if (ret)
 return ret; /* -ENOMEM due to alloc_page() failure */
2087
2088 mb = page_address(ctx->meta_page);
2089 mb_offset = sizeof(struct r5l_meta_block);
2090 log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2091
2092 while (mb_offset < le32_to_cpu(mb->meta_size)) {
2093 int dd;
2094
2095 payload = (void *)mb + mb_offset;
2096 payload_flush = (void *)mb + mb_offset;
2097
2098 if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
2099 int i, count;
2100
2101 count = le32_to_cpu(payload_flush->size) / sizeof(__le64);
2102 for (i = 0; i < count; ++i) {
2103 stripe_sect = le64_to_cpu(payload_flush->flush_stripes[i]);
2104 sh = r5c_recovery_lookup_stripe(cached_stripe_list,
2105 stripe_sect);
2106 if (sh) {
2107 WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
2108 r5l_recovery_reset_stripe(sh);
2109 list_del_init(&sh->lru);
2110 raid5_release_stripe(sh);
2111 }
2112 }
2113
2114 mb_offset += sizeof(struct r5l_payload_flush) +
2115 le32_to_cpu(payload_flush->size);
2116 continue;
2117 }
2118
2119 /* DATA or PARITY payload */
2120 stripe_sect = (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) ?
2121 raid5_compute_sector(
2122 conf, le64_to_cpu(payload->location), 0, &dd,
2123 NULL)
2124 : le64_to_cpu(payload->location);
2125
2126 sh = r5c_recovery_lookup_stripe(cached_stripe_list,
2127 stripe_sect);
2128
2129 if (!sh) {
2130 sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1);
2131 /*
 * cannot get a stripe from raid5_get_active_stripe;
 * try replaying some stripes first
2134 */
2135 if (!sh) {
2136 r5c_recovery_replay_stripes(
2137 cached_stripe_list, ctx);
2138 sh = r5c_recovery_alloc_stripe(
2139 conf, stripe_sect, 1);
2140 }
2141 if (!sh) {
2142 int new_size = conf->min_nr_stripes * 2;
 pr_debug("md/raid:%s: Increasing stripe cache size to %d to recover data on journal.\n",
2144 mdname(mddev),
2145 new_size);
2146 ret = raid5_set_cache_size(mddev, new_size);
2147 if (conf->min_nr_stripes <= new_size / 2) {
2148 pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n",
2149 mdname(mddev),
2150 ret,
2151 new_size,
2152 conf->min_nr_stripes,
2153 conf->max_nr_stripes);
2154 return -ENOMEM;
2155 }
2156 sh = r5c_recovery_alloc_stripe(
2157 conf, stripe_sect, 0);
2158 }
2159 if (!sh) {
2160 pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
2161 mdname(mddev));
2162 return -ENOMEM;
2163 }
2164 list_add_tail(&sh->lru, cached_stripe_list);
2165 }
2166
2167 if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
2168 if (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
2169 test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) {
2170 r5l_recovery_replay_one_stripe(conf, sh, ctx);
2171 list_move_tail(&sh->lru, cached_stripe_list);
2172 }
2173 r5l_recovery_load_data(log, sh, ctx, payload,
2174 log_offset);
2175 } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
2176 r5l_recovery_load_parity(log, sh, ctx, payload,
2177 log_offset);
2178 else
2179 return -EINVAL;
2180
2181 log_offset = r5l_ring_add(log, log_offset,
2182 le32_to_cpu(payload->size));
2183
2184 mb_offset += sizeof(struct r5l_payload_data_parity) +
2185 sizeof(__le32) *
2186 (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
2187 }
2188
2189 return 0;
2190}
2191
2192/*
2193 * Load the stripe into cache. The stripe will be written out later by
2194 * the stripe cache state machine.
2195 */
2196static void r5c_recovery_load_one_stripe(struct r5l_log *log,
2197 struct stripe_head *sh)
2198{
2199 struct r5dev *dev;
2200 int i;
2201
2202 for (i = sh->disks; i--; ) {
2203 dev = sh->dev + i;
2204 if (test_and_clear_bit(R5_Wantwrite, &dev->flags)) {
2205 set_bit(R5_InJournal, &dev->flags);
2206 set_bit(R5_UPTODATE, &dev->flags);
2207 }
2208 }
2209}
2210
2211/*
2212 * Scan through the log for all to-be-flushed data
2213 *
2214 * For stripes with data and parity, namely Data-Parity stripe
2215 * (STRIPE_R5C_CACHING == 0), we simply replay all the writes.
2216 *
2217 * For stripes with only data, namely Data-Only stripe
2218 * (STRIPE_R5C_CACHING == 1), we load them to stripe cache state machine.
2219 *
2220 * For a stripe, if we see data after parity, we should discard all previous
2221 * data and parity for this stripe, as these data are already flushed to
2222 * the array.
2223 *
2224 * At the end of the scan, we return the new journal_tail, which points to
 * the first data-only stripe on the journal device, or to the next invalid
 * meta block.
2226 */
2227static int r5c_recovery_flush_log(struct r5l_log *log,
2228 struct r5l_recovery_ctx *ctx)
2229{
2230 struct stripe_head *sh;
2231 int ret = 0;
2232
2233 /* scan through the log */
2234 while (1) {
2235 if (r5l_recovery_read_meta_block(log, ctx))
2236 break;
2237
2238 ret = r5c_recovery_analyze_meta_block(log, ctx,
2239 &ctx->cached_list);
2240 /*
 * -EAGAIN means a mismatch in a data block; in this case, we still
 * try to scan the next meta block
2243 */
2244 if (ret && ret != -EAGAIN)
2245 break; /* ret == -EINVAL or -ENOMEM */
2246 ctx->seq++;
2247 ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
2248 }
2249
2250 if (ret == -ENOMEM) {
2251 r5c_recovery_drop_stripes(&ctx->cached_list, ctx);
2252 return ret;
2253 }
2254
2255 /* replay data-parity stripes */
2256 r5c_recovery_replay_stripes(&ctx->cached_list, ctx);
2257
2258 /* load data-only stripes to stripe cache */
2259 list_for_each_entry(sh, &ctx->cached_list, lru) {
2260 WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
2261 r5c_recovery_load_one_stripe(log, sh);
2262 ctx->data_only_stripes++;
2263 }
2264
2265 return 0;
2266}
2267
2268/*
 * We did a recovery. Now ctx.pos points to an invalid meta block. The new
 * log will start here, but we can't let the superblock point to the last
 * valid meta block. The log might look like:
 * | meta 1| meta 2| meta 3|
 * meta 1 is valid, meta 2 is invalid. meta 3 could be valid. If the
 * superblock points to meta 1 and we write a new valid meta 2n, then if a
 * crash happens again, the new recovery will start from meta 1. Since meta
 * 2n is valid now, recovery will think meta 3 is valid, which is wrong.
 * The solution is to create a new meta at meta 2 with its seq == meta
 * 1's seq + 10000 and let the superblock point to meta 2. The same recovery
 * will not treat meta 3 as a valid meta, because its seq doesn't match.
2280 */
2281
2282/*
2283 * Before recovery, the log looks like the following
2284 *
2285 * ---------------------------------------------
2286 * | valid log | invalid log |
2287 * ---------------------------------------------
2288 * ^
2289 * |- log->last_checkpoint
2290 * |- log->last_cp_seq
2291 *
2292 * Now we scan through the log until we see invalid entry
2293 *
2294 * ---------------------------------------------
2295 * | valid log | invalid log |
2296 * ---------------------------------------------
2297 * ^ ^
2298 * |- log->last_checkpoint |- ctx->pos
2299 * |- log->last_cp_seq |- ctx->seq
2300 *
 * From this point, we need to increase the seq number by 10000 to avoid
 * confusing the next recovery.
2303 *
2304 * ---------------------------------------------
2305 * | valid log | invalid log |
2306 * ---------------------------------------------
2307 * ^ ^
2308 * |- log->last_checkpoint |- ctx->pos+1
2309 * |- log->last_cp_seq |- ctx->seq+10001
2310 *
2311 * However, it is not safe to start the state machine yet, because data only
2312 * parities are not yet secured in RAID. To save these data only parities, we
2313 * rewrite them from seq+11.
2314 *
2315 * -----------------------------------------------------------------
2316 * | valid log | data only stripes | invalid log |
2317 * -----------------------------------------------------------------
2318 * ^ ^
2319 * |- log->last_checkpoint |- ctx->pos+n
2320 * |- log->last_cp_seq |- ctx->seq+10000+n
2321 *
2322 * If failure happens again during this process, the recovery can safe start
2323 * again from log->last_checkpoint.
2324 *
2325 * Once data only stripes are rewritten to journal, we move log_tail
2326 *
2327 * -----------------------------------------------------------------
2328 * | old log | data only stripes | invalid log |
2329 * -----------------------------------------------------------------
2330 * ^ ^
2331 * |- log->last_checkpoint |- ctx->pos+n
2332 * |- log->last_cp_seq |- ctx->seq+10000+n
2333 *
2334 * Then we can safely start the state machine. If failure happens from this
2335 * point on, the recovery will start from new log->last_checkpoint.
2336 */
2337static int
2338r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
2339 struct r5l_recovery_ctx *ctx)
2340{
2341 struct stripe_head *sh;
2342 struct mddev *mddev = log->rdev->mddev;
2343 struct page *page;
2344 sector_t next_checkpoint = MaxSector;
2345
2346 page = alloc_page(GFP_KERNEL);
2347 if (!page) {
2348 pr_err("md/raid:%s: cannot allocate memory to rewrite data only stripes\n",
2349 mdname(mddev));
2350 return -ENOMEM;
2351 }
2352
2353 WARN_ON(list_empty(&ctx->cached_list));
2354
2355 list_for_each_entry(sh, &ctx->cached_list, lru) {
2356 struct r5l_meta_block *mb;
2357 int i;
2358 int offset;
2359 sector_t write_pos;
2360
2361 WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
2362 r5l_recovery_create_empty_meta_block(log, page,
2363 ctx->pos, ctx->seq);
2364 mb = page_address(page);
2365 offset = le32_to_cpu(mb->meta_size);
2366 write_pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2367
2368 for (i = sh->disks; i--; ) {
2369 struct r5dev *dev = &sh->dev[i];
2370 struct r5l_payload_data_parity *payload;
2371 void *addr;
2372
2373 if (test_bit(R5_InJournal, &dev->flags)) {
2374 payload = (void *)mb + offset;
2375 payload->header.type = cpu_to_le16(
2376 R5LOG_PAYLOAD_DATA);
2377 payload->size = cpu_to_le32(BLOCK_SECTORS);
2378 payload->location = cpu_to_le64(
2379 raid5_compute_blocknr(sh, i, 0));
2380 addr = kmap_atomic(dev->page);
2381 payload->checksum[0] = cpu_to_le32(
2382 crc32c_le(log->uuid_checksum, addr,
2383 PAGE_SIZE));
2384 kunmap_atomic(addr);
2385 sync_page_io(log->rdev, write_pos, PAGE_SIZE,
2386 dev->page, REQ_OP_WRITE, false);
2387 write_pos = r5l_ring_add(log, write_pos,
2388 BLOCK_SECTORS);
2389 offset += sizeof(__le32) +
2390 sizeof(struct r5l_payload_data_parity);
2391
2392 }
2393 }
2394 mb->meta_size = cpu_to_le32(offset);
2395 mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
2396 mb, PAGE_SIZE));
2397 sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
2398 REQ_OP_WRITE | REQ_SYNC | REQ_FUA, false);
2399 sh->log_start = ctx->pos;
2400 list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
2401 atomic_inc(&log->stripe_in_journal_count);
2402 ctx->pos = write_pos;
2403 ctx->seq += 1;
2404 next_checkpoint = sh->log_start;
2405 }
2406 log->next_checkpoint = next_checkpoint;
2407 __free_page(page);
2408 return 0;
2409}
2410
2411static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
2412 struct r5l_recovery_ctx *ctx)
2413{
2414 struct mddev *mddev = log->rdev->mddev;
2415 struct r5conf *conf = mddev->private;
2416 struct stripe_head *sh, *next;
2417 bool cleared_pending = false;
2418
2419 if (ctx->data_only_stripes == 0)
2420 return;
2421
2422 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2423 cleared_pending = true;
2424 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
2425 }
2426 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK;
2427
2428 list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
2429 r5c_make_stripe_write_out(sh);
2430 set_bit(STRIPE_HANDLE, &sh->state);
2431 list_del_init(&sh->lru);
2432 raid5_release_stripe(sh);
2433 }
2434
2435 /* reuse conf->wait_for_quiescent in recovery */
2436 wait_event(conf->wait_for_quiescent,
2437 atomic_read(&conf->active_stripes) == 0);
2438
2439 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
2440 if (cleared_pending)
2441 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
2442}
2443
2444static int r5l_recovery_log(struct r5l_log *log)
2445{
2446 struct mddev *mddev = log->rdev->mddev;
2447 struct r5l_recovery_ctx *ctx;
2448 int ret;
2449 sector_t pos;
2450
2451 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2452 if (!ctx)
2453 return -ENOMEM;
2454
2455 ctx->pos = log->last_checkpoint;
2456 ctx->seq = log->last_cp_seq;
2457 INIT_LIST_HEAD(&ctx->cached_list);
2458 ctx->meta_page = alloc_page(GFP_KERNEL);
2459
2460 if (!ctx->meta_page) {
2461 ret = -ENOMEM;
2462 goto meta_page;
2463 }
2464
2465 if (r5l_recovery_allocate_ra_pool(log, ctx) != 0) {
2466 ret = -ENOMEM;
2467 goto ra_pool;
2468 }
2469
2470 ret = r5c_recovery_flush_log(log, ctx);
2471
2472 if (ret)
2473 goto error;
2474
2475 pos = ctx->pos;
2476 ctx->seq += 10000;
2477
2478 if ((ctx->data_only_stripes == 0) && (ctx->data_parity_stripes == 0))
2479 pr_info("md/raid:%s: starting from clean shutdown\n",
2480 mdname(mddev));
2481 else
2482 pr_info("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
2483 mdname(mddev), ctx->data_only_stripes,
2484 ctx->data_parity_stripes);
2485
2486 if (ctx->data_only_stripes == 0) {
2487 log->next_checkpoint = ctx->pos;
2488 r5l_log_write_empty_meta_block(log, ctx->pos, ctx->seq++);
2489 ctx->pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2490 } else if (r5c_recovery_rewrite_data_only_stripes(log, ctx)) {
2491 pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
2492 mdname(mddev));
2493 ret = -EIO;
2494 goto error;
2495 }
2496
2497 log->log_start = ctx->pos;
2498 log->seq = ctx->seq;
2499 log->last_checkpoint = pos;
2500 r5l_write_super(log, pos);
2501
2502 r5c_recovery_flush_data_only_stripes(log, ctx);
2503 ret = 0;
2504error:
2505 r5l_recovery_free_ra_pool(log, ctx);
2506ra_pool:
2507 __free_page(ctx->meta_page);
2508meta_page:
2509 kfree(ctx);
2510 return ret;
2511}
2512
2513static void r5l_write_super(struct r5l_log *log, sector_t cp)
2514{
2515 struct mddev *mddev = log->rdev->mddev;
2516
2517 log->rdev->journal_tail = cp;
2518 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2519}
2520
2521static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
2522{
2523 struct r5conf *conf;
2524 int ret;
2525
2526 ret = mddev_lock(mddev);
2527 if (ret)
2528 return ret;
2529
2530 conf = mddev->private;
2531 if (!conf || !conf->log)
2532 goto out_unlock;
2533
2534 switch (conf->log->r5c_journal_mode) {
2535 case R5C_JOURNAL_MODE_WRITE_THROUGH:
2536 ret = snprintf(
2537 page, PAGE_SIZE, "[%s] %s\n",
2538 r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
2539 r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
2540 break;
2541 case R5C_JOURNAL_MODE_WRITE_BACK:
2542 ret = snprintf(
2543 page, PAGE_SIZE, "%s [%s]\n",
2544 r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
2545 r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
2546 break;
2547 default:
2548 ret = 0;
2549 }
2550
2551out_unlock:
2552 mddev_unlock(mddev);
2553 return ret;
2554}
2555
2556/*
2557 * Set journal cache mode on @mddev (external API initially needed by dm-raid).
2558 *
2559 * @mode as defined in 'enum r5c_journal_mode'.
2560 *
2561 */
2562int r5c_journal_mode_set(struct mddev *mddev, int mode)
2563{
2564 struct r5conf *conf;
2565
2566 if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH ||
2567 mode > R5C_JOURNAL_MODE_WRITE_BACK)
2568 return -EINVAL;
2569
2570 conf = mddev->private;
2571 if (!conf || !conf->log)
2572 return -ENODEV;
2573
2574 if (raid5_calc_degraded(conf) > 0 &&
2575 mode == R5C_JOURNAL_MODE_WRITE_BACK)
2576 return -EINVAL;
2577
2578 conf->log->r5c_journal_mode = mode;
2579
2580 pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
2581 mdname(mddev), mode, r5c_journal_mode_str[mode]);
2582 return 0;
2583}
2584EXPORT_SYMBOL(r5c_journal_mode_set);
2585
2586static ssize_t r5c_journal_mode_store(struct mddev *mddev,
2587 const char *page, size_t length)
2588{
2589 int mode = ARRAY_SIZE(r5c_journal_mode_str);
2590 size_t len = length;
2591 int ret;
2592
2593 if (len < 2)
2594 return -EINVAL;
2595
2596 if (page[len - 1] == '\n')
2597 len--;
2598
2599 while (mode--)
2600 if (strlen(r5c_journal_mode_str[mode]) == len &&
2601 !strncmp(page, r5c_journal_mode_str[mode], len))
2602 break;
2603 ret = mddev_suspend_and_lock(mddev);
2604 if (ret)
2605 return ret;
2606 ret = r5c_journal_mode_set(mddev, mode);
2607 mddev_unlock_and_resume(mddev);
2608 return ret ?: length;
2609}
2610
2611struct md_sysfs_entry
2612r5c_journal_mode = __ATTR(journal_mode, 0644,
2613 r5c_journal_mode_show, r5c_journal_mode_store);
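
/*
 * The attribute above is exposed through md's sysfs directory, so the
 * journal mode can be inspected and switched from userspace. Below is a
 * hedged standalone sketch (userspace C, illustrative only and not built
 * as part of this file); the /sys/block/md0/md/journal_mode path assumes
 * an array named md0, and the "[active] other" output format follows
 * r5c_journal_mode_show() above.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *attr = "/sys/block/md0/md/journal_mode"; /* assumed path */
	char buf[64];
	ssize_t n;
	int fd;

	fd = open(attr, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, buf, sizeof(buf) - 1);
	close(fd);
	if (n > 0) {
		buf[n] = '\0';
		/* prints e.g. "[write-through] write-back" */
		printf("current: %s", buf);
	}

	fd = open(attr, O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* store() matches the bare mode name; a trailing newline is
	 * stripped before the comparison */
	if (write(fd, "write-back\n", strlen("write-back\n")) < 0)
		perror("write");
	close(fd);
	return 0;
}
#endif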
2614
2615/*
2616 * Try to handle the write operation in the caching phase. This function
2617 * should only be called in write-back mode.
2618 *
2619 * If all outstanding writes can be handled in the caching phase, returns 0.
2620 * If the writes require the write-out phase, calls r5c_make_stripe_write_out()
2621 * and returns -EAGAIN.
2622 */
2623int r5c_try_caching_write(struct r5conf *conf,
2624 struct stripe_head *sh,
2625 struct stripe_head_state *s,
2626 int disks)
2627{
2628 struct r5l_log *log = READ_ONCE(conf->log);
2629 int i;
2630 struct r5dev *dev;
2631 int to_cache = 0;
2632 void __rcu **pslot;
2633 sector_t tree_index;
2634 int ret;
2635 uintptr_t refcount;
2636
2637 BUG_ON(!r5c_is_writeback(log));
2638
2639 if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
2640 /*
2641 * There are two different scenarios here:
2642 * 1. The stripe has some data cached, and it is sent to
2643 * write-out phase for reclaim
2644 * 2. The stripe is clean, and this is the first write
2645 *
2646 * For 1, return -EAGAIN, so we continue with
2647 * handle_stripe_dirtying().
2648 *
2649 * For 2, set STRIPE_R5C_CACHING and continue with caching
2650 * write.
2651 */
2652
2653 /* case 1: anything in the journal (s->injournal) or anything written (s->written) */
2654 if (s->injournal > 0 || s->written > 0)
2655 return -EAGAIN;
2656 /* case 2 */
2657 set_bit(STRIPE_R5C_CACHING, &sh->state);
2658 }
2659
2660 /*
2661 * When run in degraded mode, the array is set to write-through mode.
2662 * This check helps drain pending writes safely during the transition
2663 * to write-through mode.
2664 *
2665 * When a stripe is syncing, the write is also handled in
2666 * write-through mode.
2667 */
2668 if (s->failed || test_bit(STRIPE_SYNCING, &sh->state)) {
2669 r5c_make_stripe_write_out(sh);
2670 return -EAGAIN;
2671 }
2672
2673 for (i = disks; i--; ) {
2674 dev = &sh->dev[i];
2675 /* if non-overwrite, use writing-out phase */
2676 if (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags) &&
2677 !test_bit(R5_InJournal, &dev->flags)) {
2678 r5c_make_stripe_write_out(sh);
2679 return -EAGAIN;
2680 }
2681 }
2682
2683 /* if the stripe is not counted in big_stripe_tree, add it now */
2684 if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) &&
2685 !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
2686 tree_index = r5c_tree_index(conf, sh->sector);
2687 spin_lock(&log->tree_lock);
2688 pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
2689 tree_index);
2690 if (pslot) {
2691 refcount = (uintptr_t)radix_tree_deref_slot_protected(
2692 pslot, &log->tree_lock) >>
2693 R5C_RADIX_COUNT_SHIFT;
2694 radix_tree_replace_slot(
2695 &log->big_stripe_tree, pslot,
2696 (void *)((refcount + 1) << R5C_RADIX_COUNT_SHIFT));
2697 } else {
2698 /*
2699 * this radix_tree_insert can fail safely, so no
2700 * need to call radix_tree_preload()
2701 */
2702 ret = radix_tree_insert(
2703 &log->big_stripe_tree, tree_index,
2704 (void *)(1 << R5C_RADIX_COUNT_SHIFT));
2705 if (ret) {
2706 spin_unlock(&log->tree_lock);
2707 r5c_make_stripe_write_out(sh);
2708 return -EAGAIN;
2709 }
2710 }
2711 spin_unlock(&log->tree_lock);
2712
2713 /*
2714 * set STRIPE_R5C_PARTIAL_STRIPE; this shows the stripe is
2715 * counted in the radix tree
2716 */
2717 set_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state);
2718 atomic_inc(&conf->r5c_cached_partial_stripes);
2719 }
2720
2721 for (i = disks; i--; ) {
2722 dev = &sh->dev[i];
2723 if (dev->towrite) {
2724 set_bit(R5_Wantwrite, &dev->flags);
2725 set_bit(R5_Wantdrain, &dev->flags);
2726 set_bit(R5_LOCKED, &dev->flags);
2727 to_cache++;
2728 }
2729 }
2730
2731 if (to_cache) {
2732 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
2733 /*
2734 * set STRIPE_LOG_TRAPPED, which triggers r5c_cache_data()
2735 * in ops_run_io(). STRIPE_LOG_TRAPPED will be cleared in
2736 * r5c_handle_data_cached()
2737 */
2738 set_bit(STRIPE_LOG_TRAPPED, &sh->state);
2739 }
2740
2741 return 0;
2742}
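
/*
 * r5c_try_caching_write() above (and r5c_finish_stripe_write_out() below)
 * store a bare reference count in each big_stripe_tree slot by shifting it
 * left by R5C_RADIX_COUNT_SHIFT, so no per-entry allocation is needed.
 * Below is a minimal standalone sketch of that packing (userspace C,
 * illustrative only and not built as part of this file; COUNT_SHIFT is a
 * stand-in for R5C_RADIX_COUNT_SHIFT, and keeping the low bits clear for
 * the radix tree's own use is the presumed reason for the shift).
 */
#if 0
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define COUNT_SHIFT 2	/* stand-in for R5C_RADIX_COUNT_SHIFT */

static void *pack(uintptr_t refcount)
{
	return (void *)(refcount << COUNT_SHIFT);
}

static uintptr_t unpack(void *slot)
{
	return (uintptr_t)slot >> COUNT_SHIFT;
}

int main(void)
{
	void *slot;
	uintptr_t refcount;

	/* first cached stripe of this big stripe: insert refcount 1 */
	slot = pack(1);

	/* second cached stripe: bump the packed refcount to 2 */
	slot = pack(unpack(slot) + 1);
	assert(unpack(slot) == 2);

	/* one stripe written out: drop the refcount, or delete the
	 * entry when the last reference goes away */
	refcount = unpack(slot);
	if (refcount == 1)
		slot = NULL;			/* would radix_tree_delete() */
	else
		slot = pack(refcount - 1);
	assert(unpack(slot) == 1);

	printf("entry present: %s\n", slot ? "yes" : "no");
	return 0;
}
#endif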
2743
2744/*
2745 * free extra pages (orig_page) we allocated for prexor
2746 */
2747void r5c_release_extra_page(struct stripe_head *sh)
2748{
2749 struct r5conf *conf = sh->raid_conf;
2750 int i;
2751 bool using_disk_info_extra_page;
2752
2753 using_disk_info_extra_page =
2754 sh->dev[0].orig_page == conf->disks[0].extra_page;
2755
2756 for (i = sh->disks; i--; )
2757 if (sh->dev[i].page != sh->dev[i].orig_page) {
2758 struct page *p = sh->dev[i].orig_page;
2759
2760 sh->dev[i].orig_page = sh->dev[i].page;
2761 clear_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags);
2762
2763 if (!using_disk_info_extra_page)
2764 put_page(p);
2765 }
2766
2767 if (using_disk_info_extra_page) {
2768 clear_bit(R5C_EXTRA_PAGE_IN_USE, &conf->cache_state);
2769 md_wakeup_thread(conf->mddev->thread);
2770 }
2771}
2772
2773void r5c_use_extra_page(struct stripe_head *sh)
2774{
2775 struct r5conf *conf = sh->raid_conf;
2776 int i;
2777 struct r5dev *dev;
2778
2779 for (i = sh->disks; i--; ) {
2780 dev = &sh->dev[i];
2781 if (dev->orig_page != dev->page)
2782 put_page(dev->orig_page);
2783 dev->orig_page = conf->disks[i].extra_page;
2784 }
2785}
2786
2787/*
2788 * clean up the stripe (clear R5_InJournal for dev[pd_idx] etc.) after the
2789 * stripe is committed to RAID disks.
2790 */
2791void r5c_finish_stripe_write_out(struct r5conf *conf,
2792 struct stripe_head *sh,
2793 struct stripe_head_state *s)
2794{
2795 struct r5l_log *log = READ_ONCE(conf->log);
2796 int i;
2797 sector_t tree_index;
2798 void __rcu **pslot;
2799 uintptr_t refcount;
2800
2801 if (!log || !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags))
2802 return;
2803
2804 WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
2805 clear_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
2806
2807 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
2808 return;
2809
2810 for (i = sh->disks; i--; ) {
2811 clear_bit(R5_InJournal, &sh->dev[i].flags);
2812 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2813 wake_up_bit(&sh->dev[i].flags, R5_Overlap);
2814 }
2815
2816 /*
2817 * analyse_stripe() runs before r5c_finish_stripe_write_out().
2818 * We updated R5_InJournal, so we also update s->injournal.
2819 */
2820 s->injournal = 0;
2821
2822 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
2823 if (atomic_dec_and_test(&conf->pending_full_writes))
2824 md_wakeup_thread(conf->mddev->thread);
2825
2826 spin_lock_irq(&log->stripe_in_journal_lock);
2827 list_del_init(&sh->r5c);
2828 spin_unlock_irq(&log->stripe_in_journal_lock);
2829 sh->log_start = MaxSector;
2830
2831 atomic_dec(&log->stripe_in_journal_count);
2832 r5c_update_log_state(log);
2833
2834 /* stop counting this stripe in big_stripe_tree */
2835 if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) ||
2836 test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
2837 tree_index = r5c_tree_index(conf, sh->sector);
2838 spin_lock(&log->tree_lock);
2839 pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
2840 tree_index);
2841 BUG_ON(pslot == NULL);
2842 refcount = (uintptr_t)radix_tree_deref_slot_protected(
2843 pslot, &log->tree_lock) >>
2844 R5C_RADIX_COUNT_SHIFT;
2845 if (refcount == 1)
2846 radix_tree_delete(&log->big_stripe_tree, tree_index);
2847 else
2848 radix_tree_replace_slot(
2849 &log->big_stripe_tree, pslot,
2850 (void *)((refcount - 1) << R5C_RADIX_COUNT_SHIFT));
2851 spin_unlock(&log->tree_lock);
2852 }
2853
2854 if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) {
2855 BUG_ON(atomic_read(&conf->r5c_cached_partial_stripes) == 0);
2856 atomic_dec(&conf->r5c_flushing_partial_stripes);
2857 atomic_dec(&conf->r5c_cached_partial_stripes);
2858 }
2859
2860 if (test_and_clear_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
2861 BUG_ON(atomic_read(&conf->r5c_cached_full_stripes) == 0);
2862 atomic_dec(&conf->r5c_flushing_full_stripes);
2863 atomic_dec(&conf->r5c_cached_full_stripes);
2864 }
2865
2866 r5l_append_flush_payload(log, sh->sector);
2867 /* stripe is flushed to raid disks, we can do resync now */
2868 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
2869 set_bit(STRIPE_HANDLE, &sh->state);
2870}
2871
2872int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh)
2873{
2874 struct r5conf *conf = sh->raid_conf;
2875 int pages = 0;
2876 int reserve;
2877 int i;
2878 int ret = 0;
2879
2880 BUG_ON(!log);
2881
2882 for (i = 0; i < sh->disks; i++) {
2883 void *addr;
2884
2885 if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
2886 continue;
2887 addr = kmap_atomic(sh->dev[i].page);
2888 sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
2889 addr, PAGE_SIZE);
2890 kunmap_atomic(addr);
2891 pages++;
2892 }
2893 WARN_ON(pages == 0);
2894
2895 /*
2896 * The stripe must enter the state machine again to call endio, so
2897 * don't delay.
2898 */
2899 clear_bit(STRIPE_DELAYED, &sh->state);
2900 atomic_inc(&sh->count);
2901
2902 mutex_lock(&log->io_mutex);
2903 /* meta + data */
2904 reserve = (1 + pages) << (PAGE_SHIFT - 9);
2905
2906 if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
2907 sh->log_start == MaxSector)
2908 r5l_add_no_space_stripe(log, sh);
2909 else if (!r5l_has_free_space(log, reserve)) {
2910 if (sh->log_start == log->last_checkpoint)
2911 BUG();
2912 else
2913 r5l_add_no_space_stripe(log, sh);
2914 } else {
2915 ret = r5l_log_stripe(log, sh, pages, 0);
2916 if (ret) {
2917 spin_lock_irq(&log->io_list_lock);
2918 list_add_tail(&sh->log_list, &log->no_mem_stripes);
2919 spin_unlock_irq(&log->io_list_lock);
2920 }
2921 }
2922
2923 mutex_unlock(&log->io_mutex);
2924 return 0;
2925}
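
/*
 * The reserve in r5c_cache_data() above converts "one meta page plus the
 * dirty data pages" into 512-byte sectors: (1 + pages) << (PAGE_SHIFT - 9).
 * A small worked sketch of that conversion (userspace C, illustrative only
 * and not built as part of this file; a 4K page size is assumed, matching
 * the PAGE_SIZE == 4096 requirement of this log).
 */
#if 0
#include <stdio.h>

#define EXAMPLE_PAGE_SHIFT 12	/* 4K pages */

int main(void)
{
	int pages = 3;	/* three dirty data blocks in the stripe */
	int reserve = (1 + pages) << (EXAMPLE_PAGE_SHIFT - 9);

	/* (1 meta + 3 data) pages * 8 sectors per page = 32 sectors */
	printf("reserve = %d sectors\n", reserve);
	return 0;
}
#endif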
2926
2927/* check whether this big stripe is in the write-back cache. */
2928bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect)
2929{
2930 struct r5l_log *log = READ_ONCE(conf->log);
2931 sector_t tree_index;
2932 void *slot;
2933
2934 if (!log)
2935 return false;
2936
2937 tree_index = r5c_tree_index(conf, sect);
2938 slot = radix_tree_lookup(&log->big_stripe_tree, tree_index);
2939 return slot != NULL;
2940}
2941
2942static int r5l_load_log(struct r5l_log *log)
2943{
2944 struct md_rdev *rdev = log->rdev;
2945 struct page *page;
2946 struct r5l_meta_block *mb;
2947 sector_t cp = log->rdev->journal_tail;
2948 u32 stored_crc, expected_crc;
2949 bool create_super = false;
2950 int ret = 0;
2951
2952 /* Make sure it's valid */
2953 if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
2954 cp = 0;
2955 page = alloc_page(GFP_KERNEL);
2956 if (!page)
2957 return -ENOMEM;
2958
2959 if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, false)) {
2960 ret = -EIO;
2961 goto ioerr;
2962 }
2963 mb = page_address(page);
2964
2965 if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
2966 mb->version != R5LOG_VERSION) {
2967 create_super = true;
2968 goto create;
2969 }
2970 stored_crc = le32_to_cpu(mb->checksum);
2971 mb->checksum = 0;
2972 expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
2973 if (stored_crc != expected_crc) {
2974 create_super = true;
2975 goto create;
2976 }
2977 if (le64_to_cpu(mb->position) != cp) {
2978 create_super = true;
2979 goto create;
2980 }
2981create:
2982 if (create_super) {
2983 log->last_cp_seq = get_random_u32();
2984 cp = 0;
2985 r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq);
2986 /*
2987 * Make sure the superblock points to the correct address. The log
2988 * might get data very soon. If the superblock doesn't have the
2989 * correct log tail address, recovery can't find the log.
2990 */
2991 r5l_write_super(log, cp);
2992 } else
2993 log->last_cp_seq = le64_to_cpu(mb->seq);
2994
2995 log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
2996 log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
2997 if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
2998 log->max_free_space = RECLAIM_MAX_FREE_SPACE;
2999 log->last_checkpoint = cp;
3000
3001 __free_page(page);
3002
3003 if (create_super) {
3004 log->log_start = r5l_ring_add(log, cp, BLOCK_SECTORS);
3005 log->seq = log->last_cp_seq + 1;
3006 log->next_checkpoint = cp;
3007 } else
3008 ret = r5l_recovery_log(log);
3009
3010 r5c_update_log_state(log);
3011 return ret;
3012ioerr:
3013 __free_page(page);
3014 return ret;
3015}
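
/*
 * r5l_load_log() above falls back to position 0 when the recorded
 * journal_tail is out of range or not aligned to a 4K block. A minimal
 * standalone sketch of that sanity check (userspace C, illustrative only
 * and not built as part of this file; round_down_pow2() is a local helper
 * mirroring the kernel's round_down() for power-of-two alignments).
 */
#if 0
#include <stdio.h>

typedef unsigned long long sector_t;

#define BLOCK_SECTORS 8ULL

/* round down to a power-of-two alignment, like the kernel's round_down() */
static sector_t round_down_pow2(sector_t x, sector_t align)
{
	return x & ~(align - 1);
}

/* reset an invalid journal tail to 0, as r5l_load_log() does */
static sector_t sanitize_tail(sector_t cp, sector_t dev_sectors)
{
	if (cp >= dev_sectors || round_down_pow2(cp, BLOCK_SECTORS) != cp)
		return 0;
	return cp;
}

int main(void)
{
	sector_t dev = 1000 * BLOCK_SECTORS;

	printf("%llu\n", sanitize_tail(16, dev));	/* aligned, in range: 16 */
	printf("%llu\n", sanitize_tail(17, dev));	/* misaligned: 0 */
	printf("%llu\n", sanitize_tail(dev + 8, dev));	/* out of range: 0 */
	return 0;
}
#endif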
3016
3017int r5l_start(struct r5l_log *log)
3018{
3019 int ret;
3020
3021 if (!log)
3022 return 0;
3023
3024 ret = r5l_load_log(log);
3025 if (ret) {
3026 struct mddev *mddev = log->rdev->mddev;
3027 struct r5conf *conf = mddev->private;
3028
3029 r5l_exit_log(conf);
3030 }
3031 return ret;
3032}
3033
3034void r5c_update_on_rdev_error(struct mddev *mddev, struct md_rdev *rdev)
3035{
3036 struct r5conf *conf = mddev->private;
3037 struct r5l_log *log = READ_ONCE(conf->log);
3038
3039 if (!log)
3040 return;
3041
3042 if ((raid5_calc_degraded(conf) > 0 ||
3043 test_bit(Journal, &rdev->flags)) &&
3044 log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
3045 schedule_work(&log->disable_writeback_work);
3046}
3047
3048int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
3049{
3050 struct r5l_log *log;
3051 struct md_thread *thread;
3052 int ret;
3053
3054 pr_debug("md/raid:%s: using device %pg as journal\n",
3055 mdname(conf->mddev), rdev->bdev);
3056
3057 if (PAGE_SIZE != 4096)
3058 return -EINVAL;
3059
3060 /*
3061 * The PAGE_SIZE must be big enough to hold 1 r5l_meta_block and
3062 * raid_disks r5l_payload_data_parity.
3063 *
3064 * The write journal and cache do not work for very big arrays
3065 * (raid_disks > 203).
3066 */
3067 if (sizeof(struct r5l_meta_block) +
3068 ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) *
3069 conf->raid_disks) > PAGE_SIZE) {
3070 pr_err("md/raid:%s: write journal/cache doesn't work for array with %d disks\n",
3071 mdname(conf->mddev), conf->raid_disks);
3072 return -EINVAL;
3073 }
3074
3075 log = kzalloc(sizeof(*log), GFP_KERNEL);
3076 if (!log)
3077 return -ENOMEM;
3078 log->rdev = rdev;
3079 log->need_cache_flush = bdev_write_cache(rdev->bdev);
3080 log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
3081 sizeof(rdev->mddev->uuid));
3082
3083 mutex_init(&log->io_mutex);
3084
3085 spin_lock_init(&log->io_list_lock);
3086 INIT_LIST_HEAD(&log->running_ios);
3087 INIT_LIST_HEAD(&log->io_end_ios);
3088 INIT_LIST_HEAD(&log->flushing_ios);
3089 INIT_LIST_HEAD(&log->finished_ios);
3090
3091 log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
3092 if (!log->io_kc)
3093 goto io_kc;
3094
3095 ret = mempool_init_slab_pool(&log->io_pool, R5L_POOL_SIZE, log->io_kc);
3096 if (ret)
3097 goto io_pool;
3098
3099 ret = bioset_init(&log->bs, R5L_POOL_SIZE, 0, BIOSET_NEED_BVECS);
3100 if (ret)
3101 goto io_bs;
3102
3103 ret = mempool_init_page_pool(&log->meta_pool, R5L_POOL_SIZE, 0);
3104 if (ret)
3105 goto out_mempool;
3106
3107 spin_lock_init(&log->tree_lock);
3108 INIT_RADIX_TREE(&log->big_stripe_tree, GFP_NOWAIT | __GFP_NOWARN);
3109
3110 thread = md_register_thread(r5l_reclaim_thread, log->rdev->mddev,
3111 "reclaim");
3112 if (!thread)
3113 goto reclaim_thread;
3114
3115 thread->timeout = R5C_RECLAIM_WAKEUP_INTERVAL;
3116 rcu_assign_pointer(log->reclaim_thread, thread);
3117
3118 init_waitqueue_head(&log->iounit_wait);
3119
3120 INIT_LIST_HEAD(&log->no_mem_stripes);
3121
3122 INIT_LIST_HEAD(&log->no_space_stripes);
3123 spin_lock_init(&log->no_space_stripes_lock);
3124
3125 INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
3126 INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async);
3127
3128 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
3129 INIT_LIST_HEAD(&log->stripe_in_journal_list);
3130 spin_lock_init(&log->stripe_in_journal_lock);
3131 atomic_set(&log->stripe_in_journal_count, 0);
3132
3133 WRITE_ONCE(conf->log, log);
3134
3135 set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
3136 return 0;
3137
3138reclaim_thread:
3139 mempool_exit(&log->meta_pool);
3140out_mempool:
3141 bioset_exit(&log->bs);
3142io_bs:
3143 mempool_exit(&log->io_pool);
3144io_pool:
3145 kmem_cache_destroy(log->io_kc);
3146io_kc:
3147 kfree(log);
3148 return -EINVAL;
3149}
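
/*
 * The raid_disks check in r5l_init_log() above requires one r5l_meta_block
 * header plus one (payload header + crc32) pair per member disk to fit in
 * a single 4K block. A worked sketch of that arithmetic (userspace C,
 * illustrative only and not built as part of this file; the 32-byte and
 * 16 + 4 byte sizes are assumed values for the on-disk structures, while
 * the real check uses the actual sizeof() results).
 */
#if 0
#include <stdio.h>

int main(void)
{
	int page_size = 4096;
	int meta_block = 32;	/* assumed sizeof(struct r5l_meta_block) */
	int per_disk = 16 + 4;	/* assumed payload header + one __le32 checksum */

	/* one data (or parity) payload per member disk must fit in the page */
	int max_disks = (page_size - meta_block) / per_disk;

	printf("max raid_disks with one payload each: %d\n", max_disks);	/* 203 */
	return 0;
}
#endif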
3150
3151void r5l_exit_log(struct r5conf *conf)
3152{
3153 struct r5l_log *log = conf->log;
3154
3155 md_unregister_thread(conf->mddev, &log->reclaim_thread);
3156
3157 /*
3158 * 'reconfig_mutex' is held by the caller; set 'conf->log' to NULL to
3159 * ensure disable_writeback_work wakes up and exits.
3160 */
3161 WRITE_ONCE(conf->log, NULL);
3162 wake_up(&conf->mddev->sb_wait);
3163 flush_work(&log->disable_writeback_work);
3164
3165 mempool_exit(&log->meta_pool);
3166 bioset_exit(&log->bs);
3167 mempool_exit(&log->io_pool);
3168 kmem_cache_destroy(log->io_kc);
3169 kfree(log);
3170}