// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Shaohua Li <shli@fb.com>
 * Copyright (C) 2016 Song Liu <songliubraving@fb.com>
 */
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/raid/md_p.h>
#include <linux/crc32c.h>
#include <linux/random.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include "md.h"
#include "raid5.h"
#include "md-bitmap.h"
#include "raid5-log.h"

/*
 * metadata/data are stored on disk in 4k-size units (blocks) regardless of
 * the underlying hardware sector size. This only works with PAGE_SIZE == 4096.
 */
#define BLOCK_SECTORS (8)
#define BLOCK_SECTOR_SHIFT (3)

/*
 * log->max_free_space is min(1/4 disk size, 10G reclaimable space).
 *
 * In write-through mode, reclaim runs every log->max_free_space.
 * This prevents recovery from scanning for too long.
 */
#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
#define RECLAIM_MAX_FREE_SPACE_SHIFT (2)

/* wake up reclaim thread periodically */
#define R5C_RECLAIM_WAKEUP_INTERVAL (30 * HZ)
/* start flush with these full stripes */
#define R5C_FULL_STRIPE_FLUSH_BATCH(conf) (conf->max_nr_stripes / 4)
/* reclaim stripes in groups */
#define R5C_RECLAIM_STRIPE_GROUP (NR_STRIPE_HASH_LOCKS * 2)

/*
 * We only need 2 bios per I/O unit to make progress, but ensure we
 * have a few more available to not get too tight.
 */
#define R5L_POOL_SIZE 4

static char *r5c_journal_mode_str[] = {"write-through",
				       "write-back"};
/*
 * raid5 cache state machine
 *
 * With the RAID cache, each stripe works in two phases:
 *	- caching phase
 *	- writing-out phase
 *
 * These two phases are controlled by bit STRIPE_R5C_CACHING:
 *   if STRIPE_R5C_CACHING == 0, the stripe is in writing-out phase
 *   if STRIPE_R5C_CACHING == 1, the stripe is in caching phase
 *
 * When there is no journal, or the journal is in write-through mode,
 * the stripe is always in writing-out phase.
 *
 * For write-back journal, the stripe is sent to caching phase on write
 * (r5c_try_caching_write). r5c_make_stripe_write_out() kicks off
 * the write-out phase by clearing STRIPE_R5C_CACHING.
 *
 * Stripes in caching phase do not write to the raid disks. Instead, all
 * writes are committed from the log device. Therefore, a stripe in
 * caching phase handles writes as:
 *	- write to log device
 *	- return IO
 *
 * Stripes in writing-out phase handle writes as:
 *	- calculate parity
 *	- write pending data and parity to journal
 *	- write data and parity to raid disks
 *	- return IO for pending writes
 */
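/*
 * Sketch of the transitions described above (editorial illustration, not
 * part of the original source):
 *
 *   caching phase  --r5c_make_stripe_write_out()-->  writing-out phase
 *        ^                                                   |
 *        +------ next write via r5c_try_caching_write() -----+
 */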

struct r5l_log {
	struct md_rdev *rdev;

	u32 uuid_checksum;

	sector_t device_size;		/* log device size, rounded to
					 * BLOCK_SECTORS */
	sector_t max_free_space;	/* reclaim run if free space is at
					 * this size */

	sector_t last_checkpoint;	/* log tail. where recovery scan
					 * starts from */
	u64 last_cp_seq;		/* log tail sequence */

	sector_t log_start;		/* log head. where new data appends */
	u64 seq;			/* log head sequence */

	sector_t next_checkpoint;

	struct mutex io_mutex;
	struct r5l_io_unit *current_io;	/* current io_unit accepting new data */

	spinlock_t io_list_lock;
	struct list_head running_ios;	/* io_units which are still running,
					 * and have not yet been completely
					 * written to the log */
	struct list_head io_end_ios;	/* io_units which have been completely
					 * written to the log but not yet written
					 * to the RAID */
	struct list_head flushing_ios;	/* io_units which are waiting for log
					 * cache flush */
	struct list_head finished_ios;	/* io_units which settle down in log disk */
	struct bio flush_bio;

	struct list_head no_mem_stripes;   /* pending stripes, -ENOMEM */

	struct kmem_cache *io_kc;
	mempool_t io_pool;
	struct bio_set bs;
	mempool_t meta_pool;

	struct md_thread __rcu *reclaim_thread;
	unsigned long reclaim_target;	/* amount of space that needs to be
					 * reclaimed. if it's 0, reclaim spaces
					 * used by io_units which are in
					 * IO_UNIT_STRIPE_END state (eg, reclaim
					 * doesn't wait for a specific io_unit
					 * to switch to IO_UNIT_STRIPE_END
					 * state) */
	wait_queue_head_t iounit_wait;

	struct list_head no_space_stripes; /* pending stripes, log has no space */
	spinlock_t no_space_stripes_lock;

	bool need_cache_flush;

	/* for r5c_cache */
	enum r5c_journal_mode r5c_journal_mode;

	/* all stripes in r5cache, in the order of seq at sh->log_start */
	struct list_head stripe_in_journal_list;

	spinlock_t stripe_in_journal_lock;
	atomic_t stripe_in_journal_count;

	/* to submit async io_units, to fulfill ordering of flush */
	struct work_struct deferred_io_work;
	/* to disable write-back in degraded mode */
	struct work_struct disable_writeback_work;

	/* for chunk_aligned_read in write-back mode, details below */
	spinlock_t tree_lock;
	struct radix_tree_root big_stripe_tree;
};

/*
 * Enable chunk_aligned_read() with write back cache.
 *
 * Each chunk may contain more than one stripe (for example, a 256kB
 * chunk contains 64 4kB pages, so this chunk contains 64 stripes). For
 * chunk_aligned_read, these stripes are grouped into one "big_stripe".
 * For each big_stripe, we count how many stripes of this big_stripe
 * are in the write back cache. This count is tracked in a radix tree
 * (big_stripe_tree). We use the radix_tree item pointer as the counter.
 * r5c_tree_index() is used to calculate keys for the radix tree.
 *
 * chunk_aligned_read() calls r5c_big_stripe_cached() to look up the
 * big_stripe of each chunk in the tree. If this big_stripe is in the
 * tree, chunk_aligned_read() aborts. This lookup is protected by
 * rcu_read_lock().
 *
 * It is necessary to remember whether a stripe is counted in
 * big_stripe_tree. Instead of adding a new flag, we reuse existing flags:
 * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE. If either of these
 * two flags is set, the stripe is counted in big_stripe_tree. This
 * requires moving set_bit(STRIPE_R5C_PARTIAL_STRIPE) to
 * r5c_try_caching_write(); and moving clear_bit of
 * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE to
 * r5c_finish_stripe_write_out().
 */

/*
 * The radix tree requires the lowest 2 bits of the data pointer to be 2b'00.
 * So it is necessary to left shift the counter by 2 bits before using it
 * as the data pointer of the tree.
 */
#define R5C_RADIX_COUNT_SHIFT 2
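/*
 * For illustration (editorial sketch, not part of the original source):
 * a count of n is stored in the tree as the pointer
 * (void *)(n << R5C_RADIX_COUNT_SHIFT), so bumping a big_stripe's counter
 * looks roughly like:
 *
 *	void *p = radix_tree_lookup(&log->big_stripe_tree, tree_index);
 *	uintptr_t count = (uintptr_t)p >> R5C_RADIX_COUNT_SHIFT;
 *	/* store back (count + 1) << R5C_RADIX_COUNT_SHIFT *\/
 *
 * The real update path must hold tree_lock and use the radix tree slot
 * helpers rather than a bare lookup.
 */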

/*
 * calculate key for big_stripe_tree
 *
 * sect: align_bi->bi_iter.bi_sector or sh->sector
 */
static inline sector_t r5c_tree_index(struct r5conf *conf,
				      sector_t sect)
{
	sector_div(sect, conf->chunk_sectors);
	return sect;
}
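/*
 * Worked example (illustrative): with conf->chunk_sectors == 512
 * (256kB chunks), sectors 0-511 map to index 0, sectors 512-1023 to
 * index 1, and so on; e.g. r5c_tree_index(conf, 1234) == 2.
 */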

/*
 * An IO range starts at a metadata block and ends at the next metadata
 * block. The io_unit's metadata block tracks the data/parity that follows
 * it. An io_unit is written to the log disk with a normal write; since we
 * always flush the log disk first and then start moving data to the raid
 * disks, there is no requirement to write the io_unit with FLUSH/FUA.
 */
struct r5l_io_unit {
	struct r5l_log *log;

	struct page *meta_page;	/* store meta block */
	int meta_offset;	/* current offset in meta_page */

	struct bio *current_bio;/* current_bio accepting new data */

	atomic_t pending_stripe;/* how many stripes not flushed to raid */
	u64 seq;		/* seq number of the metablock */
	sector_t log_start;	/* where the io_unit starts */
	sector_t log_end;	/* where the io_unit ends */
	struct list_head log_sibling; /* log->running_ios */
	struct list_head stripe_list; /* stripes added to the io_unit */

	int state;
	bool need_split_bio;
	struct bio *split_bio;

	unsigned int has_flush:1;		/* include flush request */
	unsigned int has_fua:1;			/* include fua request */
	unsigned int has_null_flush:1;		/* include null flush request */
	unsigned int has_flush_payload:1;	/* include flush payload */
	/*
	 * io isn't submitted yet; a flush/fua request can only be submitted
	 * once it is the first IO in the running_ios list
	 */
	unsigned int io_deferred:1;

	struct bio_list flush_barriers;   /* size == 0 flush bios */
};

/* r5l_io_unit state */
enum r5l_io_unit_state {
	IO_UNIT_RUNNING = 0,	/* accepting new IO */
	IO_UNIT_IO_START = 1,	/* io_unit bio started writing to log,
				 * no longer accepting new bios */
	IO_UNIT_IO_END = 2,	/* io_unit bio finished writing to log */
	IO_UNIT_STRIPE_END = 3,	/* stripe data finished writing to raid */
};
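/*
 * Note (editorial): an io_unit only moves forward through these states,
 * RUNNING -> IO_START -> IO_END -> STRIPE_END; __r5l_set_io_unit_state()
 * below warns unless the state strictly advances.
 */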

bool r5c_is_writeback(struct r5l_log *log)
{
	return (log != NULL &&
		log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK);
}

static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
{
	start += inc;
	if (start >= log->device_size)
		start = start - log->device_size;
	return start;
}

static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
				  sector_t end)
{
	if (end >= start)
		return end - start;
	else
		return end + log->device_size - start;
}
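/*
 * Worked example (illustrative): with log->device_size == 1024,
 * r5l_ring_add(log, 1016, 16) == 8 (wraps around), and
 * r5l_ring_distance(log, 1000, 8) == 8 + 1024 - 1000 == 32.
 */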

static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
{
	sector_t used_size;

	used_size = r5l_ring_distance(log, log->last_checkpoint,
				      log->log_start);

	return log->device_size > used_size + size;
}

static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
				    enum r5l_io_unit_state state)
{
	if (WARN_ON(io->state >= state))
		return;
	io->state = state;
}

static void
r5c_return_dev_pending_writes(struct r5conf *conf, struct r5dev *dev)
{
	struct bio *wbi, *wbi2;

	wbi = dev->written;
	dev->written = NULL;
	while (wbi && wbi->bi_iter.bi_sector <
	       dev->sector + RAID5_STRIPE_SECTORS(conf)) {
		wbi2 = r5_next_bio(conf, wbi, dev->sector);
		md_write_end(conf->mddev);
		bio_endio(wbi);
		wbi = wbi2;
	}
}

void r5c_handle_cached_data_endio(struct r5conf *conf,
				  struct stripe_head *sh, int disks)
{
	int i;

	for (i = sh->disks; i--; ) {
		if (sh->dev[i].written) {
			set_bit(R5_UPTODATE, &sh->dev[i].flags);
			r5c_return_dev_pending_writes(conf, &sh->dev[i]);
		}
	}
}

void r5l_wake_reclaim(struct r5l_log *log, sector_t space);

/* Check whether we should flush some stripes to free up stripe cache */
void r5c_check_stripe_cache_usage(struct r5conf *conf)
{
	int total_cached;
	struct r5l_log *log = READ_ONCE(conf->log);

	if (!r5c_is_writeback(log))
		return;

	total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
		atomic_read(&conf->r5c_cached_full_stripes);

	/*
	 * The following condition is true for either of the following:
	 *   - stripe cache pressure high:
	 *          total_cached > 3/4 min_nr_stripes ||
	 *          empty_inactive_list_nr > 0
	 *   - stripe cache pressure moderate:
	 *          total_cached > 1/2 min_nr_stripes
	 */
	if (total_cached > conf->min_nr_stripes * 1 / 2 ||
	    atomic_read(&conf->empty_inactive_list_nr) > 0)
		r5l_wake_reclaim(log, 0);
}

/*
 * flush cache when there are R5C_FULL_STRIPE_FLUSH_BATCH or more full
 * stripes in the cache
 */
void r5c_check_cached_full_stripe(struct r5conf *conf)
{
	struct r5l_log *log = READ_ONCE(conf->log);

	if (!r5c_is_writeback(log))
		return;

	/*
	 * wake up reclaim for R5C_FULL_STRIPE_FLUSH_BATCH cached stripes
	 * or a full stripe (chunk size / 4k stripes).
	 */
	if (atomic_read(&conf->r5c_cached_full_stripes) >=
	    min(R5C_FULL_STRIPE_FLUSH_BATCH(conf),
		conf->chunk_sectors >> RAID5_STRIPE_SHIFT(conf)))
		r5l_wake_reclaim(log, 0);
}

/*
 * Total log space (in sectors) needed to flush all data in cache
 *
 * To avoid deadlock due to log space, it is necessary to reserve log
 * space to flush critical stripes (stripes occupying log space near
 * last_checkpoint). This function helps check how much log space is
 * required to flush all cached stripes.
 *
 * To reduce log space requirements, two mechanisms are used to give cache
 * flushes higher priority:
 * 1. In handle_stripe_dirtying() and schedule_reconstruction(),
 *    stripes ALREADY in journal can be flushed w/o pending writes;
 * 2. In r5l_write_stripe() and r5c_cache_data(), stripes NOT in journal
 *    can be delayed (r5l_add_no_space_stripe).
 *
 * In cache flush, the stripe goes through 1 and then 2. For a stripe that
 * already passed 1, flushing it requires at most (conf->max_degraded + 1)
 * pages of journal space. For stripes that have not passed 1, flushing
 * requires (conf->raid_disks + 1) pages of journal space. There are at
 * most (conf->group_cnt + 1) stripes that passed 1. So total journal space
 * required to flush all cached stripes (in pages) is:
 *
 *   (stripe_in_journal_count - group_cnt - 1) * (max_degraded + 1) +
 *   (group_cnt + 1) * (raid_disks + 1)
 * or
 *   (stripe_in_journal_count) * (max_degraded + 1) +
 *   (group_cnt + 1) * (raid_disks - max_degraded)
 */
static sector_t r5c_log_required_to_flush_cache(struct r5conf *conf)
{
	struct r5l_log *log = READ_ONCE(conf->log);

	if (!r5c_is_writeback(log))
		return 0;

	return BLOCK_SECTORS *
		((conf->max_degraded + 1) * atomic_read(&log->stripe_in_journal_count) +
		 (conf->raid_disks - conf->max_degraded) * (conf->group_cnt + 1));
}
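/*
 * Worked example (illustrative): for a 6-disk RAID6 array
 * (raid_disks == 6, max_degraded == 2) with group_cnt == 0 and 100
 * stripes in the journal, the space reserved for flushing is
 * BLOCK_SECTORS * (3 * 100 + 4 * 1) == 8 * 304 == 2432 sectors.
 */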

/*
 * evaluate log space usage and update R5C_LOG_TIGHT and R5C_LOG_CRITICAL
 *
 * R5C_LOG_TIGHT is set when free space on the log device is less than 3x of
 * reclaim_required_space. R5C_LOG_CRITICAL is set when free space on the log
 * device is less than 2x of reclaim_required_space.
 */
static inline void r5c_update_log_state(struct r5l_log *log)
{
	struct r5conf *conf = log->rdev->mddev->private;
	sector_t free_space;
	sector_t reclaim_space;
	bool wake_reclaim = false;

	if (!r5c_is_writeback(log))
		return;

	free_space = r5l_ring_distance(log, log->log_start,
				       log->last_checkpoint);
	reclaim_space = r5c_log_required_to_flush_cache(conf);
	if (free_space < 2 * reclaim_space)
		set_bit(R5C_LOG_CRITICAL, &conf->cache_state);
	else {
		if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
			wake_reclaim = true;
		clear_bit(R5C_LOG_CRITICAL, &conf->cache_state);
	}
	if (free_space < 3 * reclaim_space)
		set_bit(R5C_LOG_TIGHT, &conf->cache_state);
	else
		clear_bit(R5C_LOG_TIGHT, &conf->cache_state);

	if (wake_reclaim)
		r5l_wake_reclaim(log, 0);
}

/*
 * Put the stripe into writing-out phase by clearing STRIPE_R5C_CACHING.
 * This function should only be called in write-back mode.
 */
void r5c_make_stripe_write_out(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	struct r5l_log *log = READ_ONCE(conf->log);

	BUG_ON(!r5c_is_writeback(log));

	WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
	clear_bit(STRIPE_R5C_CACHING, &sh->state);

	if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
		atomic_inc(&conf->preread_active_stripes);
}

static void r5c_handle_data_cached(struct stripe_head *sh)
{
	int i;

	for (i = sh->disks; i--; )
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
			set_bit(R5_InJournal, &sh->dev[i].flags);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
		}
	clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
}

/*
 * this journal write must contain full parity,
 * and may also contain some data pages
 */
static void r5c_handle_parity_cached(struct stripe_head *sh)
{
	int i;

	for (i = sh->disks; i--; )
		if (test_bit(R5_InJournal, &sh->dev[i].flags))
			set_bit(R5_Wantwrite, &sh->dev[i].flags);
}

/*
 * Set proper flags after writing (or flushing) data and/or parity to the
 * log device. This is called from r5l_log_endio() or r5l_log_flush_endio().
 */
static void r5c_finish_cache_stripe(struct stripe_head *sh)
{
	struct r5l_log *log = READ_ONCE(sh->raid_conf->log);

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
		BUG_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
		/*
		 * Set R5_InJournal for parity dev[pd_idx]. This means
		 * all data AND parity are in the journal. For RAID 6, it is
		 * NOT necessary to set the flag for dev[qd_idx], as the
		 * two parities are written out together.
		 */
		set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
	} else if (test_bit(STRIPE_R5C_CACHING, &sh->state)) {
		r5c_handle_data_cached(sh);
	} else {
		r5c_handle_parity_cached(sh);
		set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
	}
}

static void r5l_io_run_stripes(struct r5l_io_unit *io)
{
	struct stripe_head *sh, *next;

	list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
		list_del_init(&sh->log_list);

		r5c_finish_cache_stripe(sh);

		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
}

static void r5l_log_run_stripes(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;

	lockdep_assert_held(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_IO_END)
			break;

		list_move_tail(&io->log_sibling, &log->finished_ios);
		r5l_io_run_stripes(io);
	}
}

static void r5l_move_to_end_ios(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;

	lockdep_assert_held(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_IO_END)
			break;
		list_move_tail(&io->log_sibling, &log->io_end_ios);
	}
}

static void __r5l_stripe_write_finished(struct r5l_io_unit *io);
static void r5l_log_endio(struct bio *bio)
{
	struct r5l_io_unit *io = bio->bi_private;
	struct r5l_io_unit *io_deferred;
	struct r5l_log *log = io->log;
	unsigned long flags;
	bool has_null_flush;
	bool has_flush_payload;

	if (bio->bi_status)
		md_error(log->rdev->mddev, log->rdev);

	bio_put(bio);
	mempool_free(io->meta_page, &log->meta_pool);

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_END);

	/*
	 * if the io doesn't have a null_flush or flush payload,
	 * it is not safe to access it after releasing io_list_lock.
	 * Therefore, it is necessary to check the condition with
	 * the lock held.
	 */
	has_null_flush = io->has_null_flush;
	has_flush_payload = io->has_flush_payload;

	if (log->need_cache_flush && !list_empty(&io->stripe_list))
		r5l_move_to_end_ios(log);
	else
		r5l_log_run_stripes(log);
	if (!list_empty(&log->running_ios)) {
		/*
		 * FLUSH/FUA io_unit is deferred because of ordering, now we
		 * can dispatch it
		 */
		io_deferred = list_first_entry(&log->running_ios,
					       struct r5l_io_unit, log_sibling);
		if (io_deferred->io_deferred)
			schedule_work(&log->deferred_io_work);
	}

	spin_unlock_irqrestore(&log->io_list_lock, flags);

	if (log->need_cache_flush)
		md_wakeup_thread(log->rdev->mddev->thread);

	/* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */
	if (has_null_flush) {
		struct bio *bi;

		WARN_ON(bio_list_empty(&io->flush_barriers));
		while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) {
			bio_endio(bi);
			if (atomic_dec_and_test(&io->pending_stripe)) {
				__r5l_stripe_write_finished(io);
				return;
			}
		}
	}
	/* decrease pending_stripe for flush payload */
	if (has_flush_payload)
		if (atomic_dec_and_test(&io->pending_stripe))
			__r5l_stripe_write_finished(io);
}

static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
{
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_START);
	spin_unlock_irqrestore(&log->io_list_lock, flags);

	/*
	 * In case of journal device failures, submit_bio will get an error
	 * and call endio, and then the active stripes will continue the
	 * write process. Therefore, it is not necessary to check the Faulty
	 * bit of the journal device here.
	 *
	 * We can't check split_bio after current_bio is submitted. If
	 * io->split_bio is null, after current_bio is submitted, current_bio
	 * might already be completed and the io_unit is freed. We submit
	 * split_bio first to avoid the issue.
	 */
	if (io->split_bio) {
		if (io->has_flush)
			io->split_bio->bi_opf |= REQ_PREFLUSH;
		if (io->has_fua)
			io->split_bio->bi_opf |= REQ_FUA;
		submit_bio(io->split_bio);
	}

	if (io->has_flush)
		io->current_bio->bi_opf |= REQ_PREFLUSH;
	if (io->has_fua)
		io->current_bio->bi_opf |= REQ_FUA;
	submit_bio(io->current_bio);
}

/* deferred io_unit will be dispatched here */
static void r5l_submit_io_async(struct work_struct *work)
{
	struct r5l_log *log = container_of(work, struct r5l_log,
					   deferred_io_work);
	struct r5l_io_unit *io = NULL;
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	if (!list_empty(&log->running_ios)) {
		io = list_first_entry(&log->running_ios, struct r5l_io_unit,
				      log_sibling);
		if (!io->io_deferred)
			io = NULL;
		else
			io->io_deferred = 0;
	}
	spin_unlock_irqrestore(&log->io_list_lock, flags);
	if (io)
		r5l_do_submit_io(log, io);
}

static void r5c_disable_writeback_async(struct work_struct *work)
{
	struct r5l_log *log = container_of(work, struct r5l_log,
					   disable_writeback_work);
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
		return;
	pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n",
		mdname(mddev));

	/* wait for superblock change before suspend */
	wait_event(mddev->sb_wait,
		   !READ_ONCE(conf->log) ||
		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));

	log = READ_ONCE(conf->log);
	if (log) {
		mddev_suspend(mddev, false);
		log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
		mddev_resume(mddev);
	}
}

static void r5l_submit_current_io(struct r5l_log *log)
{
	struct r5l_io_unit *io = log->current_io;
	struct r5l_meta_block *block;
	unsigned long flags;
	u32 crc;
	bool do_submit = true;

	if (!io)
		return;

	block = page_address(io->meta_page);
	block->meta_size = cpu_to_le32(io->meta_offset);
	crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
	block->checksum = cpu_to_le32(crc);

	log->current_io = NULL;
	spin_lock_irqsave(&log->io_list_lock, flags);
	if (io->has_flush || io->has_fua) {
		if (io != list_first_entry(&log->running_ios,
					   struct r5l_io_unit, log_sibling)) {
			io->io_deferred = 1;
			do_submit = false;
		}
	}
	spin_unlock_irqrestore(&log->io_list_lock, flags);
	if (do_submit)
		r5l_do_submit_io(log, io);
}

static struct bio *r5l_bio_alloc(struct r5l_log *log)
{
	struct bio *bio = bio_alloc_bioset(log->rdev->bdev, BIO_MAX_VECS,
					   REQ_OP_WRITE, GFP_NOIO, &log->bs);

	bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;

	return bio;
}

static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
{
	log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);

	r5c_update_log_state(log);
	/*
	 * If we filled up the log device, start from the beginning again,
	 * which will require a new bio.
	 *
	 * Note: for this to work properly the log size needs to be a multiple
	 * of BLOCK_SECTORS.
	 */
	if (log->log_start == 0)
		io->need_split_bio = true;

	io->log_end = log->log_start;
}

static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
{
	struct r5l_io_unit *io;
	struct r5l_meta_block *block;

	io = mempool_alloc(&log->io_pool, GFP_ATOMIC);
	if (!io)
		return NULL;
	memset(io, 0, sizeof(*io));

	io->log = log;
	INIT_LIST_HEAD(&io->log_sibling);
	INIT_LIST_HEAD(&io->stripe_list);
	bio_list_init(&io->flush_barriers);
	io->state = IO_UNIT_RUNNING;

	io->meta_page = mempool_alloc(&log->meta_pool, GFP_NOIO);
	block = page_address(io->meta_page);
	clear_page(block);
	block->magic = cpu_to_le32(R5LOG_MAGIC);
	block->version = R5LOG_VERSION;
	block->seq = cpu_to_le64(log->seq);
	block->position = cpu_to_le64(log->log_start);

	io->log_start = log->log_start;
	io->meta_offset = sizeof(struct r5l_meta_block);
	io->seq = log->seq++;

	io->current_bio = r5l_bio_alloc(log);
	io->current_bio->bi_end_io = r5l_log_endio;
	io->current_bio->bi_private = io;
	__bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0);

	r5_reserve_log_entry(log, io);

	spin_lock_irq(&log->io_list_lock);
	list_add_tail(&io->log_sibling, &log->running_ios);
	spin_unlock_irq(&log->io_list_lock);

	return io;
}

static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
{
	if (log->current_io &&
	    log->current_io->meta_offset + payload_size > PAGE_SIZE)
		r5l_submit_current_io(log);

	if (!log->current_io) {
		log->current_io = r5l_new_meta(log);
		if (!log->current_io)
			return -ENOMEM;
	}

	return 0;
}

static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
				    sector_t location,
				    u32 checksum1, u32 checksum2,
				    bool checksum2_valid)
{
	struct r5l_io_unit *io = log->current_io;
	struct r5l_payload_data_parity *payload;

	payload = page_address(io->meta_page) + io->meta_offset;
	payload->header.type = cpu_to_le16(type);
	payload->header.flags = cpu_to_le16(0);
	payload->size = cpu_to_le32((1 + !!checksum2_valid) <<
				    (PAGE_SHIFT - 9));
	payload->location = cpu_to_le64(location);
	payload->checksum[0] = cpu_to_le32(checksum1);
	if (checksum2_valid)
		payload->checksum[1] = cpu_to_le32(checksum2);

	io->meta_offset += sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * (1 + !!checksum2_valid);
}
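/*
 * Illustrative layout of a meta page after two appends (editorial sketch):
 * the r5l_meta_block header is followed by variable-size payload
 * descriptors, and io->meta_offset always points at the first free byte:
 *
 *	+--------------------+ 0
 *	| r5l_meta_block     |
 *	+--------------------+ sizeof(struct r5l_meta_block)
 *	| data payload       | header + 1 checksum
 *	+--------------------+
 *	| parity payload     | header + 1 or 2 checksums
 *	+--------------------+ io->meta_offset
 */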

static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
{
	struct r5l_io_unit *io = log->current_io;

	if (io->need_split_bio) {
		BUG_ON(io->split_bio);
		io->split_bio = io->current_bio;
		io->current_bio = r5l_bio_alloc(log);
		bio_chain(io->current_bio, io->split_bio);
		io->need_split_bio = false;
	}

	if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
		BUG();

	r5_reserve_log_entry(log, io);
}

static void r5l_append_flush_payload(struct r5l_log *log, sector_t sect)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;
	struct r5l_io_unit *io;
	struct r5l_payload_flush *payload;
	int meta_size;

	/*
	 * payload_flush requires extra writes to the journal.
	 * To avoid handling the extra IO in quiesce, just skip
	 * flush_payload
	 */
	if (conf->quiesce)
		return;

	mutex_lock(&log->io_mutex);
	meta_size = sizeof(struct r5l_payload_flush) + sizeof(__le64);

	if (r5l_get_meta(log, meta_size)) {
		mutex_unlock(&log->io_mutex);
		return;
	}

	/* current implementation is one stripe per flush payload */
	io = log->current_io;
	payload = page_address(io->meta_page) + io->meta_offset;
	payload->header.type = cpu_to_le16(R5LOG_PAYLOAD_FLUSH);
	payload->header.flags = cpu_to_le16(0);
	payload->size = cpu_to_le32(sizeof(__le64));
	payload->flush_stripes[0] = cpu_to_le64(sect);
	io->meta_offset += meta_size;
	/* multiple flush payloads count as one pending_stripe */
	if (!io->has_flush_payload) {
		io->has_flush_payload = 1;
		atomic_inc(&io->pending_stripe);
	}
	mutex_unlock(&log->io_mutex);
}

static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
			  int data_pages, int parity_pages)
{
	int i;
	int meta_size;
	int ret;
	struct r5l_io_unit *io;

	meta_size =
		((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
		 * data_pages) +
		sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * parity_pages;

	ret = r5l_get_meta(log, meta_size);
	if (ret)
		return ret;

	io = log->current_io;

	if (test_and_clear_bit(STRIPE_R5C_PREFLUSH, &sh->state))
		io->has_flush = 1;

	for (i = 0; i < sh->disks; i++) {
		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
		    test_bit(R5_InJournal, &sh->dev[i].flags))
			continue;
		if (i == sh->pd_idx || i == sh->qd_idx)
			continue;
		if (test_bit(R5_WantFUA, &sh->dev[i].flags) &&
		    log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) {
			io->has_fua = 1;
			/*
			 * we need to flush the journal to make sure recovery
			 * can reach the data with the fua flag
			 */
			io->has_flush = 1;
		}
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
					raid5_compute_blocknr(sh, i, 0),
					sh->dev[i].log_checksum, 0, false);
		r5l_append_payload_page(log, sh->dev[i].page);
	}

	if (parity_pages == 2) {
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
					sh->sector, sh->dev[sh->pd_idx].log_checksum,
					sh->dev[sh->qd_idx].log_checksum, true);
		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
		r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
	} else if (parity_pages == 1) {
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
					sh->sector, sh->dev[sh->pd_idx].log_checksum,
					0, false);
		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
	} else /* Just writing data, not parity, in caching phase */
		BUG_ON(parity_pages != 0);

	list_add_tail(&sh->log_list, &io->stripe_list);
	atomic_inc(&io->pending_stripe);
	sh->log_io = io;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
		return 0;

	if (sh->log_start == MaxSector) {
		BUG_ON(!list_empty(&sh->r5c));
		sh->log_start = io->log_start;
		spin_lock_irq(&log->stripe_in_journal_lock);
		list_add_tail(&sh->r5c,
			      &log->stripe_in_journal_list);
		spin_unlock_irq(&log->stripe_in_journal_lock);
		atomic_inc(&log->stripe_in_journal_count);
	}
	return 0;
}

/* add stripe to no_space_stripes, and then wake up reclaim */
static inline void r5l_add_no_space_stripe(struct r5l_log *log,
					   struct stripe_head *sh)
{
	spin_lock(&log->no_space_stripes_lock);
	list_add_tail(&sh->log_list, &log->no_space_stripes);
	spin_unlock(&log->no_space_stripes_lock);
}

/*
 * running in raid5d, where reclaim could wait for raid5d too (when it flushes
 * data from log to raid disks), so we shouldn't wait for reclaim here
 */
int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	int write_disks = 0;
	int data_pages, parity_pages;
	int reserve;
	int i;
	int ret = 0;
	bool wake_reclaim = false;

	if (!log)
		return -EAGAIN;
	/* Don't support stripe batch */
	if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
	    test_bit(STRIPE_SYNCING, &sh->state)) {
		/* the stripe is written to log, we start writing it to raid */
		clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
		return -EAGAIN;
	}

	WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));

	for (i = 0; i < sh->disks; i++) {
		void *addr;

		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
		    test_bit(R5_InJournal, &sh->dev[i].flags))
			continue;

		write_disks++;
		/* checksum is already calculated in the last run */
		if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
			continue;
		addr = kmap_atomic(sh->dev[i].page);
		sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
						    addr, PAGE_SIZE);
		kunmap_atomic(addr);
	}
	parity_pages = 1 + !!(sh->qd_idx >= 0);
	data_pages = write_disks - parity_pages;

	set_bit(STRIPE_LOG_TRAPPED, &sh->state);
	/*
	 * The stripe must enter state machine again to finish the write, so
	 * don't delay.
	 */
	clear_bit(STRIPE_DELAYED, &sh->state);
	atomic_inc(&sh->count);

	mutex_lock(&log->io_mutex);
	/* meta + data */
	reserve = (1 + write_disks) << (PAGE_SHIFT - 9);
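	/*
	 * Illustrative example: for a stripe with 4 data pages and 2 parity
	 * pages (RAID6), write_disks == 6, so reserve covers 7 pages
	 * (meta block + data + parity) == 56 sectors.
	 */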

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
		if (!r5l_has_free_space(log, reserve)) {
			r5l_add_no_space_stripe(log, sh);
			wake_reclaim = true;
		} else {
			ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
			if (ret) {
				spin_lock_irq(&log->io_list_lock);
				list_add_tail(&sh->log_list,
					      &log->no_mem_stripes);
				spin_unlock_irq(&log->io_list_lock);
			}
		}
	} else {	/* R5C_JOURNAL_MODE_WRITE_BACK */
		/*
		 * log space critical, do not process stripes that are
		 * not in cache yet (sh->log_start == MaxSector).
		 */
		if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
		    sh->log_start == MaxSector) {
			r5l_add_no_space_stripe(log, sh);
			wake_reclaim = true;
			reserve = 0;
		} else if (!r5l_has_free_space(log, reserve)) {
			if (sh->log_start == log->last_checkpoint)
				BUG();
			else
				r5l_add_no_space_stripe(log, sh);
		} else {
			ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
			if (ret) {
				spin_lock_irq(&log->io_list_lock);
				list_add_tail(&sh->log_list,
					      &log->no_mem_stripes);
				spin_unlock_irq(&log->io_list_lock);
			}
		}
	}

	mutex_unlock(&log->io_mutex);
	if (wake_reclaim)
		r5l_wake_reclaim(log, reserve);
	return 0;
}

void r5l_write_stripe_run(struct r5l_log *log)
{
	if (!log)
		return;
	mutex_lock(&log->io_mutex);
	r5l_submit_current_io(log);
	mutex_unlock(&log->io_mutex);
}

int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
{
	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
		/*
		 * in write-through (journal only)
		 * we flush log disk cache first, then write stripe data to
		 * raid disks. So if bio is finished, the log disk cache is
		 * flushed already. Recovery guarantees we can recover the
		 * bio from the log disk, so we don't need to flush again.
		 */
		if (bio->bi_iter.bi_size == 0) {
			bio_endio(bio);
			return 0;
		}
		bio->bi_opf &= ~REQ_PREFLUSH;
	} else {
		/* write back (with cache) */
		if (bio->bi_iter.bi_size == 0) {
			mutex_lock(&log->io_mutex);
			r5l_get_meta(log, 0);
			bio_list_add(&log->current_io->flush_barriers, bio);
			log->current_io->has_flush = 1;
			log->current_io->has_null_flush = 1;
			atomic_inc(&log->current_io->pending_stripe);
			r5l_submit_current_io(log);
			mutex_unlock(&log->io_mutex);
			return 0;
		}
	}
	return -EAGAIN;
}

/* This will run after log space is reclaimed */
static void r5l_run_no_space_stripes(struct r5l_log *log)
{
	struct stripe_head *sh;

	spin_lock(&log->no_space_stripes_lock);
	while (!list_empty(&log->no_space_stripes)) {
		sh = list_first_entry(&log->no_space_stripes,
				      struct stripe_head, log_list);
		list_del_init(&sh->log_list);
		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
	spin_unlock(&log->no_space_stripes_lock);
}

/*
 * calculate new last_checkpoint
 * for write through mode, returns log->next_checkpoint
 * for write back, returns log_start of first sh in stripe_in_journal_list
 */
static sector_t r5c_calculate_new_cp(struct r5conf *conf)
{
	struct stripe_head *sh;
	struct r5l_log *log = READ_ONCE(conf->log);
	sector_t new_cp;
	unsigned long flags;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
		return log->next_checkpoint;

	spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
	if (list_empty(&log->stripe_in_journal_list)) {
		/* all stripes flushed */
		spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
		return log->next_checkpoint;
	}
	sh = list_first_entry(&log->stripe_in_journal_list,
			      struct stripe_head, r5c);
	new_cp = sh->log_start;
	spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
	return new_cp;
}

static sector_t r5l_reclaimable_space(struct r5l_log *log)
{
	struct r5conf *conf = log->rdev->mddev->private;

	return r5l_ring_distance(log, log->last_checkpoint,
				 r5c_calculate_new_cp(conf));
}

static void r5l_run_no_mem_stripe(struct r5l_log *log)
{
	struct stripe_head *sh;

	lockdep_assert_held(&log->io_list_lock);

	if (!list_empty(&log->no_mem_stripes)) {
		sh = list_first_entry(&log->no_mem_stripes,
				      struct stripe_head, log_list);
		list_del_init(&sh->log_list);
		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
}

static bool r5l_complete_finished_ios(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;
	bool found = false;

	lockdep_assert_held(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_STRIPE_END)
			break;

		log->next_checkpoint = io->log_start;

		list_del(&io->log_sibling);
		mempool_free(io, &log->io_pool);
		r5l_run_no_mem_stripe(log);

		found = true;
	}

	return found;
}

static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
{
	struct r5l_log *log = io->log;
	struct r5conf *conf = log->rdev->mddev->private;
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END);

	if (!r5l_complete_finished_ios(log)) {
		spin_unlock_irqrestore(&log->io_list_lock, flags);
		return;
	}

	if (r5l_reclaimable_space(log) > log->max_free_space ||
	    test_bit(R5C_LOG_TIGHT, &conf->cache_state))
		r5l_wake_reclaim(log, 0);

	spin_unlock_irqrestore(&log->io_list_lock, flags);
	wake_up(&log->iounit_wait);
}

void r5l_stripe_write_finished(struct stripe_head *sh)
{
	struct r5l_io_unit *io;

	io = sh->log_io;
	sh->log_io = NULL;

	if (io && atomic_dec_and_test(&io->pending_stripe))
		__r5l_stripe_write_finished(io);
}

static void r5l_log_flush_endio(struct bio *bio)
{
	struct r5l_log *log = container_of(bio, struct r5l_log,
					   flush_bio);
	unsigned long flags;
	struct r5l_io_unit *io;

	if (bio->bi_status)
		md_error(log->rdev->mddev, log->rdev);
	bio_uninit(bio);

	spin_lock_irqsave(&log->io_list_lock, flags);
	list_for_each_entry(io, &log->flushing_ios, log_sibling)
		r5l_io_run_stripes(io);
	list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
	spin_unlock_irqrestore(&log->io_list_lock, flags);
}

/*
 * Start dispatching IO to the raid disks.
 *
 * A log consists of io_units, each led by a meta block. There is one
 * situation we want to avoid: a broken meta in the middle of the log makes
 * recovery unable to find the meta at the head of the log. So if an
 * operation requires a meta at the head to be persistent in the log, every
 * meta before it must be persistent in the log too. One case is:
 *
 * stripe data/parity is in the log and we start writing the stripe to the
 * raid disks: the stripe's data/parity must be persistent in the log before
 * we do the write to the raid disks.
 *
 * The solution is to strictly maintain io_unit list order. In this case, we
 * only write stripes of an io_unit to the raid disks until the io_unit is
 * the first one whose data/parity is in the log.
 */
void r5l_flush_stripe_to_raid(struct r5l_log *log)
{
	bool do_flush;

	if (!log || !log->need_cache_flush)
		return;

	spin_lock_irq(&log->io_list_lock);
	/* flush bio is running */
	if (!list_empty(&log->flushing_ios)) {
		spin_unlock_irq(&log->io_list_lock);
		return;
	}
	list_splice_tail_init(&log->io_end_ios, &log->flushing_ios);
	do_flush = !list_empty(&log->flushing_ios);
	spin_unlock_irq(&log->io_list_lock);

	if (!do_flush)
		return;
	bio_init(&log->flush_bio, log->rdev->bdev, NULL, 0,
		 REQ_OP_WRITE | REQ_PREFLUSH);
	log->flush_bio.bi_end_io = r5l_log_flush_endio;
	submit_bio(&log->flush_bio);
}

static void r5l_write_super(struct r5l_log *log, sector_t cp);
static void r5l_write_super_and_discard_space(struct r5l_log *log,
					      sector_t end)
{
	struct block_device *bdev = log->rdev->bdev;
	struct mddev *mddev;

	r5l_write_super(log, end);

	if (!bdev_max_discard_sectors(bdev))
		return;

	mddev = log->rdev->mddev;
	/*
	 * Discard could zero data, so before discard we must make sure
	 * superblock is updated to new log tail. Updating superblock (either
	 * directly calling md_update_sb() or depending on the md thread) must
	 * hold reconfig mutex. On the other hand, raid5_quiesce is called
	 * with reconfig_mutex held. The first step of raid5_quiesce() is
	 * waiting for all IO to finish, hence waiting for the reclaim thread,
	 * while the reclaim thread is calling this function and waiting for
	 * reconfig mutex. So there is a deadlock. We work around this issue
	 * with a trylock. FIXME: we could miss discard if we can't take
	 * reconfig mutex
	 */
	set_mask_bits(&mddev->sb_flags, 0,
		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
	if (!mddev_trylock(mddev))
		return;
	md_update_sb(mddev, 1);
	mddev_unlock(mddev);

	/* discard IO error really doesn't matter, ignore it */
	if (log->last_checkpoint < end) {
		blkdev_issue_discard(bdev,
				log->last_checkpoint + log->rdev->data_offset,
				end - log->last_checkpoint, GFP_NOIO);
	} else {
		blkdev_issue_discard(bdev,
				log->last_checkpoint + log->rdev->data_offset,
				log->device_size - log->last_checkpoint,
				GFP_NOIO);
		blkdev_issue_discard(bdev, log->rdev->data_offset, end,
				GFP_NOIO);
	}
}

/*
 * r5c_flush_stripe moves stripe from cached list to handle_list. When called,
 * the stripe must be on r5c_cached_full_stripes or r5c_cached_partial_stripes.
 *
 * must hold conf->device_lock
 */
static void r5c_flush_stripe(struct r5conf *conf, struct stripe_head *sh)
{
	BUG_ON(list_empty(&sh->lru));
	BUG_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));

	/*
	 * The stripe is not ON_RELEASE_LIST, so it is safe to call
	 * raid5_release_stripe() while holding conf->device_lock
	 */
	BUG_ON(test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));
	lockdep_assert_held(&conf->device_lock);

	list_del_init(&sh->lru);
	atomic_inc(&sh->count);

	set_bit(STRIPE_HANDLE, &sh->state);
	atomic_inc(&conf->active_stripes);
	r5c_make_stripe_write_out(sh);

	if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state))
		atomic_inc(&conf->r5c_flushing_partial_stripes);
	else
		atomic_inc(&conf->r5c_flushing_full_stripes);
	raid5_release_stripe(sh);
}

/*
 * if num == 0, flush all full stripes
 * if num > 0, flush all full stripes. If less than num full stripes are
 *             flushed, flush some partial stripes until a total of num
 *             stripes are flushed or there are no more cached stripes.
 */
void r5c_flush_cache(struct r5conf *conf, int num)
{
	int count;
	struct stripe_head *sh, *next;

	lockdep_assert_held(&conf->device_lock);
	if (!READ_ONCE(conf->log))
		return;

	count = 0;
	list_for_each_entry_safe(sh, next, &conf->r5c_full_stripe_list, lru) {
		r5c_flush_stripe(conf, sh);
		count++;
	}

	if (count >= num)
		return;
	list_for_each_entry_safe(sh, next,
				 &conf->r5c_partial_stripe_list, lru) {
		r5c_flush_stripe(conf, sh);
		if (++count >= num)
			break;
	}
}

static void r5c_do_reclaim(struct r5conf *conf)
{
	struct r5l_log *log = READ_ONCE(conf->log);
	struct stripe_head *sh;
	int count = 0;
	unsigned long flags;
	int total_cached;
	int stripes_to_flush;
	int flushing_partial, flushing_full;

	if (!r5c_is_writeback(log))
		return;

	flushing_partial = atomic_read(&conf->r5c_flushing_partial_stripes);
	flushing_full = atomic_read(&conf->r5c_flushing_full_stripes);
	total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
		atomic_read(&conf->r5c_cached_full_stripes) -
		flushing_full - flushing_partial;

	if (total_cached > conf->min_nr_stripes * 3 / 4 ||
	    atomic_read(&conf->empty_inactive_list_nr) > 0)
		/*
		 * if stripe cache pressure high, flush all full stripes and
		 * some partial stripes
		 */
		stripes_to_flush = R5C_RECLAIM_STRIPE_GROUP;
	else if (total_cached > conf->min_nr_stripes * 1 / 2 ||
		 atomic_read(&conf->r5c_cached_full_stripes) - flushing_full >
		 R5C_FULL_STRIPE_FLUSH_BATCH(conf))
		/*
		 * if stripe cache pressure moderate, or if there are many
		 * full stripes, flush all full stripes
		 */
		stripes_to_flush = 0;
	else
		/* no need to flush */
		stripes_to_flush = -1;

	if (stripes_to_flush >= 0) {
		spin_lock_irqsave(&conf->device_lock, flags);
		r5c_flush_cache(conf, stripes_to_flush);
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}

	/* if log space is tight, flush stripes on stripe_in_journal_list */
	if (test_bit(R5C_LOG_TIGHT, &conf->cache_state)) {
		spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
		spin_lock(&conf->device_lock);
		list_for_each_entry(sh, &log->stripe_in_journal_list, r5c) {
			/*
			 * stripes on stripe_in_journal_list could be in any
			 * state of the stripe_cache state machine. In this
			 * case, we only want to flush stripes on
			 * r5c_cached_full/partial_stripes. The following
			 * condition makes sure the stripe is on one of the
			 * two lists.
			 */
			if (!list_empty(&sh->lru) &&
			    !test_bit(STRIPE_HANDLE, &sh->state) &&
			    atomic_read(&sh->count) == 0) {
				r5c_flush_stripe(conf, sh);
				if (count++ >= R5C_RECLAIM_STRIPE_GROUP)
					break;
			}
		}
		spin_unlock(&conf->device_lock);
		spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
	}

	if (!test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
		r5l_run_no_space_stripes(log);

	md_wakeup_thread(conf->mddev->thread);
}

static void r5l_do_reclaim(struct r5l_log *log)
{
	struct r5conf *conf = log->rdev->mddev->private;
	sector_t reclaim_target = xchg(&log->reclaim_target, 0);
	sector_t reclaimable;
	sector_t next_checkpoint;
	bool write_super;

	spin_lock_irq(&log->io_list_lock);
	write_super = r5l_reclaimable_space(log) > log->max_free_space ||
		reclaim_target != 0 || !list_empty(&log->no_space_stripes);
	/*
	 * move proper io_units to the reclaim list. We should not change the
	 * order. reclaimable/unreclaimable io_units can be mixed in the list,
	 * and we shouldn't reuse the space of an unreclaimable io_unit.
	 */
	while (1) {
		reclaimable = r5l_reclaimable_space(log);
		if (reclaimable >= reclaim_target ||
		    (list_empty(&log->running_ios) &&
		     list_empty(&log->io_end_ios) &&
		     list_empty(&log->flushing_ios) &&
		     list_empty(&log->finished_ios)))
			break;

		md_wakeup_thread(log->rdev->mddev->thread);
		wait_event_lock_irq(log->iounit_wait,
				    r5l_reclaimable_space(log) > reclaimable,
				    log->io_list_lock);
	}

	next_checkpoint = r5c_calculate_new_cp(conf);
	spin_unlock_irq(&log->io_list_lock);

	if (reclaimable == 0 || !write_super)
		return;

	/*
	 * write_super will flush the cache of each raid disk. We must write
	 * super here, because the log area might be reused soon and we don't
	 * want to confuse recovery.
	 */
	r5l_write_super_and_discard_space(log, next_checkpoint);

	mutex_lock(&log->io_mutex);
	log->last_checkpoint = next_checkpoint;
	r5c_update_log_state(log);
	mutex_unlock(&log->io_mutex);

	r5l_run_no_space_stripes(log);
}

static void r5l_reclaim_thread(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct r5conf *conf = mddev->private;
	struct r5l_log *log = READ_ONCE(conf->log);

	if (!log)
		return;
	r5c_do_reclaim(conf);
	r5l_do_reclaim(log);
}

void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
{
	unsigned long target;
	unsigned long new = (unsigned long)space; /* overflow in theory */

	if (!log)
		return;

	target = READ_ONCE(log->reclaim_target);
	do {
		if (new < target)
			return;
	} while (!try_cmpxchg(&log->reclaim_target, &target, new));
	md_wakeup_thread(log->reclaim_thread);
}

void r5l_quiesce(struct r5l_log *log, int quiesce)
{
	struct mddev *mddev = log->rdev->mddev;
	struct md_thread *thread = rcu_dereference_protected(
		log->reclaim_thread, lockdep_is_held(&mddev->reconfig_mutex));

	if (quiesce) {
		/* make sure r5l_write_super_and_discard_space exits */
		wake_up(&mddev->sb_wait);
		kthread_park(thread->tsk);
		r5l_wake_reclaim(log, MaxSector);
		r5l_do_reclaim(log);
	} else
		kthread_unpark(thread->tsk);
}

bool r5l_log_disk_error(struct r5conf *conf)
{
	struct r5l_log *log = READ_ONCE(conf->log);

	/* don't allow write if journal disk is missing */
	if (!log)
		return test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
	else
		return test_bit(Faulty, &log->rdev->flags);
}

#define R5L_RECOVERY_PAGE_POOL_SIZE 256

struct r5l_recovery_ctx {
	struct page *meta_page;		/* current meta */
	sector_t meta_total_blocks;	/* total size of current meta and data */
	sector_t pos;			/* recovery position */
	u64 seq;			/* recovery position seq */
	int data_parity_stripes;	/* number of data_parity stripes */
	int data_only_stripes;		/* number of data_only stripes */
	struct list_head cached_list;

	/*
	 * read ahead page pool (ra_pool)
	 * In recovery, the log is read sequentially. It is not efficient to
	 * read every page with sync_page_io(). The read ahead page pool
	 * reads multiple pages with one IO, so further log reads can
	 * just copy data from the pool.
	 */
	struct page *ra_pool[R5L_RECOVERY_PAGE_POOL_SIZE];
	struct bio_vec ra_bvec[R5L_RECOVERY_PAGE_POOL_SIZE];
	sector_t pool_offset;	/* offset of first page in the pool */
	int total_pages;	/* total allocated pages */
	int valid_pages;	/* pages with valid data */
};

static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
					 struct r5l_recovery_ctx *ctx)
{
	struct page *page;

	ctx->valid_pages = 0;
	ctx->total_pages = 0;
	while (ctx->total_pages < R5L_RECOVERY_PAGE_POOL_SIZE) {
		page = alloc_page(GFP_KERNEL);

		if (!page)
			break;
		ctx->ra_pool[ctx->total_pages] = page;
		ctx->total_pages += 1;
	}

	if (ctx->total_pages == 0)
		return -ENOMEM;

	ctx->pool_offset = 0;
	return 0;
}

static void r5l_recovery_free_ra_pool(struct r5l_log *log,
				      struct r5l_recovery_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->total_pages; ++i)
		put_page(ctx->ra_pool[i]);
}

/*
 * fetch ctx->valid_pages pages from offset
 * In normal cases, ctx->valid_pages == ctx->total_pages after the call.
 * However, if the offset is close to the end of the journal device,
 * ctx->valid_pages could be smaller than ctx->total_pages
 */
static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
				      struct r5l_recovery_ctx *ctx,
				      sector_t offset)
{
	struct bio bio;
	int ret;

	bio_init(&bio, log->rdev->bdev, ctx->ra_bvec,
		 R5L_RECOVERY_PAGE_POOL_SIZE, REQ_OP_READ);
	bio.bi_iter.bi_sector = log->rdev->data_offset + offset;

	ctx->valid_pages = 0;
	ctx->pool_offset = offset;

	while (ctx->valid_pages < ctx->total_pages) {
		__bio_add_page(&bio, ctx->ra_pool[ctx->valid_pages], PAGE_SIZE,
			       0);
		ctx->valid_pages += 1;

		offset = r5l_ring_add(log, offset, BLOCK_SECTORS);

		if (offset == 0)  /* reached end of the device */
			break;
	}

	ret = submit_bio_wait(&bio);
	bio_uninit(&bio);
	return ret;
}

/*
 * try to read a page from the read ahead page pool; if the page is not in
 * the pool, call r5l_recovery_fetch_ra_pool
 */
static int r5l_recovery_read_page(struct r5l_log *log,
				  struct r5l_recovery_ctx *ctx,
				  struct page *page,
				  sector_t offset)
{
	int ret;

	if (offset < ctx->pool_offset ||
	    offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS) {
		ret = r5l_recovery_fetch_ra_pool(log, ctx, offset);
		if (ret)
			return ret;
	}

	BUG_ON(offset < ctx->pool_offset ||
	       offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS);

	memcpy(page_address(page),
	       page_address(ctx->ra_pool[(offset - ctx->pool_offset) >>
					 BLOCK_SECTOR_SHIFT]),
	       PAGE_SIZE);
	return 0;
}

static int r5l_recovery_read_meta_block(struct r5l_log *log,
					struct r5l_recovery_ctx *ctx)
{
	struct page *page = ctx->meta_page;
	struct r5l_meta_block *mb;
	u32 crc, stored_crc;
	int ret;

	ret = r5l_recovery_read_page(log, ctx, page, ctx->pos);
	if (ret != 0)
		return ret;

	mb = page_address(page);
	stored_crc = le32_to_cpu(mb->checksum);
	mb->checksum = 0;

	if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
	    le64_to_cpu(mb->seq) != ctx->seq ||
	    mb->version != R5LOG_VERSION ||
	    le64_to_cpu(mb->position) != ctx->pos)
		return -EINVAL;

	crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
	if (stored_crc != crc)
		return -EINVAL;

	if (le32_to_cpu(mb->meta_size) > PAGE_SIZE)
		return -EINVAL;

	ctx->meta_total_blocks = BLOCK_SECTORS;

	return 0;
}

static void
r5l_recovery_create_empty_meta_block(struct r5l_log *log,
				     struct page *page,
				     sector_t pos, u64 seq)
{
	struct r5l_meta_block *mb;

	mb = page_address(page);
	clear_page(mb);
	mb->magic = cpu_to_le32(R5LOG_MAGIC);
	mb->version = R5LOG_VERSION;
	mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
	mb->seq = cpu_to_le64(seq);
	mb->position = cpu_to_le64(pos);
}

static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
					  u64 seq)
{
	struct page *page;
	struct r5l_meta_block *mb;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;
	r5l_recovery_create_empty_meta_block(log, page, pos, seq);
	mb = page_address(page);
	mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
					     mb, PAGE_SIZE));
	if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE |
			  REQ_SYNC | REQ_FUA, false)) {
		__free_page(page);
		return -EIO;
	}
	__free_page(page);
	return 0;
}

/*
 * r5l_recovery_load_data and r5l_recovery_load_parity use the flag
 * R5_Wantwrite to mark valid (potentially not flushed) data in the journal.
 *
 * We already verified the checksum in
 * r5l_recovery_verify_data_checksum_for_mb, so there should not be any
 * mismatch here.
 */
static void r5l_recovery_load_data(struct r5l_log *log,
				   struct stripe_head *sh,
				   struct r5l_recovery_ctx *ctx,
				   struct r5l_payload_data_parity *payload,
				   sector_t log_offset)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;
	int dd_idx;

	raid5_compute_sector(conf,
			     le64_to_cpu(payload->location), 0,
			     &dd_idx, sh);
	r5l_recovery_read_page(log, ctx, sh->dev[dd_idx].page, log_offset);
	sh->dev[dd_idx].log_checksum =
		le32_to_cpu(payload->checksum[0]);
	ctx->meta_total_blocks += BLOCK_SECTORS;

	set_bit(R5_Wantwrite, &sh->dev[dd_idx].flags);
	set_bit(STRIPE_R5C_CACHING, &sh->state);
}

1823static void r5l_recovery_load_parity(struct r5l_log *log,
1824 struct stripe_head *sh,
1825 struct r5l_recovery_ctx *ctx,
1826 struct r5l_payload_data_parity *payload,
1827 sector_t log_offset)
1828{
1829 struct mddev *mddev = log->rdev->mddev;
1830 struct r5conf *conf = mddev->private;
1831
1832 ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
1833 r5l_recovery_read_page(log, ctx, sh->dev[sh->pd_idx].page, log_offset);
1834 sh->dev[sh->pd_idx].log_checksum =
1835 le32_to_cpu(payload->checksum[0]);
1836 set_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags);
1837
1838 if (sh->qd_idx >= 0) {
1839 r5l_recovery_read_page(
1840 log, ctx, sh->dev[sh->qd_idx].page,
1841 r5l_ring_add(log, log_offset, BLOCK_SECTORS));
1842 sh->dev[sh->qd_idx].log_checksum =
1843 le32_to_cpu(payload->checksum[1]);
1844 set_bit(R5_Wantwrite, &sh->dev[sh->qd_idx].flags);
1845 }
1846 clear_bit(STRIPE_R5C_CACHING, &sh->state);
1847}
1848
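/* Clear all state on a stripe_head so recovery can reuse it from scratch. */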
static void r5l_recovery_reset_stripe(struct stripe_head *sh)
{
	int i;

	sh->state = 0;
	sh->log_start = MaxSector;
	for (i = sh->disks; i--; )
		sh->dev[i].flags = 0;
}

static void
r5l_recovery_replay_one_stripe(struct r5conf *conf,
			       struct stripe_head *sh,
			       struct r5l_recovery_ctx *ctx)
{
	struct md_rdev *rdev, *rrdev;
	int disk_index;
	int data_count = 0;

	for (disk_index = 0; disk_index < sh->disks; disk_index++) {
		if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
			continue;
		if (disk_index == sh->qd_idx || disk_index == sh->pd_idx)
			continue;
		data_count++;
	}

	/*
	 * stripes that only have parity must have been flushed
	 * before the crash that we are now recovering from, so
	 * there is nothing more to recover.
	 */
	if (data_count == 0)
		goto out;

	for (disk_index = 0; disk_index < sh->disks; disk_index++) {
		if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
			continue;

		/* in case device is broken */
		rdev = conf->disks[disk_index].rdev;
		if (rdev) {
			atomic_inc(&rdev->nr_pending);
			sync_page_io(rdev, sh->sector, PAGE_SIZE,
				     sh->dev[disk_index].page, REQ_OP_WRITE,
				     false);
			rdev_dec_pending(rdev, rdev->mddev);
		}
		rrdev = conf->disks[disk_index].replacement;
		if (rrdev) {
			atomic_inc(&rrdev->nr_pending);
			sync_page_io(rrdev, sh->sector, PAGE_SIZE,
				     sh->dev[disk_index].page, REQ_OP_WRITE,
				     false);
			rdev_dec_pending(rrdev, rrdev->mddev);
		}
	}
	ctx->data_parity_stripes++;
out:
	r5l_recovery_reset_stripe(sh);
}

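/*
 * Grab a free stripe_head for recovery. With noblock set, this returns
 * NULL (via R5_GAS_NOBLOCK) instead of waiting when no stripe is free.
 */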
static struct stripe_head *
r5c_recovery_alloc_stripe(
		struct r5conf *conf,
		sector_t stripe_sect,
		int noblock)
{
	struct stripe_head *sh;

	sh = raid5_get_active_stripe(conf, NULL, stripe_sect,
				     noblock ? R5_GAS_NOBLOCK : 0);
	if (!sh)
		return NULL;	/* no more stripe available */

	r5l_recovery_reset_stripe(sh);

	return sh;
}

static struct stripe_head *
r5c_recovery_lookup_stripe(struct list_head *list, sector_t sect)
{
	struct stripe_head *sh;

	list_for_each_entry(sh, list, lru)
		if (sh->sector == sect)
			return sh;
	return NULL;
}

static void
r5c_recovery_drop_stripes(struct list_head *cached_stripe_list,
			  struct r5l_recovery_ctx *ctx)
{
	struct stripe_head *sh, *next;

	list_for_each_entry_safe(sh, next, cached_stripe_list, lru) {
		r5l_recovery_reset_stripe(sh);
		list_del_init(&sh->lru);
		raid5_release_stripe(sh);
	}
}

static void
r5c_recovery_replay_stripes(struct list_head *cached_stripe_list,
			    struct r5l_recovery_ctx *ctx)
{
	struct stripe_head *sh, *next;

	list_for_each_entry_safe(sh, next, cached_stripe_list, lru)
		if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
			r5l_recovery_replay_one_stripe(sh->raid_conf, sh, ctx);
			list_del_init(&sh->lru);
			raid5_release_stripe(sh);
		}
}

/* returns 0 if the checksum matches; otherwise returns -EINVAL */
static int
r5l_recovery_verify_data_checksum(struct r5l_log *log,
				  struct r5l_recovery_ctx *ctx,
				  struct page *page,
				  sector_t log_offset, __le32 log_checksum)
{
	void *addr;
	u32 checksum;

	r5l_recovery_read_page(log, ctx, page, log_offset);
	addr = kmap_atomic(page);
	checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
	kunmap_atomic(addr);
	return (le32_to_cpu(log_checksum) == checksum) ? 0 : -EINVAL;
}

/*
 * Before loading data into the stripe cache, we need to verify the checksums
 * for all data pages. If there is a mismatch for any data page, we drop all
 * data in the meta block.
 */
static int
r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log,
					 struct r5l_recovery_ctx *ctx)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;
	struct r5l_meta_block *mb = page_address(ctx->meta_page);
	sector_t mb_offset = sizeof(struct r5l_meta_block);
	sector_t log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
	struct page *page;
	struct r5l_payload_data_parity *payload;
	struct r5l_payload_flush *payload_flush;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	while (mb_offset < le32_to_cpu(mb->meta_size)) {
		payload = (void *)mb + mb_offset;
		payload_flush = (void *)mb + mb_offset;

		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
			if (r5l_recovery_verify_data_checksum(
				    log, ctx, page, log_offset,
				    payload->checksum[0]) < 0)
				goto mismatch;
		} else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY) {
			if (r5l_recovery_verify_data_checksum(
				    log, ctx, page, log_offset,
				    payload->checksum[0]) < 0)
				goto mismatch;
			if (conf->max_degraded == 2 && /* q for RAID 6 */
			    r5l_recovery_verify_data_checksum(
				    log, ctx, page,
				    r5l_ring_add(log, log_offset,
						 BLOCK_SECTORS),
				    payload->checksum[1]) < 0)
				goto mismatch;
		} else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
			/* nothing to do for R5LOG_PAYLOAD_FLUSH here */
		} else /* not R5LOG_PAYLOAD_DATA/PARITY/FLUSH */
			goto mismatch;

		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
			mb_offset += sizeof(struct r5l_payload_flush) +
				le32_to_cpu(payload_flush->size);
		} else {
			/* DATA or PARITY payload */
			log_offset = r5l_ring_add(log, log_offset,
						  le32_to_cpu(payload->size));
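			/*
			 * payload->size counts 512-byte sectors, so
			 * size >> (PAGE_SHIFT - 9) is the number of 4kB
			 * pages; one __le32 checksum is stored per page
			 * after the fixed payload header.
			 */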
			mb_offset += sizeof(struct r5l_payload_data_parity) +
				sizeof(__le32) *
				(le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
		}
	}

	put_page(page);
	return 0;

mismatch:
	put_page(page);
	return -EINVAL;
}

/*
 * Analyze all data/parity pages in one meta block
 * Returns:
 * 0 for success
 * -EINVAL for unknown payload type
 * -EAGAIN for checksum mismatch of data page
 * -ENOMEM when we run out of memory (alloc_page failed or no more stripes)
 */
static int
r5c_recovery_analyze_meta_block(struct r5l_log *log,
				struct r5l_recovery_ctx *ctx,
				struct list_head *cached_stripe_list)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;
	struct r5l_meta_block *mb;
	struct r5l_payload_data_parity *payload;
	struct r5l_payload_flush *payload_flush;
	int mb_offset;
	sector_t log_offset;
	sector_t stripe_sect;
	struct stripe_head *sh;
	int ret;

	/*
	 * For a mismatch in data blocks, we drop all data in this mb, but
	 * we still read the next mb for other data with the FLUSH flag, as
	 * io_units could finish out of order.
	 */
	ret = r5l_recovery_verify_data_checksum_for_mb(log, ctx);
	if (ret == -EINVAL)
		return -EAGAIN;
	else if (ret)
		return ret; /* -ENOMEM due to alloc_page() failure */

	mb = page_address(ctx->meta_page);
	mb_offset = sizeof(struct r5l_meta_block);
	log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);

	while (mb_offset < le32_to_cpu(mb->meta_size)) {
		int dd;

		payload = (void *)mb + mb_offset;
		payload_flush = (void *)mb + mb_offset;

		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
			int i, count;

			count = le32_to_cpu(payload_flush->size) / sizeof(__le64);
			for (i = 0; i < count; ++i) {
				stripe_sect = le64_to_cpu(payload_flush->flush_stripes[i]);
				sh = r5c_recovery_lookup_stripe(cached_stripe_list,
								stripe_sect);
				if (sh) {
					WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
					r5l_recovery_reset_stripe(sh);
					list_del_init(&sh->lru);
					raid5_release_stripe(sh);
				}
			}

			mb_offset += sizeof(struct r5l_payload_flush) +
				le32_to_cpu(payload_flush->size);
			continue;
		}

		/* DATA or PARITY payload */
		stripe_sect = (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) ?
			raid5_compute_sector(
				conf, le64_to_cpu(payload->location), 0, &dd,
				NULL)
			: le64_to_cpu(payload->location);

		sh = r5c_recovery_lookup_stripe(cached_stripe_list,
						stripe_sect);

		if (!sh) {
			sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1);
			/*
			 * cannot get a stripe from raid5_get_active_stripe;
			 * try replaying some stripes
			 */
			if (!sh) {
				r5c_recovery_replay_stripes(
					cached_stripe_list, ctx);
				sh = r5c_recovery_alloc_stripe(
					conf, stripe_sect, 1);
			}
			if (!sh) {
				int new_size = conf->min_nr_stripes * 2;
				pr_debug("md/raid:%s: Increasing stripe cache size to %d to recover data from the journal.\n",
					 mdname(mddev),
					 new_size);
				ret = raid5_set_cache_size(mddev, new_size);
				if (conf->min_nr_stripes <= new_size / 2) {
					pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n",
					       mdname(mddev),
					       ret,
					       new_size,
					       conf->min_nr_stripes,
					       conf->max_nr_stripes);
					return -ENOMEM;
				}
				sh = r5c_recovery_alloc_stripe(
					conf, stripe_sect, 0);
			}
			if (!sh) {
				pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
				       mdname(mddev));
				return -ENOMEM;
			}
			list_add_tail(&sh->lru, cached_stripe_list);
		}

		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
			if (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
			    test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) {
				r5l_recovery_replay_one_stripe(conf, sh, ctx);
				list_move_tail(&sh->lru, cached_stripe_list);
			}
			r5l_recovery_load_data(log, sh, ctx, payload,
					       log_offset);
		} else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
			r5l_recovery_load_parity(log, sh, ctx, payload,
						 log_offset);
		else
			return -EINVAL;

		log_offset = r5l_ring_add(log, log_offset,
					  le32_to_cpu(payload->size));

		mb_offset += sizeof(struct r5l_payload_data_parity) +
			sizeof(__le32) *
			(le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
	}

	return 0;
}

/*
 * Load the stripe into cache. The stripe will be written out later by
 * the stripe cache state machine.
 */
static void r5c_recovery_load_one_stripe(struct r5l_log *log,
					 struct stripe_head *sh)
{
	struct r5dev *dev;
	int i;

	for (i = sh->disks; i--; ) {
		dev = sh->dev + i;
		if (test_and_clear_bit(R5_Wantwrite, &dev->flags)) {
			set_bit(R5_InJournal, &dev->flags);
			set_bit(R5_UPTODATE, &dev->flags);
		}
	}
}

/*
 * Scan through the log for all to-be-flushed data
 *
 * For stripes with data and parity, namely Data-Parity stripes
 * (STRIPE_R5C_CACHING == 0), we simply replay all the writes.
 *
 * For stripes with only data, namely Data-Only stripes
 * (STRIPE_R5C_CACHING == 1), we load them into the stripe cache state machine.
 *
 * For a stripe, if we see data after parity, we should discard all previous
 * data and parity for this stripe, as these data are already flushed to
 * the array.
 *
 * At the end of the scan, we return the new journal_tail, which points to
 * the first data-only stripe on the journal device, or to the next invalid
 * meta block.
 */
static int r5c_recovery_flush_log(struct r5l_log *log,
				  struct r5l_recovery_ctx *ctx)
{
	struct stripe_head *sh;
	int ret = 0;

	/* scan through the log */
	while (1) {
		if (r5l_recovery_read_meta_block(log, ctx))
			break;

		ret = r5c_recovery_analyze_meta_block(log, ctx,
						      &ctx->cached_list);
		/*
		 * -EAGAIN means a mismatch in a data block; in this case,
		 * we still try to scan the next meta block.
		 */
		if (ret && ret != -EAGAIN)
			break;	/* ret == -EINVAL or -ENOMEM */
		ctx->seq++;
		ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
	}

	if (ret == -ENOMEM) {
		r5c_recovery_drop_stripes(&ctx->cached_list, ctx);
		return ret;
	}

	/* replay data-parity stripes */
	r5c_recovery_replay_stripes(&ctx->cached_list, ctx);

	/* load data-only stripes to stripe cache */
	list_for_each_entry(sh, &ctx->cached_list, lru) {
		WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
		r5c_recovery_load_one_stripe(log, sh);
		ctx->data_only_stripes++;
	}

	return 0;
}

/*
 * We did a recovery. Now ctx.pos points to an invalid meta block. The new
 * log will start here. But we can't let the superblock point to the last
 * valid meta block. The log might look like:
 * | meta 1| meta 2| meta 3|
 * meta 1 is valid, meta 2 is invalid. meta 3 could be valid. If the
 * superblock points to meta 1, we write a new valid meta 2n. If a crash
 * happens again, the new recovery will start from meta 1. Since meta 2n is
 * valid now, recovery will think meta 3 is valid, which is wrong.
 * The solution is to create a new meta in meta 2 with its seq == meta
 * 1's seq + 10000 and to let the superblock point to meta 2. The same
 * recovery will not think meta 3 is a valid meta, because its seq doesn't
 * match.
 */

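/*
 * Worked example (hypothetical numbers): if meta 1 has seq 105 and the scan
 * stops at meta 2, the replacement meta block written at meta 2's position
 * gets seq 10105. A stale meta 3 (say seq 107) then fails the sequence check
 * in r5l_recovery_read_meta_block (which expects exactly 10106), so the next
 * recovery correctly stops right after the rewritten block.
 */
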
/*
 * Before recovery, the log looks like the following
 *
 *   ---------------------------------------------
 *   |           valid log        | invalid log  |
 *   ---------------------------------------------
 *   ^
 *   |- log->last_checkpoint
 *   |- log->last_cp_seq
 *
 * Now we scan through the log until we see invalid entry
 *
 *   ---------------------------------------------
 *   |           valid log        | invalid log  |
 *   ---------------------------------------------
 *   ^                            ^
 *   |- log->last_checkpoint      |- ctx->pos
 *   |- log->last_cp_seq          |- ctx->seq
 *
 * From this point, we need to increase the seq number by 10000 to avoid
 * confusing the next recovery.
 *
 *   ---------------------------------------------
 *   |           valid log        | invalid log  |
 *   ---------------------------------------------
 *   ^                            ^
 *   |- log->last_checkpoint      |- ctx->pos+1
 *   |- log->last_cp_seq          |- ctx->seq+10001
 *
 * However, it is not safe to start the state machine yet, because data-only
 * stripes are not yet secured in the RAID. To save these data-only stripes,
 * we rewrite them to the journal from seq+10001.
 *
 *   -----------------------------------------------------------------
 *   |           valid log        | data only stripes | invalid log  |
 *   -----------------------------------------------------------------
 *   ^                                                ^
 *   |- log->last_checkpoint                          |- ctx->pos+n
 *   |- log->last_cp_seq                              |- ctx->seq+10000+n
 *
 * If failure happens again during this process, the recovery can safely
 * start again from log->last_checkpoint.
 *
 * Once data only stripes are rewritten to journal, we move log_tail
 *
 *   -----------------------------------------------------------------
 *   |     old log        |    data only stripes    | invalid log  |
 *   -----------------------------------------------------------------
 *                        ^                         ^
 *                        |- log->last_checkpoint   |- ctx->pos+n
 *                        |- log->last_cp_seq       |- ctx->seq+10000+n
 *
 * Then we can safely start the state machine. If failure happens from this
 * point on, the recovery will start from the new log->last_checkpoint.
 */
static int
r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
				       struct r5l_recovery_ctx *ctx)
{
	struct stripe_head *sh;
	struct mddev *mddev = log->rdev->mddev;
	struct page *page;
	sector_t next_checkpoint = MaxSector;

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		pr_err("md/raid:%s: cannot allocate memory to rewrite data only stripes\n",
		       mdname(mddev));
		return -ENOMEM;
	}

	WARN_ON(list_empty(&ctx->cached_list));

	list_for_each_entry(sh, &ctx->cached_list, lru) {
		struct r5l_meta_block *mb;
		int i;
		int offset;
		sector_t write_pos;

		WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
		r5l_recovery_create_empty_meta_block(log, page,
						     ctx->pos, ctx->seq);
		mb = page_address(page);
		offset = le32_to_cpu(mb->meta_size);
		write_pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);

		for (i = sh->disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			struct r5l_payload_data_parity *payload;
			void *addr;

			if (test_bit(R5_InJournal, &dev->flags)) {
				payload = (void *)mb + offset;
				payload->header.type = cpu_to_le16(
					R5LOG_PAYLOAD_DATA);
				payload->size = cpu_to_le32(BLOCK_SECTORS);
				payload->location = cpu_to_le64(
					raid5_compute_blocknr(sh, i, 0));
				addr = kmap_atomic(dev->page);
				payload->checksum[0] = cpu_to_le32(
					crc32c_le(log->uuid_checksum, addr,
						  PAGE_SIZE));
				kunmap_atomic(addr);
				sync_page_io(log->rdev, write_pos, PAGE_SIZE,
					     dev->page, REQ_OP_WRITE, false);
				write_pos = r5l_ring_add(log, write_pos,
							 BLOCK_SECTORS);
				offset += sizeof(__le32) +
					sizeof(struct r5l_payload_data_parity);
			}
		}
		mb->meta_size = cpu_to_le32(offset);
		mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
						     mb, PAGE_SIZE));
		sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
			     REQ_OP_WRITE | REQ_SYNC | REQ_FUA, false);
		sh->log_start = ctx->pos;
		list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
		atomic_inc(&log->stripe_in_journal_count);
		ctx->pos = write_pos;
		ctx->seq += 1;
		next_checkpoint = sh->log_start;
	}
	log->next_checkpoint = next_checkpoint;
	__free_page(page);
	return 0;
}

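/*
 * Flush the data-only stripes rescued by the scan: temporarily switch the
 * log to write-back mode, push each stripe into the write-out phase, and
 * wait for the stripe cache to drain before returning to write-through.
 */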
static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
						 struct r5l_recovery_ctx *ctx)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;
	struct stripe_head *sh, *next;
	bool cleared_pending = false;

	if (ctx->data_only_stripes == 0)
		return;

	if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
		cleared_pending = true;
		clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
	}
	log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK;

	list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
		r5c_make_stripe_write_out(sh);
		set_bit(STRIPE_HANDLE, &sh->state);
		list_del_init(&sh->lru);
		raid5_release_stripe(sh);
	}

	/* reuse conf->wait_for_quiescent in recovery */
	wait_event(conf->wait_for_quiescent,
		   atomic_read(&conf->active_stripes) == 0);

	log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
	if (cleared_pending)
		set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
}

static int r5l_recovery_log(struct r5l_log *log)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5l_recovery_ctx *ctx;
	int ret;
	sector_t pos;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->pos = log->last_checkpoint;
	ctx->seq = log->last_cp_seq;
	INIT_LIST_HEAD(&ctx->cached_list);
	ctx->meta_page = alloc_page(GFP_KERNEL);

	if (!ctx->meta_page) {
		ret = -ENOMEM;
		goto meta_page;
	}

	if (r5l_recovery_allocate_ra_pool(log, ctx) != 0) {
		ret = -ENOMEM;
		goto ra_pool;
	}

	ret = r5c_recovery_flush_log(log, ctx);

	if (ret)
		goto error;

	pos = ctx->pos;
	ctx->seq += 10000;

	if ((ctx->data_only_stripes == 0) && (ctx->data_parity_stripes == 0))
		pr_info("md/raid:%s: starting from clean shutdown\n",
			mdname(mddev));
	else
		pr_info("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
			mdname(mddev), ctx->data_only_stripes,
			ctx->data_parity_stripes);

	if (ctx->data_only_stripes == 0) {
		log->next_checkpoint = ctx->pos;
		r5l_log_write_empty_meta_block(log, ctx->pos, ctx->seq++);
		ctx->pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
	} else if (r5c_recovery_rewrite_data_only_stripes(log, ctx)) {
		pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
		       mdname(mddev));
		ret = -EIO;
		goto error;
	}

	log->log_start = ctx->pos;
	log->seq = ctx->seq;
	log->last_checkpoint = pos;
	r5l_write_super(log, pos);

	r5c_recovery_flush_data_only_stripes(log, ctx);
	ret = 0;
error:
	r5l_recovery_free_ra_pool(log, ctx);
ra_pool:
	__free_page(ctx->meta_page);
meta_page:
	kfree(ctx);
	return ret;
}

static void r5l_write_super(struct r5l_log *log, sector_t cp)
{
	struct mddev *mddev = log->rdev->mddev;

	log->rdev->journal_tail = cp;
	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
}

static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
{
	struct r5conf *conf;
	int ret;

	ret = mddev_lock(mddev);
	if (ret)
		return ret;

	conf = mddev->private;
	if (!conf || !conf->log)
		goto out_unlock;

	switch (conf->log->r5c_journal_mode) {
	case R5C_JOURNAL_MODE_WRITE_THROUGH:
		ret = snprintf(
			page, PAGE_SIZE, "[%s] %s\n",
			r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
			r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
		break;
	case R5C_JOURNAL_MODE_WRITE_BACK:
		ret = snprintf(
			page, PAGE_SIZE, "%s [%s]\n",
			r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
			r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
		break;
	default:
		ret = 0;
	}

out_unlock:
	mddev_unlock(mddev);
	return ret;
}

/*
 * Set journal cache mode on @mddev (external API initially needed by dm-raid).
 *
 * @mode is as defined in 'enum r5c_journal_mode'.
 */
int r5c_journal_mode_set(struct mddev *mddev, int mode)
{
	struct r5conf *conf;

	if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH ||
	    mode > R5C_JOURNAL_MODE_WRITE_BACK)
		return -EINVAL;

	conf = mddev->private;
	if (!conf || !conf->log)
		return -ENODEV;

	if (raid5_calc_degraded(conf) > 0 &&
	    mode == R5C_JOURNAL_MODE_WRITE_BACK)
		return -EINVAL;

	conf->log->r5c_journal_mode = mode;

	pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
		 mdname(mddev), mode, r5c_journal_mode_str[mode]);
	return 0;
}
EXPORT_SYMBOL(r5c_journal_mode_set);

static ssize_t r5c_journal_mode_store(struct mddev *mddev,
				      const char *page, size_t length)
{
	int mode = ARRAY_SIZE(r5c_journal_mode_str);
	size_t len = length;
	int ret;

	if (len < 2)
		return -EINVAL;

	if (page[len - 1] == '\n')
		len--;

	while (mode--)
		if (strlen(r5c_journal_mode_str[mode]) == len &&
		    !strncmp(page, r5c_journal_mode_str[mode], len))
			break;
	ret = mddev_suspend_and_lock(mddev);
	if (ret)
		return ret;
	ret = r5c_journal_mode_set(mddev, mode);
	mddev_unlock_and_resume(mddev);
	return ret ?: length;
}

struct md_sysfs_entry
r5c_journal_mode = __ATTR(journal_mode, 0644,
			  r5c_journal_mode_show, r5c_journal_mode_store);
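
/*
 * Usage sketch (assuming the array is md0; the attribute appears under
 * /sys/block/md0/md/ once a journal device is configured):
 *
 *	$ cat /sys/block/md0/md/journal_mode
 *	[write-through] write-back
 *	$ echo write-back > /sys/block/md0/md/journal_mode
 */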

/*
 * Try to handle a write operation in the caching phase. This function
 * should only be called in write-back mode.
 *
 * If all outstanding writes can be handled in the caching phase, returns 0.
 * If some writes require the write-out phase, calls
 * r5c_make_stripe_write_out() and returns -EAGAIN.
 */
int r5c_try_caching_write(struct r5conf *conf,
			  struct stripe_head *sh,
			  struct stripe_head_state *s,
			  int disks)
{
	struct r5l_log *log = READ_ONCE(conf->log);
	int i;
	struct r5dev *dev;
	int to_cache = 0;
	void __rcu **pslot;
	sector_t tree_index;
	int ret;
	uintptr_t refcount;

	BUG_ON(!r5c_is_writeback(log));

	if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
		/*
		 * There are two different scenarios here:
		 *  1. The stripe has some data cached, and it is sent to
		 *     the write-out phase for reclaim
		 *  2. The stripe is clean, and this is the first write
		 *
		 * For 1, return -EAGAIN, so we continue with
		 * handle_stripe_dirtying().
		 *
		 * For 2, set STRIPE_R5C_CACHING and continue with the
		 * caching write.
		 */

		/* case 1: anything in s->injournal or s->written */
		if (s->injournal > 0 || s->written > 0)
			return -EAGAIN;
		/* case 2 */
		set_bit(STRIPE_R5C_CACHING, &sh->state);
	}

	/*
	 * When running in degraded mode, the array is set to write-through
	 * mode. This check helps drain pending writes safely during the
	 * transition to write-through mode.
	 *
	 * When a stripe is syncing, the write is also handled in
	 * write-through mode.
	 */
	if (s->failed || test_bit(STRIPE_SYNCING, &sh->state)) {
		r5c_make_stripe_write_out(sh);
		return -EAGAIN;
	}

	for (i = disks; i--; ) {
		dev = &sh->dev[i];
		/* if non-overwrite, use the writing-out phase */
		if (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags) &&
		    !test_bit(R5_InJournal, &dev->flags)) {
			r5c_make_stripe_write_out(sh);
			return -EAGAIN;
		}
	}

	/* if the stripe is not counted in big_stripe_tree, add it now */
	if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) &&
	    !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
		tree_index = r5c_tree_index(conf, sh->sector);
		spin_lock(&log->tree_lock);
		pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
					       tree_index);
		if (pslot) {
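			/*
			 * The slot itself stores the reference count,
			 * left-shifted by R5C_RADIX_COUNT_SHIFT (2) so the
			 * low bits of the "pointer" stay zero as the radix
			 * tree requires; e.g. a count of 3 is stored as
			 * (void *)(3 << 2).
			 */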
			refcount = (uintptr_t)radix_tree_deref_slot_protected(
				pslot, &log->tree_lock) >>
				R5C_RADIX_COUNT_SHIFT;
			radix_tree_replace_slot(
				&log->big_stripe_tree, pslot,
				(void *)((refcount + 1) << R5C_RADIX_COUNT_SHIFT));
		} else {
			/*
			 * this radix_tree_insert can fail safely, so no
			 * need to call radix_tree_preload()
			 */
			ret = radix_tree_insert(
				&log->big_stripe_tree, tree_index,
				(void *)(1 << R5C_RADIX_COUNT_SHIFT));
			if (ret) {
				spin_unlock(&log->tree_lock);
				r5c_make_stripe_write_out(sh);
				return -EAGAIN;
			}
		}
		spin_unlock(&log->tree_lock);

		/*
		 * Set STRIPE_R5C_PARTIAL_STRIPE to show that the stripe is
		 * counted in the radix tree.
		 */
		set_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state);
		atomic_inc(&conf->r5c_cached_partial_stripes);
	}

	for (i = disks; i--; ) {
		dev = &sh->dev[i];
		if (dev->towrite) {
			set_bit(R5_Wantwrite, &dev->flags);
			set_bit(R5_Wantdrain, &dev->flags);
			set_bit(R5_LOCKED, &dev->flags);
			to_cache++;
		}
	}

	if (to_cache) {
		set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
		/*
		 * set STRIPE_LOG_TRAPPED, which triggers r5c_cache_data()
		 * in ops_run_io(). STRIPE_LOG_TRAPPED will be cleared in
		 * r5c_handle_data_cached()
		 */
		set_bit(STRIPE_LOG_TRAPPED, &sh->state);
	}

	return 0;
}

/*
 * free extra pages (orig_page) we allocated for prexor
 */
void r5c_release_extra_page(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	int i;
	bool using_disk_info_extra_page;

	using_disk_info_extra_page =
		sh->dev[0].orig_page == conf->disks[0].extra_page;

	for (i = sh->disks; i--; )
		if (sh->dev[i].page != sh->dev[i].orig_page) {
			struct page *p = sh->dev[i].orig_page;

			sh->dev[i].orig_page = sh->dev[i].page;
			clear_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags);

			if (!using_disk_info_extra_page)
				put_page(p);
		}

	if (using_disk_info_extra_page) {
		clear_bit(R5C_EXTRA_PAGE_IN_USE, &conf->cache_state);
		md_wakeup_thread(conf->mddev->thread);
	}
}

void r5c_use_extra_page(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	int i;
	struct r5dev *dev;

	for (i = sh->disks; i--; ) {
		dev = &sh->dev[i];
		if (dev->orig_page != dev->page)
			put_page(dev->orig_page);
		dev->orig_page = conf->disks[i].extra_page;
	}
}

/*
 * clean up the stripe (clear R5_InJournal for dev[pd_idx] etc.) after the
 * stripe is committed to RAID disks.
 */
void r5c_finish_stripe_write_out(struct r5conf *conf,
				 struct stripe_head *sh,
				 struct stripe_head_state *s)
{
	struct r5l_log *log = READ_ONCE(conf->log);
	int i;
	sector_t tree_index;
	void __rcu **pslot;
	uintptr_t refcount;

	if (!log || !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags))
		return;

	WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
	clear_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
		return;

	for (i = sh->disks; i--; ) {
		clear_bit(R5_InJournal, &sh->dev[i].flags);
		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
			wake_up_bit(&sh->dev[i].flags, R5_Overlap);
	}

	/*
	 * analyse_stripe() runs before r5c_finish_stripe_write_out().
	 * We just cleared R5_InJournal, so we also update s->injournal
	 * to match.
	 */
	s->injournal = 0;

	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
		if (atomic_dec_and_test(&conf->pending_full_writes))
			md_wakeup_thread(conf->mddev->thread);

	spin_lock_irq(&log->stripe_in_journal_lock);
	list_del_init(&sh->r5c);
	spin_unlock_irq(&log->stripe_in_journal_lock);
	sh->log_start = MaxSector;

	atomic_dec(&log->stripe_in_journal_count);
	r5c_update_log_state(log);

	/* stop counting this stripe in big_stripe_tree */
	if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) ||
	    test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
		tree_index = r5c_tree_index(conf, sh->sector);
		spin_lock(&log->tree_lock);
		pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
					       tree_index);
		BUG_ON(pslot == NULL);
		refcount = (uintptr_t)radix_tree_deref_slot_protected(
			pslot, &log->tree_lock) >>
			R5C_RADIX_COUNT_SHIFT;
		if (refcount == 1)
			radix_tree_delete(&log->big_stripe_tree, tree_index);
		else
			radix_tree_replace_slot(
				&log->big_stripe_tree, pslot,
				(void *)((refcount - 1) << R5C_RADIX_COUNT_SHIFT));
		spin_unlock(&log->tree_lock);
	}

	if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) {
		BUG_ON(atomic_read(&conf->r5c_cached_partial_stripes) == 0);
		atomic_dec(&conf->r5c_flushing_partial_stripes);
		atomic_dec(&conf->r5c_cached_partial_stripes);
	}

	if (test_and_clear_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
		BUG_ON(atomic_read(&conf->r5c_cached_full_stripes) == 0);
		atomic_dec(&conf->r5c_flushing_full_stripes);
		atomic_dec(&conf->r5c_cached_full_stripes);
	}

	r5l_append_flush_payload(log, sh->sector);
	/* stripe is flushed to raid disks, we can do resync now */
	if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
		set_bit(STRIPE_HANDLE, &sh->state);
}

int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	int pages = 0;
	int reserve;
	int i;
	int ret = 0;

	BUG_ON(!log);

	for (i = 0; i < sh->disks; i++) {
		void *addr;

		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
			continue;
		addr = kmap_atomic(sh->dev[i].page);
		sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
						    addr, PAGE_SIZE);
		kunmap_atomic(addr);
		pages++;
	}
	WARN_ON(pages == 0);

	/*
	 * The stripe must enter the state machine again to call endio, so
	 * don't delay.
	 */
	clear_bit(STRIPE_DELAYED, &sh->state);
	atomic_inc(&sh->count);

	mutex_lock(&log->io_mutex);
	/* meta + data */
	reserve = (1 + pages) << (PAGE_SHIFT - 9);
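	/*
	 * (1 + pages) 4kB pages (one meta block plus the data pages),
	 * converted to 512-byte sectors for the space reservation.
	 */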

	if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
	    sh->log_start == MaxSector)
		r5l_add_no_space_stripe(log, sh);
	else if (!r5l_has_free_space(log, reserve)) {
		if (sh->log_start == log->last_checkpoint)
			BUG();
		else
			r5l_add_no_space_stripe(log, sh);
	} else {
		ret = r5l_log_stripe(log, sh, pages, 0);
		if (ret) {
			spin_lock_irq(&log->io_list_lock);
			list_add_tail(&sh->log_list, &log->no_mem_stripes);
			spin_unlock_irq(&log->io_list_lock);
		}
	}

	mutex_unlock(&log->io_mutex);
	return 0;
}

/* check whether this big stripe is in write back cache. */
bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect)
{
	struct r5l_log *log = READ_ONCE(conf->log);
	sector_t tree_index;
	void *slot;

	if (!log)
		return false;

	tree_index = r5c_tree_index(conf, sect);
	slot = radix_tree_lookup(&log->big_stripe_tree, tree_index);
	return slot != NULL;
}

static int r5l_load_log(struct r5l_log *log)
{
	struct md_rdev *rdev = log->rdev;
	struct page *page;
	struct r5l_meta_block *mb;
	sector_t cp = log->rdev->journal_tail;
	u32 stored_crc, expected_crc;
	bool create_super = false;
	int ret = 0;

	/* Make sure it's valid */
	if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
		cp = 0;
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, false)) {
		ret = -EIO;
		goto ioerr;
	}
	mb = page_address(page);

	if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
	    mb->version != R5LOG_VERSION) {
		create_super = true;
		goto create;
	}
	stored_crc = le32_to_cpu(mb->checksum);
	mb->checksum = 0;
	expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
	if (stored_crc != expected_crc) {
		create_super = true;
		goto create;
	}
	if (le64_to_cpu(mb->position) != cp) {
		create_super = true;
		goto create;
	}
create:
	if (create_super) {
		log->last_cp_seq = get_random_u32();
		cp = 0;
		r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq);
		/*
		 * Make sure the superblock points to the correct address.
		 * The log might have data very soon. If the superblock
		 * doesn't have the correct log tail address, recovery
		 * can't find the log.
		 */
		r5l_write_super(log, cp);
	} else
		log->last_cp_seq = le64_to_cpu(mb->seq);

	log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
	log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
	if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
		log->max_free_space = RECLAIM_MAX_FREE_SPACE;
	log->last_checkpoint = cp;

	__free_page(page);

	if (create_super) {
		log->log_start = r5l_ring_add(log, cp, BLOCK_SECTORS);
		log->seq = log->last_cp_seq + 1;
		log->next_checkpoint = cp;
	} else
		ret = r5l_recovery_log(log);

	r5c_update_log_state(log);
	return ret;
ioerr:
	__free_page(page);
	return ret;
}

int r5l_start(struct r5l_log *log)
{
	int ret;

	if (!log)
		return 0;

	ret = r5l_load_log(log);
	if (ret) {
		struct mddev *mddev = log->rdev->mddev;
		struct r5conf *conf = mddev->private;

		r5l_exit_log(conf);
	}
	return ret;
}

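/*
 * Called when a member device fails. If the failure leaves the array
 * degraded, or the failed device is the journal itself, write-back
 * caching is no longer safe, so schedule the switch to write-through.
 */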
void r5c_update_on_rdev_error(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r5conf *conf = mddev->private;
	struct r5l_log *log = READ_ONCE(conf->log);

	if (!log)
		return;

	if ((raid5_calc_degraded(conf) > 0 ||
	     test_bit(Journal, &rdev->flags)) &&
	    log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
		schedule_work(&log->disable_writeback_work);
}

int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
{
	struct r5l_log *log;
	struct md_thread *thread;
	int ret;

	pr_debug("md/raid:%s: using device %pg as journal\n",
		 mdname(conf->mddev), rdev->bdev);

	if (PAGE_SIZE != 4096)
		return -EINVAL;

	/*
	 * The PAGE_SIZE must be big enough to hold 1 r5l_meta_block and
	 * raid_disks r5l_payload_data_parity descriptors.
	 *
	 * The write journal and cache do not work for very big arrays
	 * (raid_disks > 203).
	 */
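	/*
	 * Illustrative arithmetic behind the 203 limit, assuming a 32-byte
	 * meta block and 20 bytes per payload descriptor (16-byte header
	 * plus one 4-byte checksum): (4096 - 32) / 20 = 203, rounded down.
	 */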
	if (sizeof(struct r5l_meta_block) +
	    ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) *
	     conf->raid_disks) > PAGE_SIZE) {
		pr_err("md/raid:%s: write journal/cache doesn't work for array with %d disks\n",
		       mdname(conf->mddev), conf->raid_disks);
		return -EINVAL;
	}

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		return -ENOMEM;
	log->rdev = rdev;
	log->need_cache_flush = bdev_write_cache(rdev->bdev);
	log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
				       sizeof(rdev->mddev->uuid));

	mutex_init(&log->io_mutex);

	spin_lock_init(&log->io_list_lock);
	INIT_LIST_HEAD(&log->running_ios);
	INIT_LIST_HEAD(&log->io_end_ios);
	INIT_LIST_HEAD(&log->flushing_ios);
	INIT_LIST_HEAD(&log->finished_ios);

	log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
	if (!log->io_kc)
		goto io_kc;

	ret = mempool_init_slab_pool(&log->io_pool, R5L_POOL_SIZE, log->io_kc);
	if (ret)
		goto io_pool;

	ret = bioset_init(&log->bs, R5L_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	if (ret)
		goto io_bs;

	ret = mempool_init_page_pool(&log->meta_pool, R5L_POOL_SIZE, 0);
	if (ret)
		goto out_mempool;

	spin_lock_init(&log->tree_lock);
	INIT_RADIX_TREE(&log->big_stripe_tree, GFP_NOWAIT | __GFP_NOWARN);

	thread = md_register_thread(r5l_reclaim_thread, log->rdev->mddev,
				    "reclaim");
	if (!thread)
		goto reclaim_thread;

	thread->timeout = R5C_RECLAIM_WAKEUP_INTERVAL;
	rcu_assign_pointer(log->reclaim_thread, thread);

	init_waitqueue_head(&log->iounit_wait);

	INIT_LIST_HEAD(&log->no_mem_stripes);

	INIT_LIST_HEAD(&log->no_space_stripes);
	spin_lock_init(&log->no_space_stripes_lock);

	INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
	INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async);

	log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
	INIT_LIST_HEAD(&log->stripe_in_journal_list);
	spin_lock_init(&log->stripe_in_journal_lock);
	atomic_set(&log->stripe_in_journal_count, 0);

	WRITE_ONCE(conf->log, log);

	set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
	return 0;

reclaim_thread:
	mempool_exit(&log->meta_pool);
out_mempool:
	bioset_exit(&log->bs);
io_bs:
	mempool_exit(&log->io_pool);
io_pool:
	kmem_cache_destroy(log->io_kc);
io_kc:
	kfree(log);
	return -EINVAL;
}

void r5l_exit_log(struct r5conf *conf)
{
	struct r5l_log *log = conf->log;

	md_unregister_thread(conf->mddev, &log->reclaim_thread);

	/*
	 * 'reconfig_mutex' is held by the caller; set 'conf->log' to NULL
	 * to ensure disable_writeback_work wakes up and exits.
	 */
	WRITE_ONCE(conf->log, NULL);
	wake_up(&conf->mddev->sb_wait);
	flush_work(&log->disable_writeback_work);

	mempool_exit(&log->meta_pool);
	bioset_exit(&log->bs);
	mempool_exit(&log->io_pool);
	kmem_cache_destroy(log->io_kc);
	kfree(log);
}
1/*
2 * Copyright (C) 2015 Shaohua Li <shli@fb.com>
3 * Copyright (C) 2016 Song Liu <songliubraving@fb.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 */
15#include <linux/kernel.h>
16#include <linux/wait.h>
17#include <linux/blkdev.h>
18#include <linux/slab.h>
19#include <linux/raid/md_p.h>
20#include <linux/crc32c.h>
21#include <linux/random.h>
22#include <linux/kthread.h>
23#include <linux/types.h>
24#include "md.h"
25#include "raid5.h"
26#include "md-bitmap.h"
27#include "raid5-log.h"
28
29/*
30 * metadata/data stored in disk with 4k size unit (a block) regardless
31 * underneath hardware sector size. only works with PAGE_SIZE == 4096
32 */
33#define BLOCK_SECTORS (8)
34#define BLOCK_SECTOR_SHIFT (3)
35
36/*
37 * log->max_free_space is min(1/4 disk size, 10G reclaimable space).
38 *
39 * In write through mode, the reclaim runs every log->max_free_space.
40 * This can prevent the recovery scans for too long
41 */
42#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
43#define RECLAIM_MAX_FREE_SPACE_SHIFT (2)
44
45/* wake up reclaim thread periodically */
46#define R5C_RECLAIM_WAKEUP_INTERVAL (30 * HZ)
47/* start flush with these full stripes */
48#define R5C_FULL_STRIPE_FLUSH_BATCH(conf) (conf->max_nr_stripes / 4)
49/* reclaim stripes in groups */
50#define R5C_RECLAIM_STRIPE_GROUP (NR_STRIPE_HASH_LOCKS * 2)
51
52/*
53 * We only need 2 bios per I/O unit to make progress, but ensure we
54 * have a few more available to not get too tight.
55 */
56#define R5L_POOL_SIZE 4
57
58static char *r5c_journal_mode_str[] = {"write-through",
59 "write-back"};
60/*
61 * raid5 cache state machine
62 *
63 * With the RAID cache, each stripe works in two phases:
64 * - caching phase
65 * - writing-out phase
66 *
67 * These two phases are controlled by bit STRIPE_R5C_CACHING:
68 * if STRIPE_R5C_CACHING == 0, the stripe is in writing-out phase
69 * if STRIPE_R5C_CACHING == 1, the stripe is in caching phase
70 *
71 * When there is no journal, or the journal is in write-through mode,
72 * the stripe is always in writing-out phase.
73 *
74 * For write-back journal, the stripe is sent to caching phase on write
75 * (r5c_try_caching_write). r5c_make_stripe_write_out() kicks off
76 * the write-out phase by clearing STRIPE_R5C_CACHING.
77 *
78 * Stripes in caching phase do not write the raid disks. Instead, all
79 * writes are committed from the log device. Therefore, a stripe in
80 * caching phase handles writes as:
81 * - write to log device
82 * - return IO
83 *
84 * Stripes in writing-out phase handle writes as:
85 * - calculate parity
86 * - write pending data and parity to journal
87 * - write data and parity to raid disks
88 * - return IO for pending writes
89 */
90
91struct r5l_log {
92 struct md_rdev *rdev;
93
94 u32 uuid_checksum;
95
96 sector_t device_size; /* log device size, round to
97 * BLOCK_SECTORS */
98 sector_t max_free_space; /* reclaim run if free space is at
99 * this size */
100
101 sector_t last_checkpoint; /* log tail. where recovery scan
102 * starts from */
103 u64 last_cp_seq; /* log tail sequence */
104
105 sector_t log_start; /* log head. where new data appends */
106 u64 seq; /* log head sequence */
107
108 sector_t next_checkpoint;
109
110 struct mutex io_mutex;
111 struct r5l_io_unit *current_io; /* current io_unit accepting new data */
112
113 spinlock_t io_list_lock;
114 struct list_head running_ios; /* io_units which are still running,
115 * and have not yet been completely
116 * written to the log */
117 struct list_head io_end_ios; /* io_units which have been completely
118 * written to the log but not yet written
119 * to the RAID */
120 struct list_head flushing_ios; /* io_units which are waiting for log
121 * cache flush */
122 struct list_head finished_ios; /* io_units which settle down in log disk */
123 struct bio flush_bio;
124
125 struct list_head no_mem_stripes; /* pending stripes, -ENOMEM */
126
127 struct kmem_cache *io_kc;
128 mempool_t *io_pool;
129 struct bio_set *bs;
130 mempool_t *meta_pool;
131
132 struct md_thread *reclaim_thread;
133 unsigned long reclaim_target; /* number of space that need to be
134 * reclaimed. if it's 0, reclaim spaces
135 * used by io_units which are in
136 * IO_UNIT_STRIPE_END state (eg, reclaim
137 * dones't wait for specific io_unit
138 * switching to IO_UNIT_STRIPE_END
139 * state) */
140 wait_queue_head_t iounit_wait;
141
142 struct list_head no_space_stripes; /* pending stripes, log has no space */
143 spinlock_t no_space_stripes_lock;
144
145 bool need_cache_flush;
146
147 /* for r5c_cache */
148 enum r5c_journal_mode r5c_journal_mode;
149
150 /* all stripes in r5cache, in the order of seq at sh->log_start */
151 struct list_head stripe_in_journal_list;
152
153 spinlock_t stripe_in_journal_lock;
154 atomic_t stripe_in_journal_count;
155
156 /* to submit async io_units, to fulfill ordering of flush */
157 struct work_struct deferred_io_work;
158 /* to disable write back during in degraded mode */
159 struct work_struct disable_writeback_work;
160
161 /* to for chunk_aligned_read in writeback mode, details below */
162 spinlock_t tree_lock;
163 struct radix_tree_root big_stripe_tree;
164};
165
166/*
167 * Enable chunk_aligned_read() with write back cache.
168 *
169 * Each chunk may contain more than one stripe (for example, a 256kB
170 * chunk contains 64 4kB-page, so this chunk contain 64 stripes). For
171 * chunk_aligned_read, these stripes are grouped into one "big_stripe".
172 * For each big_stripe, we count how many stripes of this big_stripe
173 * are in the write back cache. These data are tracked in a radix tree
174 * (big_stripe_tree). We use radix_tree item pointer as the counter.
175 * r5c_tree_index() is used to calculate keys for the radix tree.
176 *
177 * chunk_aligned_read() calls r5c_big_stripe_cached() to look up
178 * big_stripe of each chunk in the tree. If this big_stripe is in the
179 * tree, chunk_aligned_read() aborts. This look up is protected by
180 * rcu_read_lock().
181 *
182 * It is necessary to remember whether a stripe is counted in
183 * big_stripe_tree. Instead of adding new flag, we reuses existing flags:
184 * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE. If either of these
185 * two flags are set, the stripe is counted in big_stripe_tree. This
186 * requires moving set_bit(STRIPE_R5C_PARTIAL_STRIPE) to
187 * r5c_try_caching_write(); and moving clear_bit of
188 * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE to
189 * r5c_finish_stripe_write_out().
190 */
191
192/*
193 * radix tree requests lowest 2 bits of data pointer to be 2b'00.
194 * So it is necessary to left shift the counter by 2 bits before using it
195 * as data pointer of the tree.
196 */
197#define R5C_RADIX_COUNT_SHIFT 2
198
199/*
200 * calculate key for big_stripe_tree
201 *
202 * sect: align_bi->bi_iter.bi_sector or sh->sector
203 */
204static inline sector_t r5c_tree_index(struct r5conf *conf,
205 sector_t sect)
206{
207 sector_t offset;
208
209 offset = sector_div(sect, conf->chunk_sectors);
210 return sect;
211}
212
213/*
214 * an IO range starts from a meta data block and end at the next meta data
215 * block. The io unit's the meta data block tracks data/parity followed it. io
216 * unit is written to log disk with normal write, as we always flush log disk
217 * first and then start move data to raid disks, there is no requirement to
218 * write io unit with FLUSH/FUA
219 */
220struct r5l_io_unit {
221 struct r5l_log *log;
222
223 struct page *meta_page; /* store meta block */
224 int meta_offset; /* current offset in meta_page */
225
226 struct bio *current_bio;/* current_bio accepting new data */
227
228 atomic_t pending_stripe;/* how many stripes not flushed to raid */
229 u64 seq; /* seq number of the metablock */
230 sector_t log_start; /* where the io_unit starts */
231 sector_t log_end; /* where the io_unit ends */
232 struct list_head log_sibling; /* log->running_ios */
233 struct list_head stripe_list; /* stripes added to the io_unit */
234
235 int state;
236 bool need_split_bio;
237 struct bio *split_bio;
238
239 unsigned int has_flush:1; /* include flush request */
240 unsigned int has_fua:1; /* include fua request */
241 unsigned int has_null_flush:1; /* include null flush request */
242 unsigned int has_flush_payload:1; /* include flush payload */
243 /*
244 * io isn't sent yet, flush/fua request can only be submitted till it's
245 * the first IO in running_ios list
246 */
247 unsigned int io_deferred:1;
248
249 struct bio_list flush_barriers; /* size == 0 flush bios */
250};
251
252/* r5l_io_unit state */
253enum r5l_io_unit_state {
254 IO_UNIT_RUNNING = 0, /* accepting new IO */
255 IO_UNIT_IO_START = 1, /* io_unit bio start writing to log,
256 * don't accepting new bio */
257 IO_UNIT_IO_END = 2, /* io_unit bio finish writing to log */
258 IO_UNIT_STRIPE_END = 3, /* stripes data finished writing to raid */
259};
260
261bool r5c_is_writeback(struct r5l_log *log)
262{
263 return (log != NULL &&
264 log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK);
265}
266
267static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
268{
269 start += inc;
270 if (start >= log->device_size)
271 start = start - log->device_size;
272 return start;
273}
274
275static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
276 sector_t end)
277{
278 if (end >= start)
279 return end - start;
280 else
281 return end + log->device_size - start;
282}
283
284static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
285{
286 sector_t used_size;
287
288 used_size = r5l_ring_distance(log, log->last_checkpoint,
289 log->log_start);
290
291 return log->device_size > used_size + size;
292}
293
294static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
295 enum r5l_io_unit_state state)
296{
297 if (WARN_ON(io->state >= state))
298 return;
299 io->state = state;
300}
301
302static void
303r5c_return_dev_pending_writes(struct r5conf *conf, struct r5dev *dev)
304{
305 struct bio *wbi, *wbi2;
306
307 wbi = dev->written;
308 dev->written = NULL;
309 while (wbi && wbi->bi_iter.bi_sector <
310 dev->sector + STRIPE_SECTORS) {
311 wbi2 = r5_next_bio(wbi, dev->sector);
312 md_write_end(conf->mddev);
313 bio_endio(wbi);
314 wbi = wbi2;
315 }
316}
317
318void r5c_handle_cached_data_endio(struct r5conf *conf,
319 struct stripe_head *sh, int disks)
320{
321 int i;
322
323 for (i = sh->disks; i--; ) {
324 if (sh->dev[i].written) {
325 set_bit(R5_UPTODATE, &sh->dev[i].flags);
326 r5c_return_dev_pending_writes(conf, &sh->dev[i]);
327 bitmap_endwrite(conf->mddev->bitmap, sh->sector,
328 STRIPE_SECTORS,
329 !test_bit(STRIPE_DEGRADED, &sh->state),
330 0);
331 }
332 }
333}
334
335void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
336
337/* Check whether we should flush some stripes to free up stripe cache */
338void r5c_check_stripe_cache_usage(struct r5conf *conf)
339{
340 int total_cached;
341
342 if (!r5c_is_writeback(conf->log))
343 return;
344
345 total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
346 atomic_read(&conf->r5c_cached_full_stripes);
347
348 /*
349 * The following condition is true for either of the following:
350 * - stripe cache pressure high:
351 * total_cached > 3/4 min_nr_stripes ||
352 * empty_inactive_list_nr > 0
353 * - stripe cache pressure moderate:
354 * total_cached > 1/2 min_nr_stripes
355 */
356 if (total_cached > conf->min_nr_stripes * 1 / 2 ||
357 atomic_read(&conf->empty_inactive_list_nr) > 0)
358 r5l_wake_reclaim(conf->log, 0);
359}
360
361/*
362 * flush cache when there are R5C_FULL_STRIPE_FLUSH_BATCH or more full
363 * stripes in the cache
364 */
365void r5c_check_cached_full_stripe(struct r5conf *conf)
366{
367 if (!r5c_is_writeback(conf->log))
368 return;
369
370 /*
371 * wake up reclaim for R5C_FULL_STRIPE_FLUSH_BATCH cached stripes
372 * or a full stripe (chunk size / 4k stripes).
373 */
374 if (atomic_read(&conf->r5c_cached_full_stripes) >=
375 min(R5C_FULL_STRIPE_FLUSH_BATCH(conf),
376 conf->chunk_sectors >> STRIPE_SHIFT))
377 r5l_wake_reclaim(conf->log, 0);
378}
379
380/*
381 * Total log space (in sectors) needed to flush all data in cache
382 *
383 * To avoid deadlock due to log space, it is necessary to reserve log
384 * space to flush critical stripes (stripes that occupying log space near
385 * last_checkpoint). This function helps check how much log space is
386 * required to flush all cached stripes.
387 *
388 * To reduce log space requirements, two mechanisms are used to give cache
389 * flush higher priorities:
390 * 1. In handle_stripe_dirtying() and schedule_reconstruction(),
391 * stripes ALREADY in journal can be flushed w/o pending writes;
392 * 2. In r5l_write_stripe() and r5c_cache_data(), stripes NOT in journal
393 * can be delayed (r5l_add_no_space_stripe).
394 *
395 * In cache flush, the stripe goes through 1 and then 2. For a stripe that
396 * already passed 1, flushing it requires at most (conf->max_degraded + 1)
397 * pages of journal space. For stripes that has not passed 1, flushing it
398 * requires (conf->raid_disks + 1) pages of journal space. There are at
399 * most (conf->group_cnt + 1) stripe that passed 1. So total journal space
400 * required to flush all cached stripes (in pages) is:
401 *
402 * (stripe_in_journal_count - group_cnt - 1) * (max_degraded + 1) +
403 * (group_cnt + 1) * (raid_disks + 1)
404 * or
405 * (stripe_in_journal_count) * (max_degraded + 1) +
406 * (group_cnt + 1) * (raid_disks - max_degraded)
407 */
408static sector_t r5c_log_required_to_flush_cache(struct r5conf *conf)
409{
410 struct r5l_log *log = conf->log;
411
412 if (!r5c_is_writeback(log))
413 return 0;
414
415 return BLOCK_SECTORS *
416 ((conf->max_degraded + 1) * atomic_read(&log->stripe_in_journal_count) +
417 (conf->raid_disks - conf->max_degraded) * (conf->group_cnt + 1));
418}
419
420/*
421 * evaluate log space usage and update R5C_LOG_TIGHT and R5C_LOG_CRITICAL
422 *
423 * R5C_LOG_TIGHT is set when free space on the log device is less than 3x of
424 * reclaim_required_space. R5C_LOG_CRITICAL is set when free space on the log
425 * device is less than 2x of reclaim_required_space.
426 */
427static inline void r5c_update_log_state(struct r5l_log *log)
428{
429 struct r5conf *conf = log->rdev->mddev->private;
430 sector_t free_space;
431 sector_t reclaim_space;
432 bool wake_reclaim = false;
433
434 if (!r5c_is_writeback(log))
435 return;
436
437 free_space = r5l_ring_distance(log, log->log_start,
438 log->last_checkpoint);
439 reclaim_space = r5c_log_required_to_flush_cache(conf);
440 if (free_space < 2 * reclaim_space)
441 set_bit(R5C_LOG_CRITICAL, &conf->cache_state);
442 else {
443 if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
444 wake_reclaim = true;
445 clear_bit(R5C_LOG_CRITICAL, &conf->cache_state);
446 }
447 if (free_space < 3 * reclaim_space)
448 set_bit(R5C_LOG_TIGHT, &conf->cache_state);
449 else
450 clear_bit(R5C_LOG_TIGHT, &conf->cache_state);
451
452 if (wake_reclaim)
453 r5l_wake_reclaim(log, 0);
454}

/*
 * Put the stripe into writing-out phase by clearing STRIPE_R5C_CACHING.
 * This function should only be called in write-back mode.
 */
void r5c_make_stripe_write_out(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	struct r5l_log *log = conf->log;

	BUG_ON(!r5c_is_writeback(log));

	WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
	clear_bit(STRIPE_R5C_CACHING, &sh->state);

	if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
		atomic_inc(&conf->preread_active_stripes);
}

static void r5c_handle_data_cached(struct stripe_head *sh)
{
	int i;

	for (i = sh->disks; i--; )
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
			set_bit(R5_InJournal, &sh->dev[i].flags);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
		}
	clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
}

/*
 * this journal write must contain full parity,
 * it may also contain some data pages
 */
static void r5c_handle_parity_cached(struct stripe_head *sh)
{
	int i;

	for (i = sh->disks; i--; )
		if (test_bit(R5_InJournal, &sh->dev[i].flags))
			set_bit(R5_Wantwrite, &sh->dev[i].flags);
}

/*
 * Setting proper flags after writing (or flushing) data and/or parity to the
 * log device. This is called from r5l_log_endio() or r5l_log_flush_endio().
 */
static void r5c_finish_cache_stripe(struct stripe_head *sh)
{
	struct r5l_log *log = sh->raid_conf->log;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
		BUG_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
		/*
		 * Set R5_InJournal for parity dev[pd_idx]. This means
		 * all data AND parity are in the journal. For RAID 6, it is
		 * NOT necessary to set the flag for dev[qd_idx], as the
		 * two parities are written out together.
		 */
		set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
	} else if (test_bit(STRIPE_R5C_CACHING, &sh->state)) {
		r5c_handle_data_cached(sh);
	} else {
		r5c_handle_parity_cached(sh);
		set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
	}
}

static void r5l_io_run_stripes(struct r5l_io_unit *io)
{
	struct stripe_head *sh, *next;

	list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
		list_del_init(&sh->log_list);

		r5c_finish_cache_stripe(sh);

		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
}

static void r5l_log_run_stripes(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;

	lockdep_assert_held(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_IO_END)
			break;

		list_move_tail(&io->log_sibling, &log->finished_ios);
		r5l_io_run_stripes(io);
	}
}

static void r5l_move_to_end_ios(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;

	lockdep_assert_held(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_IO_END)
			break;
		list_move_tail(&io->log_sibling, &log->io_end_ios);
	}
}

static void __r5l_stripe_write_finished(struct r5l_io_unit *io);
static void r5l_log_endio(struct bio *bio)
{
	struct r5l_io_unit *io = bio->bi_private;
	struct r5l_io_unit *io_deferred;
	struct r5l_log *log = io->log;
	unsigned long flags;
	bool has_null_flush;
	bool has_flush_payload;

	if (bio->bi_status)
		md_error(log->rdev->mddev, log->rdev);

	bio_put(bio);
	mempool_free(io->meta_page, log->meta_pool);

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_END);

	/*
	 * if the io does not have a null_flush or flush payload,
	 * it is not safe to access it after releasing io_list_lock.
	 * Therefore, it is necessary to check the condition with
	 * the lock held.
	 */
	has_null_flush = io->has_null_flush;
	has_flush_payload = io->has_flush_payload;

	if (log->need_cache_flush && !list_empty(&io->stripe_list))
		r5l_move_to_end_ios(log);
	else
		r5l_log_run_stripes(log);
	if (!list_empty(&log->running_ios)) {
		/*
		 * FLUSH/FUA io_unit is deferred because of ordering, now we
		 * can dispatch it
		 */
		io_deferred = list_first_entry(&log->running_ios,
					       struct r5l_io_unit, log_sibling);
		if (io_deferred->io_deferred)
			schedule_work(&log->deferred_io_work);
	}

	spin_unlock_irqrestore(&log->io_list_lock, flags);

	if (log->need_cache_flush)
		md_wakeup_thread(log->rdev->mddev->thread);

	/* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */
	if (has_null_flush) {
		struct bio *bi;

		WARN_ON(bio_list_empty(&io->flush_barriers));
		while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) {
			bio_endio(bi);
			if (atomic_dec_and_test(&io->pending_stripe)) {
				__r5l_stripe_write_finished(io);
				return;
			}
		}
	}
	/* decrease pending_stripe for flush payload */
	if (has_flush_payload)
		if (atomic_dec_and_test(&io->pending_stripe))
			__r5l_stripe_write_finished(io);
}

static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
{
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_START);
	spin_unlock_irqrestore(&log->io_list_lock, flags);

	/*
	 * In case of journal device failures, submit_bio will get an error
	 * and call endio, and the active stripes will then continue the
	 * write process. Therefore, it is not necessary to check the Faulty
	 * bit of the journal device here.
	 *
	 * We can't check io->split_bio after current_bio is submitted: if
	 * io->split_bio is null, current_bio might already be completed and
	 * the io_unit freed by the time we look. We submit split_bio first
	 * to avoid the issue.
	 */
	if (io->split_bio) {
		if (io->has_flush)
			io->split_bio->bi_opf |= REQ_PREFLUSH;
		if (io->has_fua)
			io->split_bio->bi_opf |= REQ_FUA;
		submit_bio(io->split_bio);
	}

	if (io->has_flush)
		io->current_bio->bi_opf |= REQ_PREFLUSH;
	if (io->has_fua)
		io->current_bio->bi_opf |= REQ_FUA;
	submit_bio(io->current_bio);
}

/* deferred io_unit will be dispatched here */
static void r5l_submit_io_async(struct work_struct *work)
{
	struct r5l_log *log = container_of(work, struct r5l_log,
					   deferred_io_work);
	struct r5l_io_unit *io = NULL;
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	if (!list_empty(&log->running_ios)) {
		io = list_first_entry(&log->running_ios, struct r5l_io_unit,
				      log_sibling);
		if (!io->io_deferred)
			io = NULL;
		else
			io->io_deferred = 0;
	}
	spin_unlock_irqrestore(&log->io_list_lock, flags);
	if (io)
		r5l_do_submit_io(log, io);
}

static void r5c_disable_writeback_async(struct work_struct *work)
{
	struct r5l_log *log = container_of(work, struct r5l_log,
					   disable_writeback_work);
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;
	int locked = 0;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
		return;
	pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n",
		mdname(mddev));

	/* wait for superblock change before suspend */
	wait_event(mddev->sb_wait,
		   conf->log == NULL ||
		   (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) &&
		    (locked = mddev_trylock(mddev))));
	if (locked) {
		mddev_suspend(mddev);
		log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
		mddev_resume(mddev);
		mddev_unlock(mddev);
	}
}

static void r5l_submit_current_io(struct r5l_log *log)
{
	struct r5l_io_unit *io = log->current_io;
	struct bio *bio;
	struct r5l_meta_block *block;
	unsigned long flags;
	u32 crc;
	bool do_submit = true;

	if (!io)
		return;

	block = page_address(io->meta_page);
	block->meta_size = cpu_to_le32(io->meta_offset);
	crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
	block->checksum = cpu_to_le32(crc);
	bio = io->current_bio;

	log->current_io = NULL;
	spin_lock_irqsave(&log->io_list_lock, flags);
	if (io->has_flush || io->has_fua) {
		if (io != list_first_entry(&log->running_ios,
					   struct r5l_io_unit, log_sibling)) {
			io->io_deferred = 1;
			do_submit = false;
		}
	}
	spin_unlock_irqrestore(&log->io_list_lock, flags);
	if (do_submit)
		r5l_do_submit_io(log, io);
}

static struct bio *r5l_bio_alloc(struct r5l_log *log)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, log->bs);

	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	bio_set_dev(bio, log->rdev->bdev);
	bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;

	return bio;
}

static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
{
	log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);

	r5c_update_log_state(log);
	/*
	 * If we filled up the log device, start from the beginning again,
	 * which will require a new bio.
	 *
	 * Note: for this to work properly the log size needs to be a multiple
	 * of BLOCK_SECTORS.
	 */
	if (log->log_start == 0)
		io->need_split_bio = true;

	io->log_end = log->log_start;
}
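
/*
 * Ring arithmetic sketch (editorial illustration; numbers are hypothetical):
 * r5l_ring_add() advances a position modulo the log size. With
 * device_size = 1000 * BLOCK_SECTORS, a log_start at sector 7992 advances
 * past the last block straight to 0. That wrap is why need_split_bio is
 * set: the pages that follow must go into a fresh bio that starts again
 * at the head of the device, chained to the current one.
 */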

static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
{
	struct r5l_io_unit *io;
	struct r5l_meta_block *block;

	io = mempool_alloc(log->io_pool, GFP_ATOMIC);
	if (!io)
		return NULL;
	memset(io, 0, sizeof(*io));

	io->log = log;
	INIT_LIST_HEAD(&io->log_sibling);
	INIT_LIST_HEAD(&io->stripe_list);
	bio_list_init(&io->flush_barriers);
	io->state = IO_UNIT_RUNNING;

	io->meta_page = mempool_alloc(log->meta_pool, GFP_NOIO);
	block = page_address(io->meta_page);
	clear_page(block);
	block->magic = cpu_to_le32(R5LOG_MAGIC);
	block->version = R5LOG_VERSION;
	block->seq = cpu_to_le64(log->seq);
	block->position = cpu_to_le64(log->log_start);

	io->log_start = log->log_start;
	io->meta_offset = sizeof(struct r5l_meta_block);
	io->seq = log->seq++;

	io->current_bio = r5l_bio_alloc(log);
	io->current_bio->bi_end_io = r5l_log_endio;
	io->current_bio->bi_private = io;
	bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0);

	r5_reserve_log_entry(log, io);

	spin_lock_irq(&log->io_list_lock);
	list_add_tail(&io->log_sibling, &log->running_ios);
	spin_unlock_irq(&log->io_list_lock);

	return io;
}

static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
{
	if (log->current_io &&
	    log->current_io->meta_offset + payload_size > PAGE_SIZE)
		r5l_submit_current_io(log);

	if (!log->current_io) {
		log->current_io = r5l_new_meta(log);
		if (!log->current_io)
			return -ENOMEM;
	}

	return 0;
}

static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
				    sector_t location,
				    u32 checksum1, u32 checksum2,
				    bool checksum2_valid)
{
	struct r5l_io_unit *io = log->current_io;
	struct r5l_payload_data_parity *payload;

	payload = page_address(io->meta_page) + io->meta_offset;
	payload->header.type = cpu_to_le16(type);
	payload->header.flags = cpu_to_le16(0);
	payload->size = cpu_to_le32((1 + !!checksum2_valid) <<
				    (PAGE_SHIFT - 9));
	payload->location = cpu_to_le64(location);
	payload->checksum[0] = cpu_to_le32(checksum1);
	if (checksum2_valid)
		payload->checksum[1] = cpu_to_le32(checksum2);

	io->meta_offset += sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * (1 + !!checksum2_valid);
}
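
/*
 * Accounting example for the helper above: a single 4KB data page
 * (checksum2_valid == false) records payload->size =
 * 1 << (PAGE_SHIFT - 9) = 8 sectors and advances meta_offset by the
 * payload header plus one __le32 checksum. A RAID6 parity pair
 * (checksum2_valid == true) shares one payload: size doubles to 16
 * sectors and both checksums are stored.
 */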

static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
{
	struct r5l_io_unit *io = log->current_io;

	if (io->need_split_bio) {
		BUG_ON(io->split_bio);
		io->split_bio = io->current_bio;
		io->current_bio = r5l_bio_alloc(log);
		bio_chain(io->current_bio, io->split_bio);
		io->need_split_bio = false;
	}

	if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
		BUG();

	r5_reserve_log_entry(log, io);
}

static void r5l_append_flush_payload(struct r5l_log *log, sector_t sect)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;
	struct r5l_io_unit *io;
	struct r5l_payload_flush *payload;
	int meta_size;

	/*
	 * payload_flush requires extra writes to the journal.
	 * To avoid handling the extra IO in quiesce, just skip
	 * flush_payload
	 */
	if (conf->quiesce)
		return;

	mutex_lock(&log->io_mutex);
	meta_size = sizeof(struct r5l_payload_flush) + sizeof(__le64);

	if (r5l_get_meta(log, meta_size)) {
		mutex_unlock(&log->io_mutex);
		return;
	}

	/* current implementation is one stripe per flush payload */
	io = log->current_io;
	payload = page_address(io->meta_page) + io->meta_offset;
	payload->header.type = cpu_to_le16(R5LOG_PAYLOAD_FLUSH);
	payload->header.flags = cpu_to_le16(0);
	payload->size = cpu_to_le32(sizeof(__le64));
	payload->flush_stripes[0] = cpu_to_le64(sect);
	io->meta_offset += meta_size;
	/* multiple flush payloads count as one pending_stripe */
	if (!io->has_flush_payload) {
		io->has_flush_payload = 1;
		atomic_inc(&io->pending_stripe);
	}
	mutex_unlock(&log->io_mutex);
}

static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
			  int data_pages, int parity_pages)
{
	int i;
	int meta_size;
	int ret;
	struct r5l_io_unit *io;

	meta_size =
		((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
		 * data_pages) +
		sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * parity_pages;
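
	/*
	 * e.g. RAID5 with 3 data pages and 1 parity page: meta_size is
	 * 3 * (payload header + 1 checksum) + 1 payload header +
	 * 1 checksum; data pages get a payload each, while the parity
	 * pages of a stripe share a single payload.
	 */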

	ret = r5l_get_meta(log, meta_size);
	if (ret)
		return ret;

	io = log->current_io;

	if (test_and_clear_bit(STRIPE_R5C_PREFLUSH, &sh->state))
		io->has_flush = 1;

	for (i = 0; i < sh->disks; i++) {
		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
		    test_bit(R5_InJournal, &sh->dev[i].flags))
			continue;
		if (i == sh->pd_idx || i == sh->qd_idx)
			continue;
		if (test_bit(R5_WantFUA, &sh->dev[i].flags) &&
		    log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) {
			io->has_fua = 1;
			/*
			 * we need to flush the journal to make sure recovery
			 * can reach the data with the fua flag
			 */
			io->has_flush = 1;
		}
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
					raid5_compute_blocknr(sh, i, 0),
					sh->dev[i].log_checksum, 0, false);
		r5l_append_payload_page(log, sh->dev[i].page);
	}

	if (parity_pages == 2) {
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
					sh->sector, sh->dev[sh->pd_idx].log_checksum,
					sh->dev[sh->qd_idx].log_checksum, true);
		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
		r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
	} else if (parity_pages == 1) {
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
					sh->sector, sh->dev[sh->pd_idx].log_checksum,
					0, false);
		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
	} else /* Just writing data, not parity, in caching phase */
		BUG_ON(parity_pages != 0);

	list_add_tail(&sh->log_list, &io->stripe_list);
	atomic_inc(&io->pending_stripe);
	sh->log_io = io;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
		return 0;

	if (sh->log_start == MaxSector) {
		BUG_ON(!list_empty(&sh->r5c));
		sh->log_start = io->log_start;
		spin_lock_irq(&log->stripe_in_journal_lock);
		list_add_tail(&sh->r5c,
			      &log->stripe_in_journal_list);
		spin_unlock_irq(&log->stripe_in_journal_lock);
		atomic_inc(&log->stripe_in_journal_count);
	}
	return 0;
}

/* add stripe to no_space_stripes, and then wake up reclaim */
static inline void r5l_add_no_space_stripe(struct r5l_log *log,
					   struct stripe_head *sh)
{
	spin_lock(&log->no_space_stripes_lock);
	list_add_tail(&sh->log_list, &log->no_space_stripes);
	spin_unlock(&log->no_space_stripes_lock);
}

/*
 * running in raid5d, where reclaim could wait for raid5d too (when it flushes
 * data from log to raid disks), so we shouldn't wait for reclaim here
 */
int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	int write_disks = 0;
	int data_pages, parity_pages;
	int reserve;
	int i;
	int ret = 0;
	bool wake_reclaim = false;

	if (!log)
		return -EAGAIN;
	/* Don't support stripe batch */
	if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
	    test_bit(STRIPE_SYNCING, &sh->state)) {
		/* the stripe is written to log, we start writing it to raid */
		clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
		return -EAGAIN;
	}

	WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));

	for (i = 0; i < sh->disks; i++) {
		void *addr;

		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
		    test_bit(R5_InJournal, &sh->dev[i].flags))
			continue;

		write_disks++;
		/* checksum is already calculated in last run */
		if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
			continue;
		addr = kmap_atomic(sh->dev[i].page);
		sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
						    addr, PAGE_SIZE);
		kunmap_atomic(addr);
	}
	parity_pages = 1 + !!(sh->qd_idx >= 0);
	data_pages = write_disks - parity_pages;

	set_bit(STRIPE_LOG_TRAPPED, &sh->state);
	/*
	 * The stripe must enter state machine again to finish the write, so
	 * don't delay.
	 */
	clear_bit(STRIPE_DELAYED, &sh->state);
	atomic_inc(&sh->count);

	mutex_lock(&log->io_mutex);
	/* meta + data */
	reserve = (1 + write_disks) << (PAGE_SHIFT - 9);
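
	/*
	 * e.g. a full-stripe write on a 5-disk RAID5 array has
	 * write_disks = 5 (4 data + 1 parity), so reserve =
	 * (1 + 5) << 3 = 48 sectors: one block for the meta page plus
	 * one block per journaled page.
	 */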

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
		if (!r5l_has_free_space(log, reserve)) {
			r5l_add_no_space_stripe(log, sh);
			wake_reclaim = true;
		} else {
			ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
			if (ret) {
				spin_lock_irq(&log->io_list_lock);
				list_add_tail(&sh->log_list,
					      &log->no_mem_stripes);
				spin_unlock_irq(&log->io_list_lock);
			}
		}
	} else {	/* R5C_JOURNAL_MODE_WRITE_BACK */
		/*
		 * log space critical, do not process stripes that are
		 * not in cache yet (sh->log_start == MaxSector).
		 */
		if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
		    sh->log_start == MaxSector) {
			r5l_add_no_space_stripe(log, sh);
			wake_reclaim = true;
			reserve = 0;
		} else if (!r5l_has_free_space(log, reserve)) {
			if (sh->log_start == log->last_checkpoint)
				BUG();
			else
				r5l_add_no_space_stripe(log, sh);
		} else {
			ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
			if (ret) {
				spin_lock_irq(&log->io_list_lock);
				list_add_tail(&sh->log_list,
					      &log->no_mem_stripes);
				spin_unlock_irq(&log->io_list_lock);
			}
		}
	}

	mutex_unlock(&log->io_mutex);
	if (wake_reclaim)
		r5l_wake_reclaim(log, reserve);
	return 0;
}

void r5l_write_stripe_run(struct r5l_log *log)
{
	if (!log)
		return;
	mutex_lock(&log->io_mutex);
	r5l_submit_current_io(log);
	mutex_unlock(&log->io_mutex);
}

int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
{
	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
		/*
		 * in write through (journal only)
		 * we flush log disk cache first, then write stripe data to
		 * raid disks. So if bio is finished, the log disk cache is
		 * flushed already. The recovery guarantees we can recover
		 * the bio from the log disk, so we don't need to flush again.
		 */
		if (bio->bi_iter.bi_size == 0) {
			bio_endio(bio);
			return 0;
		}
		bio->bi_opf &= ~REQ_PREFLUSH;
	} else {
		/* write back (with cache) */
		if (bio->bi_iter.bi_size == 0) {
			mutex_lock(&log->io_mutex);
			r5l_get_meta(log, 0);
			bio_list_add(&log->current_io->flush_barriers, bio);
			log->current_io->has_flush = 1;
			log->current_io->has_null_flush = 1;
			atomic_inc(&log->current_io->pending_stripe);
			r5l_submit_current_io(log);
			mutex_unlock(&log->io_mutex);
			return 0;
		}
	}
	return -EAGAIN;
}

/* This will run after log space is reclaimed */
static void r5l_run_no_space_stripes(struct r5l_log *log)
{
	struct stripe_head *sh;

	spin_lock(&log->no_space_stripes_lock);
	while (!list_empty(&log->no_space_stripes)) {
		sh = list_first_entry(&log->no_space_stripes,
				      struct stripe_head, log_list);
		list_del_init(&sh->log_list);
		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
	spin_unlock(&log->no_space_stripes_lock);
}

/*
 * calculate new last_checkpoint
 * for write through mode, returns log->next_checkpoint
 * for write back, returns log_start of first sh in stripe_in_journal_list
 */
static sector_t r5c_calculate_new_cp(struct r5conf *conf)
{
	struct stripe_head *sh;
	struct r5l_log *log = conf->log;
	sector_t new_cp;
	unsigned long flags;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
		return log->next_checkpoint;

	spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
	if (list_empty(&conf->log->stripe_in_journal_list)) {
		/* all stripes flushed */
		spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
		return log->next_checkpoint;
	}
	sh = list_first_entry(&conf->log->stripe_in_journal_list,
			      struct stripe_head, r5c);
	new_cp = sh->log_start;
	spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
	return new_cp;
}

static sector_t r5l_reclaimable_space(struct r5l_log *log)
{
	struct r5conf *conf = log->rdev->mddev->private;

	return r5l_ring_distance(log, log->last_checkpoint,
				 r5c_calculate_new_cp(conf));
}
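
/*
 * r5l_ring_distance() measures the forward distance on the circular log.
 * Illustrative numbers: with device_size = 1000 sectors,
 * last_checkpoint = 900 and a new checkpoint at sector 100, the
 * reclaimable span is 200 sectors, wrapping over the end of the device.
 */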

static void r5l_run_no_mem_stripe(struct r5l_log *log)
{
	struct stripe_head *sh;

	lockdep_assert_held(&log->io_list_lock);

	if (!list_empty(&log->no_mem_stripes)) {
		sh = list_first_entry(&log->no_mem_stripes,
				      struct stripe_head, log_list);
		list_del_init(&sh->log_list);
		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
}

static bool r5l_complete_finished_ios(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;
	bool found = false;

	lockdep_assert_held(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_STRIPE_END)
			break;

		log->next_checkpoint = io->log_start;

		list_del(&io->log_sibling);
		mempool_free(io, log->io_pool);
		r5l_run_no_mem_stripe(log);

		found = true;
	}

	return found;
}

static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
{
	struct r5l_log *log = io->log;
	struct r5conf *conf = log->rdev->mddev->private;
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END);

	if (!r5l_complete_finished_ios(log)) {
		spin_unlock_irqrestore(&log->io_list_lock, flags);
		return;
	}

	if (r5l_reclaimable_space(log) > log->max_free_space ||
	    test_bit(R5C_LOG_TIGHT, &conf->cache_state))
		r5l_wake_reclaim(log, 0);

	spin_unlock_irqrestore(&log->io_list_lock, flags);
	wake_up(&log->iounit_wait);
}

void r5l_stripe_write_finished(struct stripe_head *sh)
{
	struct r5l_io_unit *io;

	io = sh->log_io;
	sh->log_io = NULL;

	if (io && atomic_dec_and_test(&io->pending_stripe))
		__r5l_stripe_write_finished(io);
}

static void r5l_log_flush_endio(struct bio *bio)
{
	struct r5l_log *log = container_of(bio, struct r5l_log,
					   flush_bio);
	unsigned long flags;
	struct r5l_io_unit *io;

	if (bio->bi_status)
		md_error(log->rdev->mddev, log->rdev);

	spin_lock_irqsave(&log->io_list_lock, flags);
	list_for_each_entry(io, &log->flushing_ios, log_sibling)
		r5l_io_run_stripes(io);
	list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
	spin_unlock_irqrestore(&log->io_list_lock, flags);
}

/*
 * Starting dispatch IO to raid.
 * The log consists of a sequence of io_units (meta blocks). There is one
 * situation we want to avoid: a broken meta block in the middle of the log
 * prevents recovery from finding the meta blocks at the head of the log. So
 * if an operation requires a meta block at the head to be persistent in the
 * log, we must make sure the meta blocks before it are persistent in the
 * log too. A case is:
 *
 * stripe data/parity is in the log, and we start writing the stripe to the
 * raid disks. The stripe data/parity must be persistent in the log before
 * we do the write to the raid disks.
 *
 * The solution is to strictly maintain the io_unit list order. In this case,
 * we only write stripes of an io_unit to the raid disks once that io_unit is
 * the first one whose data/parity is in the log.
 */
void r5l_flush_stripe_to_raid(struct r5l_log *log)
{
	bool do_flush;

	if (!log || !log->need_cache_flush)
		return;

	spin_lock_irq(&log->io_list_lock);
	/* flush bio is running */
	if (!list_empty(&log->flushing_ios)) {
		spin_unlock_irq(&log->io_list_lock);
		return;
	}
	list_splice_tail_init(&log->io_end_ios, &log->flushing_ios);
	do_flush = !list_empty(&log->flushing_ios);
	spin_unlock_irq(&log->io_list_lock);

	if (!do_flush)
		return;
	bio_reset(&log->flush_bio);
	bio_set_dev(&log->flush_bio, log->rdev->bdev);
	log->flush_bio.bi_end_io = r5l_log_flush_endio;
	log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
	submit_bio(&log->flush_bio);
}

static void r5l_write_super(struct r5l_log *log, sector_t cp);
static void r5l_write_super_and_discard_space(struct r5l_log *log,
					      sector_t end)
{
	struct block_device *bdev = log->rdev->bdev;
	struct mddev *mddev;

	r5l_write_super(log, end);

	if (!blk_queue_discard(bdev_get_queue(bdev)))
		return;

	mddev = log->rdev->mddev;
	/*
	 * Discard could zero data, so before discard we must make sure
	 * the superblock is updated to the new log tail. Updating the
	 * superblock (either directly calling md_update_sb() or depending
	 * on the md thread) must hold the reconfig mutex. On the other hand,
	 * raid5_quiesce is called with the reconfig_mutex held. The first
	 * step of raid5_quiesce() is waiting for all IO to finish, hence
	 * waiting for the reclaim thread, while the reclaim thread is
	 * calling this function and waiting for the reconfig mutex. So
	 * there is a deadlock. We work around this issue with a trylock.
	 * FIXME: we could miss discard if we can't take reconfig mutex
	 */
	set_mask_bits(&mddev->sb_flags, 0,
		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
	if (!mddev_trylock(mddev))
		return;
	md_update_sb(mddev, 1);
	mddev_unlock(mddev);

	/* discard IO error really doesn't matter, ignore it */
	if (log->last_checkpoint < end) {
		blkdev_issue_discard(bdev,
				log->last_checkpoint + log->rdev->data_offset,
				end - log->last_checkpoint, GFP_NOIO, 0);
	} else {
		blkdev_issue_discard(bdev,
				log->last_checkpoint + log->rdev->data_offset,
				log->device_size - log->last_checkpoint,
				GFP_NOIO, 0);
		blkdev_issue_discard(bdev, log->rdev->data_offset, end,
				GFP_NOIO, 0);
	}
}
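
/*
 * Discard layout example (hypothetical positions, relative to data_offset):
 * on a 10000-sector log with last_checkpoint = 9000 and end = 1000, the
 * reclaimed region wraps, so the else branch above issues two discards,
 * [9000, 10000) and [0, 1000). Without the wrap (last_checkpoint < end) a
 * single discard covers [last_checkpoint, end).
 */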

/*
 * r5c_flush_stripe moves stripe from cached list to handle_list. When called,
 * the stripe must be on r5c_cached_full_stripes or r5c_cached_partial_stripes.
 *
 * must hold conf->device_lock
 */
static void r5c_flush_stripe(struct r5conf *conf, struct stripe_head *sh)
{
	BUG_ON(list_empty(&sh->lru));
	BUG_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));

	/*
	 * The stripe is not ON_RELEASE_LIST, so it is safe to call
	 * raid5_release_stripe() while holding conf->device_lock
	 */
	BUG_ON(test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));
	lockdep_assert_held(&conf->device_lock);

	list_del_init(&sh->lru);
	atomic_inc(&sh->count);

	set_bit(STRIPE_HANDLE, &sh->state);
	atomic_inc(&conf->active_stripes);
	r5c_make_stripe_write_out(sh);

	if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state))
		atomic_inc(&conf->r5c_flushing_partial_stripes);
	else
		atomic_inc(&conf->r5c_flushing_full_stripes);
	raid5_release_stripe(sh);
}

/*
 * if num == 0, flush all full stripes
 * if num > 0, flush all full stripes. If less than num full stripes are
 * flushed, flush some partial stripes until a total of num stripes are
 * flushed or there are no more cached stripes.
 */
void r5c_flush_cache(struct r5conf *conf, int num)
{
	int count;
	struct stripe_head *sh, *next;

	lockdep_assert_held(&conf->device_lock);
	if (!conf->log)
		return;

	count = 0;
	list_for_each_entry_safe(sh, next, &conf->r5c_full_stripe_list, lru) {
		r5c_flush_stripe(conf, sh);
		count++;
	}

	if (count >= num)
		return;
	list_for_each_entry_safe(sh, next,
				 &conf->r5c_partial_stripe_list, lru) {
		r5c_flush_stripe(conf, sh);
		if (++count >= num)
			break;
	}
}

static void r5c_do_reclaim(struct r5conf *conf)
{
	struct r5l_log *log = conf->log;
	struct stripe_head *sh;
	int count = 0;
	unsigned long flags;
	int total_cached;
	int stripes_to_flush;
	int flushing_partial, flushing_full;

	if (!r5c_is_writeback(log))
		return;

	flushing_partial = atomic_read(&conf->r5c_flushing_partial_stripes);
	flushing_full = atomic_read(&conf->r5c_flushing_full_stripes);
	total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
		atomic_read(&conf->r5c_cached_full_stripes) -
		flushing_full - flushing_partial;

	if (total_cached > conf->min_nr_stripes * 3 / 4 ||
	    atomic_read(&conf->empty_inactive_list_nr) > 0)
		/*
		 * if stripe cache pressure is high, flush all full stripes
		 * and some partial stripes
		 */
		stripes_to_flush = R5C_RECLAIM_STRIPE_GROUP;
	else if (total_cached > conf->min_nr_stripes * 1 / 2 ||
		 atomic_read(&conf->r5c_cached_full_stripes) - flushing_full >
		 R5C_FULL_STRIPE_FLUSH_BATCH(conf))
		/*
		 * if stripe cache pressure is moderate, or if there are many
		 * full stripes, flush all full stripes
		 */
		stripes_to_flush = 0;
	else
		/* no need to flush */
		stripes_to_flush = -1;

	if (stripes_to_flush >= 0) {
		spin_lock_irqsave(&conf->device_lock, flags);
		r5c_flush_cache(conf, stripes_to_flush);
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}

	/* if log space is tight, flush stripes on stripe_in_journal_list */
	if (test_bit(R5C_LOG_TIGHT, &conf->cache_state)) {
		spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
		spin_lock(&conf->device_lock);
		list_for_each_entry(sh, &log->stripe_in_journal_list, r5c) {
			/*
			 * stripes on stripe_in_journal_list could be in any
			 * state of the stripe_cache state machine. In this
			 * case, we only want to flush stripes on
			 * r5c_cached_full/partial_stripes. The following
			 * condition makes sure the stripe is on one of the
			 * two lists.
			 */
			if (!list_empty(&sh->lru) &&
			    !test_bit(STRIPE_HANDLE, &sh->state) &&
			    atomic_read(&sh->count) == 0) {
				r5c_flush_stripe(conf, sh);
				if (count++ >= R5C_RECLAIM_STRIPE_GROUP)
					break;
			}
		}
		spin_unlock(&conf->device_lock);
		spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
	}

	if (!test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
		r5l_run_no_space_stripes(log);

	md_wakeup_thread(conf->mddev->thread);
}

static void r5l_do_reclaim(struct r5l_log *log)
{
	struct r5conf *conf = log->rdev->mddev->private;
	sector_t reclaim_target = xchg(&log->reclaim_target, 0);
	sector_t reclaimable;
	sector_t next_checkpoint;
	bool write_super;

	spin_lock_irq(&log->io_list_lock);
	write_super = r5l_reclaimable_space(log) > log->max_free_space ||
		reclaim_target != 0 || !list_empty(&log->no_space_stripes);
	/*
	 * move the proper io_units to the reclaim list. We should not change
	 * the order: reclaimable and unreclaimable io_units can be mixed in
	 * the list, and we shouldn't reuse the space of an unreclaimable
	 * io_unit.
	 */
	while (1) {
		reclaimable = r5l_reclaimable_space(log);
		if (reclaimable >= reclaim_target ||
		    (list_empty(&log->running_ios) &&
		     list_empty(&log->io_end_ios) &&
		     list_empty(&log->flushing_ios) &&
		     list_empty(&log->finished_ios)))
			break;

		md_wakeup_thread(log->rdev->mddev->thread);
		wait_event_lock_irq(log->iounit_wait,
				    r5l_reclaimable_space(log) > reclaimable,
				    log->io_list_lock);
	}

	next_checkpoint = r5c_calculate_new_cp(conf);
	spin_unlock_irq(&log->io_list_lock);

	if (reclaimable == 0 || !write_super)
		return;

	/*
	 * write_super will flush the cache of each raid disk. We must write
	 * super here, because the log area might be reused soon and we don't
	 * want to confuse recovery.
	 */
	r5l_write_super_and_discard_space(log, next_checkpoint);

	mutex_lock(&log->io_mutex);
	log->last_checkpoint = next_checkpoint;
	r5c_update_log_state(log);
	mutex_unlock(&log->io_mutex);

	r5l_run_no_space_stripes(log);
}

static void r5l_reclaim_thread(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct r5conf *conf = mddev->private;
	struct r5l_log *log = conf->log;

	if (!log)
		return;
	r5c_do_reclaim(conf);
	r5l_do_reclaim(log);
}

void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
{
	unsigned long target;
	unsigned long new = (unsigned long)space; /* overflow in theory */

	if (!log)
		return;
	do {
		target = log->reclaim_target;
		if (new < target)
			return;
	} while (cmpxchg(&log->reclaim_target, target, new) != target);
	md_wakeup_thread(log->reclaim_thread);
}
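
/*
 * The cmpxchg loop above implements a monotonic maximum on reclaim_target:
 * if callers race with space = 100 and space = 200, the target settles at
 * 200, and a caller requesting less than the pending target returns without
 * another wakeup. r5l_do_reclaim() consumes the accumulated value with
 * xchg(&log->reclaim_target, 0).
 */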

void r5l_quiesce(struct r5l_log *log, int quiesce)
{
	struct mddev *mddev;

	if (quiesce) {
		/* make sure r5l_write_super_and_discard_space exits */
		mddev = log->rdev->mddev;
		wake_up(&mddev->sb_wait);
		kthread_park(log->reclaim_thread->tsk);
		r5l_wake_reclaim(log, MaxSector);
		r5l_do_reclaim(log);
	} else
		kthread_unpark(log->reclaim_thread->tsk);
}

bool r5l_log_disk_error(struct r5conf *conf)
{
	struct r5l_log *log;
	bool ret;
	/* don't allow write if journal disk is missing */
	rcu_read_lock();
	log = rcu_dereference(conf->log);

	if (!log)
		ret = test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
	else
		ret = test_bit(Faulty, &log->rdev->flags);
	rcu_read_unlock();
	return ret;
}

#define R5L_RECOVERY_PAGE_POOL_SIZE 256

struct r5l_recovery_ctx {
	struct page *meta_page;		/* current meta */
	sector_t meta_total_blocks;	/* total size of current meta and data */
	sector_t pos;			/* recovery position */
	u64 seq;			/* recovery position seq */
	int data_parity_stripes;	/* number of data_parity stripes */
	int data_only_stripes;		/* number of data_only stripes */
	struct list_head cached_list;

	/*
	 * read ahead page pool (ra_pool)
	 * In recovery, the log is read sequentially. It is not efficient to
	 * read every page with sync_page_io(). The read ahead page pool
	 * reads multiple pages with one IO, so further log reads can
	 * just copy data from the pool.
	 */
	struct page *ra_pool[R5L_RECOVERY_PAGE_POOL_SIZE];
	sector_t pool_offset;	/* offset of first page in the pool */
	int total_pages;	/* total allocated pages */
	int valid_pages;	/* pages with valid data */
	struct bio *ra_bio;	/* bio to do the read ahead */
};

static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
					 struct r5l_recovery_ctx *ctx)
{
	struct page *page;

	ctx->ra_bio = bio_alloc_bioset(GFP_KERNEL, BIO_MAX_PAGES, log->bs);
	if (!ctx->ra_bio)
		return -ENOMEM;

	ctx->valid_pages = 0;
	ctx->total_pages = 0;
	while (ctx->total_pages < R5L_RECOVERY_PAGE_POOL_SIZE) {
		page = alloc_page(GFP_KERNEL);

		if (!page)
			break;
		ctx->ra_pool[ctx->total_pages] = page;
		ctx->total_pages += 1;
	}

	if (ctx->total_pages == 0) {
		bio_put(ctx->ra_bio);
		return -ENOMEM;
	}

	ctx->pool_offset = 0;
	return 0;
}

static void r5l_recovery_free_ra_pool(struct r5l_log *log,
				      struct r5l_recovery_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->total_pages; ++i)
		put_page(ctx->ra_pool[i]);
	bio_put(ctx->ra_bio);
}

/*
 * fetch ctx->valid_pages pages from offset
 * In normal cases, ctx->valid_pages == ctx->total_pages after the call.
 * However, if the offset is close to the end of the journal device,
 * ctx->valid_pages could be smaller than ctx->total_pages
 */
static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
				      struct r5l_recovery_ctx *ctx,
				      sector_t offset)
{
	bio_reset(ctx->ra_bio);
	bio_set_dev(ctx->ra_bio, log->rdev->bdev);
	bio_set_op_attrs(ctx->ra_bio, REQ_OP_READ, 0);
	ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset;

	ctx->valid_pages = 0;
	ctx->pool_offset = offset;

	while (ctx->valid_pages < ctx->total_pages) {
		bio_add_page(ctx->ra_bio,
			     ctx->ra_pool[ctx->valid_pages], PAGE_SIZE, 0);
		ctx->valid_pages += 1;

		offset = r5l_ring_add(log, offset, BLOCK_SECTORS);

		if (offset == 0)  /* reached end of the device */
			break;
	}

	return submit_bio_wait(ctx->ra_bio);
}

/*
 * try to read a page from the read ahead page pool; if the page is not in
 * the pool, call r5l_recovery_fetch_ra_pool
 */
static int r5l_recovery_read_page(struct r5l_log *log,
				  struct r5l_recovery_ctx *ctx,
				  struct page *page,
				  sector_t offset)
{
	int ret;

	if (offset < ctx->pool_offset ||
	    offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS) {
		ret = r5l_recovery_fetch_ra_pool(log, ctx, offset);
		if (ret)
			return ret;
	}

	BUG_ON(offset < ctx->pool_offset ||
	       offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS);

	memcpy(page_address(page),
	       page_address(ctx->ra_pool[(offset - ctx->pool_offset) >>
					 BLOCK_SECTOR_SHIFT]),
	       PAGE_SIZE);
	return 0;
}
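
/*
 * Index arithmetic for the copy above (illustrative numbers): each pooled
 * page covers BLOCK_SECTORS = 8 sectors of the journal, so with
 * pool_offset = 64 and offset = 80, (80 - 64) >> BLOCK_SECTOR_SHIFT
 * selects ra_pool[2], the third page of the pool.
 */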

static int r5l_recovery_read_meta_block(struct r5l_log *log,
					struct r5l_recovery_ctx *ctx)
{
	struct page *page = ctx->meta_page;
	struct r5l_meta_block *mb;
	u32 crc, stored_crc;
	int ret;

	ret = r5l_recovery_read_page(log, ctx, page, ctx->pos);
	if (ret != 0)
		return ret;

	mb = page_address(page);
	stored_crc = le32_to_cpu(mb->checksum);
	mb->checksum = 0;

	if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
	    le64_to_cpu(mb->seq) != ctx->seq ||
	    mb->version != R5LOG_VERSION ||
	    le64_to_cpu(mb->position) != ctx->pos)
		return -EINVAL;

	crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
	if (stored_crc != crc)
		return -EINVAL;

	if (le32_to_cpu(mb->meta_size) > PAGE_SIZE)
		return -EINVAL;

	ctx->meta_total_blocks = BLOCK_SECTORS;

	return 0;
}

static void
r5l_recovery_create_empty_meta_block(struct r5l_log *log,
				     struct page *page,
				     sector_t pos, u64 seq)
{
	struct r5l_meta_block *mb;

	mb = page_address(page);
	clear_page(mb);
	mb->magic = cpu_to_le32(R5LOG_MAGIC);
	mb->version = R5LOG_VERSION;
	mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
	mb->seq = cpu_to_le64(seq);
	mb->position = cpu_to_le64(pos);
}

static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
					  u64 seq)
{
	struct page *page;
	struct r5l_meta_block *mb;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;
	r5l_recovery_create_empty_meta_block(log, page, pos, seq);
	mb = page_address(page);
	mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
					     mb, PAGE_SIZE));
	if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
			  REQ_SYNC | REQ_FUA, false)) {
		__free_page(page);
		return -EIO;
	}
	__free_page(page);
	return 0;
}

/*
 * r5l_recovery_load_data and r5l_recovery_load_parity use the flag
 * R5_Wantwrite to mark valid (potentially not flushed) data in the journal.
 *
 * We already verified checksum in r5l_recovery_verify_data_checksum_for_mb,
 * so there should not be any mismatch here.
 */
static void r5l_recovery_load_data(struct r5l_log *log,
				   struct stripe_head *sh,
				   struct r5l_recovery_ctx *ctx,
				   struct r5l_payload_data_parity *payload,
				   sector_t log_offset)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;
	int dd_idx;

	raid5_compute_sector(conf,
			     le64_to_cpu(payload->location), 0,
			     &dd_idx, sh);
	r5l_recovery_read_page(log, ctx, sh->dev[dd_idx].page, log_offset);
	sh->dev[dd_idx].log_checksum =
		le32_to_cpu(payload->checksum[0]);
	ctx->meta_total_blocks += BLOCK_SECTORS;

	set_bit(R5_Wantwrite, &sh->dev[dd_idx].flags);
	set_bit(STRIPE_R5C_CACHING, &sh->state);
}

static void r5l_recovery_load_parity(struct r5l_log *log,
				     struct stripe_head *sh,
				     struct r5l_recovery_ctx *ctx,
				     struct r5l_payload_data_parity *payload,
				     sector_t log_offset)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;

	ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
	r5l_recovery_read_page(log, ctx, sh->dev[sh->pd_idx].page, log_offset);
	sh->dev[sh->pd_idx].log_checksum =
		le32_to_cpu(payload->checksum[0]);
	set_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags);

	if (sh->qd_idx >= 0) {
		r5l_recovery_read_page(
			log, ctx, sh->dev[sh->qd_idx].page,
			r5l_ring_add(log, log_offset, BLOCK_SECTORS));
		sh->dev[sh->qd_idx].log_checksum =
			le32_to_cpu(payload->checksum[1]);
		set_bit(R5_Wantwrite, &sh->dev[sh->qd_idx].flags);
	}
	clear_bit(STRIPE_R5C_CACHING, &sh->state);
}

static void r5l_recovery_reset_stripe(struct stripe_head *sh)
{
	int i;

	sh->state = 0;
	sh->log_start = MaxSector;
	for (i = sh->disks; i--; )
		sh->dev[i].flags = 0;
}

static void
r5l_recovery_replay_one_stripe(struct r5conf *conf,
			       struct stripe_head *sh,
			       struct r5l_recovery_ctx *ctx)
{
	struct md_rdev *rdev, *rrdev;
	int disk_index;
	int data_count = 0;

	for (disk_index = 0; disk_index < sh->disks; disk_index++) {
		if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
			continue;
		if (disk_index == sh->qd_idx || disk_index == sh->pd_idx)
			continue;
		data_count++;
	}

	/*
	 * stripes that only have parity must have been flushed
	 * before the crash that we are now recovering from, so
	 * there is nothing more to recover.
	 */
	if (data_count == 0)
		goto out;

	for (disk_index = 0; disk_index < sh->disks; disk_index++) {
		if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
			continue;

		/* in case device is broken */
		rcu_read_lock();
		rdev = rcu_dereference(conf->disks[disk_index].rdev);
		if (rdev) {
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			sync_page_io(rdev, sh->sector, PAGE_SIZE,
				     sh->dev[disk_index].page, REQ_OP_WRITE, 0,
				     false);
			rdev_dec_pending(rdev, rdev->mddev);
			rcu_read_lock();
		}
		rrdev = rcu_dereference(conf->disks[disk_index].replacement);
		if (rrdev) {
			atomic_inc(&rrdev->nr_pending);
			rcu_read_unlock();
			sync_page_io(rrdev, sh->sector, PAGE_SIZE,
				     sh->dev[disk_index].page, REQ_OP_WRITE, 0,
				     false);
			rdev_dec_pending(rrdev, rrdev->mddev);
			rcu_read_lock();
		}
		rcu_read_unlock();
	}
	ctx->data_parity_stripes++;
out:
	r5l_recovery_reset_stripe(sh);
}

static struct stripe_head *
r5c_recovery_alloc_stripe(struct r5conf *conf,
			  sector_t stripe_sect)
{
	struct stripe_head *sh;

	sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0);
	if (!sh)
		return NULL;  /* no more stripe available */

	r5l_recovery_reset_stripe(sh);

	return sh;
}

static struct stripe_head *
r5c_recovery_lookup_stripe(struct list_head *list, sector_t sect)
{
	struct stripe_head *sh;

	list_for_each_entry(sh, list, lru)
		if (sh->sector == sect)
			return sh;
	return NULL;
}

static void
r5c_recovery_drop_stripes(struct list_head *cached_stripe_list,
			  struct r5l_recovery_ctx *ctx)
{
	struct stripe_head *sh, *next;

	list_for_each_entry_safe(sh, next, cached_stripe_list, lru) {
		r5l_recovery_reset_stripe(sh);
		list_del_init(&sh->lru);
		raid5_release_stripe(sh);
	}
}

static void
r5c_recovery_replay_stripes(struct list_head *cached_stripe_list,
			    struct r5l_recovery_ctx *ctx)
{
	struct stripe_head *sh, *next;

	list_for_each_entry_safe(sh, next, cached_stripe_list, lru)
		if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
			r5l_recovery_replay_one_stripe(sh->raid_conf, sh, ctx);
			list_del_init(&sh->lru);
			raid5_release_stripe(sh);
		}
}

/* if it matches, return 0; otherwise return -EINVAL */
static int
r5l_recovery_verify_data_checksum(struct r5l_log *log,
				  struct r5l_recovery_ctx *ctx,
				  struct page *page,
				  sector_t log_offset, __le32 log_checksum)
{
	void *addr;
	u32 checksum;

	r5l_recovery_read_page(log, ctx, page, log_offset);
	addr = kmap_atomic(page);
	checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
	kunmap_atomic(addr);
	return (le32_to_cpu(log_checksum) == checksum) ? 0 : -EINVAL;
}

/*
 * before loading data to stripe cache, we need to verify the checksum for
 * all data; if there is a mismatch for any data page, we drop all data in
 * the meta block
 */
static int
r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log,
					 struct r5l_recovery_ctx *ctx)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;
	struct r5l_meta_block *mb = page_address(ctx->meta_page);
	sector_t mb_offset = sizeof(struct r5l_meta_block);
	sector_t log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
	struct page *page;
	struct r5l_payload_data_parity *payload;
	struct r5l_payload_flush *payload_flush;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	while (mb_offset < le32_to_cpu(mb->meta_size)) {
		payload = (void *)mb + mb_offset;
		payload_flush = (void *)mb + mb_offset;

		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
			if (r5l_recovery_verify_data_checksum(
				    log, ctx, page, log_offset,
				    payload->checksum[0]) < 0)
				goto mismatch;
		} else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY) {
			if (r5l_recovery_verify_data_checksum(
				    log, ctx, page, log_offset,
				    payload->checksum[0]) < 0)
				goto mismatch;
			if (conf->max_degraded == 2 && /* q for RAID 6 */
			    r5l_recovery_verify_data_checksum(
				    log, ctx, page,
				    r5l_ring_add(log, log_offset,
						 BLOCK_SECTORS),
				    payload->checksum[1]) < 0)
				goto mismatch;
		} else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
			/* nothing to do for R5LOG_PAYLOAD_FLUSH here */
		} else /* not R5LOG_PAYLOAD_DATA/PARITY/FLUSH */
			goto mismatch;

		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
			mb_offset += sizeof(struct r5l_payload_flush) +
				le32_to_cpu(payload_flush->size);
		} else {
			/* DATA or PARITY payload */
			log_offset = r5l_ring_add(log, log_offset,
						  le32_to_cpu(payload->size));
			mb_offset += sizeof(struct r5l_payload_data_parity) +
				sizeof(__le32) *
				(le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
		}
	}

	put_page(page);
	return 0;

mismatch:
	put_page(page);
	return -EINVAL;
}

/*
 * Analyze all data/parity pages in one meta block
 * Returns:
 * 0 for success
 * -EINVAL for unknown payload type
 * -EAGAIN for checksum mismatch of data page
 * -ENOMEM for running out of memory (alloc_page failed or no more stripes)
 */
static int
r5c_recovery_analyze_meta_block(struct r5l_log *log,
				struct r5l_recovery_ctx *ctx,
				struct list_head *cached_stripe_list)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;
	struct r5l_meta_block *mb;
	struct r5l_payload_data_parity *payload;
	struct r5l_payload_flush *payload_flush;
	int mb_offset;
	sector_t log_offset;
	sector_t stripe_sect;
	struct stripe_head *sh;
	int ret;

	/*
	 * for a mismatch in data blocks, we will drop all data in this mb,
	 * but we will still read the next mb for other data with the FLUSH
	 * flag, as io_units could finish out of order.
	 */
	ret = r5l_recovery_verify_data_checksum_for_mb(log, ctx);
	if (ret == -EINVAL)
		return -EAGAIN;
	else if (ret)
		return ret; /* -ENOMEM due to alloc_page() failure */

	mb = page_address(ctx->meta_page);
	mb_offset = sizeof(struct r5l_meta_block);
	log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);

	while (mb_offset < le32_to_cpu(mb->meta_size)) {
		int dd;

		payload = (void *)mb + mb_offset;
		payload_flush = (void *)mb + mb_offset;

		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
			int i, count;

			count = le32_to_cpu(payload_flush->size) / sizeof(__le64);
			for (i = 0; i < count; ++i) {
				stripe_sect = le64_to_cpu(payload_flush->flush_stripes[i]);
				sh = r5c_recovery_lookup_stripe(cached_stripe_list,
								stripe_sect);
				if (sh) {
					WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
					r5l_recovery_reset_stripe(sh);
					list_del_init(&sh->lru);
					raid5_release_stripe(sh);
				}
			}

			mb_offset += sizeof(struct r5l_payload_flush) +
				le32_to_cpu(payload_flush->size);
			continue;
		}

		/* DATA or PARITY payload */
		stripe_sect = (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) ?
			raid5_compute_sector(
				conf, le64_to_cpu(payload->location), 0, &dd,
				NULL)
			: le64_to_cpu(payload->location);

		sh = r5c_recovery_lookup_stripe(cached_stripe_list,
						stripe_sect);

		if (!sh) {
			sh = r5c_recovery_alloc_stripe(conf, stripe_sect);
			/*
			 * cannot get a stripe from raid5_get_active_stripe;
			 * try replaying some stripes
			 */
			if (!sh) {
				r5c_recovery_replay_stripes(
					cached_stripe_list, ctx);
				sh = r5c_recovery_alloc_stripe(
					conf, stripe_sect);
			}
			if (!sh) {
				pr_debug("md/raid:%s: Increasing stripe cache size to %d to recover data on journal.\n",
					 mdname(mddev),
					 conf->min_nr_stripes * 2);
				raid5_set_cache_size(mddev,
						     conf->min_nr_stripes * 2);
				sh = r5c_recovery_alloc_stripe(conf,
							       stripe_sect);
			}
			if (!sh) {
				pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
				       mdname(mddev));
				return -ENOMEM;
			}
			list_add_tail(&sh->lru, cached_stripe_list);
		}

		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
			if (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
			    test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) {
				r5l_recovery_replay_one_stripe(conf, sh, ctx);
				list_move_tail(&sh->lru, cached_stripe_list);
			}
			r5l_recovery_load_data(log, sh, ctx, payload,
					       log_offset);
		} else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
			r5l_recovery_load_parity(log, sh, ctx, payload,
						 log_offset);
		else
			return -EINVAL;

		log_offset = r5l_ring_add(log, log_offset,
					  le32_to_cpu(payload->size));

		mb_offset += sizeof(struct r5l_payload_data_parity) +
			sizeof(__le32) *
			(le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
	}

	return 0;
}

/*
 * Load the stripe into cache. The stripe will be written out later by
 * the stripe cache state machine.
 */
static void r5c_recovery_load_one_stripe(struct r5l_log *log,
					 struct stripe_head *sh)
{
	struct r5dev *dev;
	int i;

	for (i = sh->disks; i--; ) {
		dev = sh->dev + i;
		if (test_and_clear_bit(R5_Wantwrite, &dev->flags)) {
			set_bit(R5_InJournal, &dev->flags);
			set_bit(R5_UPTODATE, &dev->flags);
		}
	}
}

/*
 * Scan through the log for all to-be-flushed data
 *
 * For stripes with data and parity, namely Data-Parity stripe
 * (STRIPE_R5C_CACHING == 0), we simply replay all the writes.
 *
 * For stripes with only data, namely Data-Only stripe
 * (STRIPE_R5C_CACHING == 1), we load them to the stripe cache state machine.
 *
 * For a stripe, if we see data after parity, we should discard all previous
 * data and parity for this stripe, as these data are already flushed to
 * the array.
 *
 * At the end of the scan, we return the new journal_tail, which points to
 * the first data-only stripe on the journal device, or to the next invalid
 * meta block.
 */
static int r5c_recovery_flush_log(struct r5l_log *log,
				  struct r5l_recovery_ctx *ctx)
{
	struct stripe_head *sh;
	int ret = 0;

	/* scan through the log */
	while (1) {
		if (r5l_recovery_read_meta_block(log, ctx))
			break;

		ret = r5c_recovery_analyze_meta_block(log, ctx,
						      &ctx->cached_list);
		/*
		 * -EAGAIN means a mismatch in a data block; in this case, we
		 * still try to scan the next meta block
		 */
		if (ret && ret != -EAGAIN)
			break;	/* ret == -EINVAL or -ENOMEM */
		ctx->seq++;
		ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
	}

	if (ret == -ENOMEM) {
		r5c_recovery_drop_stripes(&ctx->cached_list, ctx);
		return ret;
	}

	/* replay data-parity stripes */
	r5c_recovery_replay_stripes(&ctx->cached_list, ctx);

	/* load data-only stripes to stripe cache */
	list_for_each_entry(sh, &ctx->cached_list, lru) {
		WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
		r5c_recovery_load_one_stripe(log, sh);
		ctx->data_only_stripes++;
	}

	return 0;
}

/*
 * we did a recovery. Now ctx.pos points to an invalid meta block. New
 * log will start here. But we can't let the superblock point to the last
 * valid meta block. The log might look like:
 * | meta 1| meta 2| meta 3|
 * meta 1 is valid, meta 2 is invalid. meta 3 could be valid. If the
 * superblock points to meta 1, we write a new valid meta 2n. If a crash
 * happens again, new recovery will start from meta 1. Since meta 2n is
 * valid now, recovery will think meta 3 is valid, which is wrong.
 * The solution is we create a new meta in meta2 with its seq == meta
 * 1's seq + 10000 and let the superblock point to meta2. The same recovery
 * will not think meta 3 is a valid meta, because its seq doesn't match.
 */

/*
 * Before recovery, the log looks like the following
 *
 *   ---------------------------------------------
 *   |           valid log        | invalid log  |
 *   ---------------------------------------------
 *   ^
 *   |- log->last_checkpoint
 *   |- log->last_cp_seq
 *
 * Now we scan through the log until we see invalid entry
 *
 *   ---------------------------------------------
 *   |           valid log        | invalid log  |
 *   ---------------------------------------------
 *   ^                            ^
 *   |- log->last_checkpoint      |- ctx->pos
 *   |- log->last_cp_seq          |- ctx->seq
 *
 * From this point, we need to increase the seq number by 10000 to avoid
 * confusing the next recovery.
 *
 *   ---------------------------------------------
 *   |           valid log        | invalid log  |
 *   ---------------------------------------------
 *   ^                            ^
 *   |- log->last_checkpoint      |- ctx->pos+1
 *   |- log->last_cp_seq          |- ctx->seq+10001
 *
 * However, it is not safe to start the state machine yet, because data only
 * stripes are not yet secured in RAID. To save these data only stripes, we
 * rewrite them from seq+10001 on.
 *
 *   -----------------------------------------------------------------
 *   |           valid log        | data only stripes | invalid log  |
 *   -----------------------------------------------------------------
 *   ^                                                ^
 *   |- log->last_checkpoint                          |- ctx->pos+n
 *   |- log->last_cp_seq                              |- ctx->seq+10000+n
 *
 * If a failure happens again during this process, the recovery can safely
 * start again from log->last_checkpoint.
 *
 * Once data only stripes are rewritten to journal, we move log_tail
 *
 *   -----------------------------------------------------------------
 *   |       old log        | data only stripes | invalid log  |
 *   -----------------------------------------------------------------
 *                          ^                   ^
 *                          |                   |- ctx->pos+n
 *                          |                   |- ctx->seq+10000+n
 *                          |- log->last_checkpoint
 *                          |- log->last_cp_seq
 *
 * Then we can safely start the state machine. If a failure happens from this
 * point on, the recovery will start from the new log->last_checkpoint.
 */
static int
r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
				       struct r5l_recovery_ctx *ctx)
{
	struct stripe_head *sh;
	struct mddev *mddev = log->rdev->mddev;
	struct page *page;
	sector_t next_checkpoint = MaxSector;

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		pr_err("md/raid:%s: cannot allocate memory to rewrite data only stripes\n",
		       mdname(mddev));
		return -ENOMEM;
	}

	WARN_ON(list_empty(&ctx->cached_list));

	list_for_each_entry(sh, &ctx->cached_list, lru) {
		struct r5l_meta_block *mb;
		int i;
		int offset;
		sector_t write_pos;

		WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
		r5l_recovery_create_empty_meta_block(log, page,
						     ctx->pos, ctx->seq);
		mb = page_address(page);
		offset = le32_to_cpu(mb->meta_size);
		write_pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);

		for (i = sh->disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			struct r5l_payload_data_parity *payload;
			void *addr;

			if (test_bit(R5_InJournal, &dev->flags)) {
				payload = (void *)mb + offset;
				payload->header.type = cpu_to_le16(
					R5LOG_PAYLOAD_DATA);
				payload->size = cpu_to_le32(BLOCK_SECTORS);
				payload->location = cpu_to_le64(
					raid5_compute_blocknr(sh, i, 0));
				addr = kmap_atomic(dev->page);
				payload->checksum[0] = cpu_to_le32(
					crc32c_le(log->uuid_checksum, addr,
						  PAGE_SIZE));
				kunmap_atomic(addr);
				sync_page_io(log->rdev, write_pos, PAGE_SIZE,
					     dev->page, REQ_OP_WRITE, 0, false);
				write_pos = r5l_ring_add(log, write_pos,
							 BLOCK_SECTORS);
				offset += sizeof(__le32) +
					sizeof(struct r5l_payload_data_parity);
			}
		}
		mb->meta_size = cpu_to_le32(offset);
		mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
						     mb, PAGE_SIZE));
		sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
			     REQ_OP_WRITE, REQ_SYNC | REQ_FUA, false);
		sh->log_start = ctx->pos;
		list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
		atomic_inc(&log->stripe_in_journal_count);
		ctx->pos = write_pos;
		ctx->seq += 1;
		next_checkpoint = sh->log_start;
	}
	log->next_checkpoint = next_checkpoint;
	__free_page(page);
	return 0;
}
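/*
 * Illustrative layout produced by the loop above (not from the code):
 * for a stripe with two R5_InJournal data blocks, one meta block is
 * written at ctx->pos, followed by the two 4 KiB data blocks at
 * ctx->pos + 8 and ctx->pos + 16 (BLOCK_SECTORS == 8 sectors per
 * block); ctx->pos then advances past them and ctx->seq is bumped for
 * the next stripe's meta block.
 */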

static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
						 struct r5l_recovery_ctx *ctx)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;
	struct stripe_head *sh, *next;

	if (ctx->data_only_stripes == 0)
		return;

	log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK;

	list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
		r5c_make_stripe_write_out(sh);
		set_bit(STRIPE_HANDLE, &sh->state);
		list_del_init(&sh->lru);
		raid5_release_stripe(sh);
	}

	/* reuse conf->wait_for_quiescent in recovery */
	wait_event(conf->wait_for_quiescent,
		   atomic_read(&conf->active_stripes) == 0);

	log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
}

static int r5l_recovery_log(struct r5l_log *log)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5l_recovery_ctx *ctx;
	int ret;
	sector_t pos;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->pos = log->last_checkpoint;
	ctx->seq = log->last_cp_seq;
	INIT_LIST_HEAD(&ctx->cached_list);
	ctx->meta_page = alloc_page(GFP_KERNEL);
	if (!ctx->meta_page) {
		ret = -ENOMEM;
		goto meta_page;
	}

	if (r5l_recovery_allocate_ra_pool(log, ctx) != 0) {
		ret = -ENOMEM;
		goto ra_pool;
	}

	ret = r5c_recovery_flush_log(log, ctx);
	if (ret)
		goto error;

	pos = ctx->pos;
	ctx->seq += 10000;

	if ((ctx->data_only_stripes == 0) && (ctx->data_parity_stripes == 0))
		pr_info("md/raid:%s: starting from clean shutdown\n",
			mdname(mddev));
	else
		pr_info("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
			mdname(mddev), ctx->data_only_stripes,
			ctx->data_parity_stripes);

	if (ctx->data_only_stripes == 0) {
		log->next_checkpoint = ctx->pos;
		r5l_log_write_empty_meta_block(log, ctx->pos, ctx->seq++);
		ctx->pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
	} else if (r5c_recovery_rewrite_data_only_stripes(log, ctx)) {
		pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
		       mdname(mddev));
		ret = -EIO;
		goto error;
	}

	log->log_start = ctx->pos;
	log->seq = ctx->seq;
	log->last_checkpoint = pos;
	r5l_write_super(log, pos);

	r5c_recovery_flush_data_only_stripes(log, ctx);
	ret = 0;
error:
	r5l_recovery_free_ra_pool(log, ctx);
ra_pool:
	__free_page(ctx->meta_page);
meta_page:
	kfree(ctx);
	return ret;
}
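/*
 * Sequence arithmetic example (illustrative): if last_cp_seq is 42 and
 * the scan accepts 7 meta blocks, ctx->seq reaches 49; the +10000 bump
 * makes the first block written after recovery carry seq 10049, so
 * stale-but-intact blocks left over from before the crash (seq 50,
 * 51, ...) can never be mistaken for new log entries.
 */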

static void r5l_write_super(struct r5l_log *log, sector_t cp)
{
	struct mddev *mddev = log->rdev->mddev;

	log->rdev->journal_tail = cp;
	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
}

static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
{
	struct r5conf *conf;
	int ret;

	ret = mddev_lock(mddev);
	if (ret)
		return ret;

	conf = mddev->private;
	if (!conf || !conf->log) {
		mddev_unlock(mddev);
		return 0;
	}

	switch (conf->log->r5c_journal_mode) {
	case R5C_JOURNAL_MODE_WRITE_THROUGH:
		ret = snprintf(
			page, PAGE_SIZE, "[%s] %s\n",
			r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
			r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
		break;
	case R5C_JOURNAL_MODE_WRITE_BACK:
		ret = snprintf(
			page, PAGE_SIZE, "%s [%s]\n",
			r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
			r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
		break;
	default:
		ret = 0;
	}
	mddev_unlock(mddev);
	return ret;
}

/*
 * Set the journal cache mode on @mddev (external API, initially needed
 * by dm-raid).
 *
 * @mode is a value of 'enum r5c_journal_mode'.
 */
int r5c_journal_mode_set(struct mddev *mddev, int mode)
{
	struct r5conf *conf;

	if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH ||
	    mode > R5C_JOURNAL_MODE_WRITE_BACK)
		return -EINVAL;

	conf = mddev->private;
	if (!conf || !conf->log)
		return -ENODEV;

	if (raid5_calc_degraded(conf) > 0 &&
	    mode == R5C_JOURNAL_MODE_WRITE_BACK)
		return -EINVAL;

	mddev_suspend(mddev);
	conf->log->r5c_journal_mode = mode;
	mddev_resume(mddev);

	pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
		 mdname(mddev), mode, r5c_journal_mode_str[mode]);
	return 0;
}
EXPORT_SYMBOL(r5c_journal_mode_set);
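/*
 * A minimal (hypothetical) caller sketch for the exported API above, in
 * the spirit of dm-raid: switch an array to write-back caching and
 * report failure. example_enable_writeback() is illustrative only and
 * not part of this file.
 *
 *	static int example_enable_writeback(struct mddev *mddev)
 *	{
 *		int ret;
 *
 *		ret = r5c_journal_mode_set(mddev,
 *					   R5C_JOURNAL_MODE_WRITE_BACK);
 *		if (ret)	// -ENODEV without a log; -EINVAL if the
 *				// array is degraded or mode is invalid
 *			pr_warn("md/raid:%s: write-back not enabled: %d\n",
 *				mdname(mddev), ret);
 *		return ret;
 *	}
 */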

static ssize_t r5c_journal_mode_store(struct mddev *mddev,
				      const char *page, size_t length)
{
	int mode = ARRAY_SIZE(r5c_journal_mode_str);
	size_t len = length;
	int ret;

	if (len < 2)
		return -EINVAL;

	if (page[len - 1] == '\n')
		len--;

	while (mode--)
		if (strlen(r5c_journal_mode_str[mode]) == len &&
		    !strncmp(page, r5c_journal_mode_str[mode], len))
			break;
	ret = mddev_lock(mddev);
	if (ret)
		return ret;
	ret = r5c_journal_mode_set(mddev, mode);
	mddev_unlock(mddev);
	return ret ?: length;
}

struct md_sysfs_entry
r5c_journal_mode = __ATTR(journal_mode, 0644,
			  r5c_journal_mode_show, r5c_journal_mode_store);
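/*
 * Example of the resulting sysfs interface (assuming an array md0 with
 * a journal device; md attributes appear under /sys/block/md0/md/):
 *
 *	# cat /sys/block/md0/md/journal_mode
 *	[write-through] write-back
 *	# echo write-back > /sys/block/md0/md/journal_mode
 *	# cat /sys/block/md0/md/journal_mode
 *	write-through [write-back]
 */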

/*
 * Try to handle a write operation in caching phase. This function
 * should only be called in write-back mode.
 *
 * If all outstanding writes can be handled in caching phase, returns 0.
 * If some writes require the write-out phase, calls
 * r5c_make_stripe_write_out() and returns -EAGAIN.
 */
int r5c_try_caching_write(struct r5conf *conf,
			  struct stripe_head *sh,
			  struct stripe_head_state *s,
			  int disks)
{
	struct r5l_log *log = conf->log;
	int i;
	struct r5dev *dev;
	int to_cache = 0;
	void **pslot;
	sector_t tree_index;
	int ret;
	uintptr_t refcount;

	BUG_ON(!r5c_is_writeback(log));

	if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
		/*
		 * There are two different scenarios here:
		 *  1. The stripe has some data cached, and it is sent to
		 *     the write-out phase for reclaim.
		 *  2. The stripe is clean, and this is the first write.
		 *
		 * For 1, return -EAGAIN, so we continue with
		 * handle_stripe_dirtying().
		 *
		 * For 2, set STRIPE_R5C_CACHING and continue with the
		 * caching write.
		 */

		/* case 1: anything injournal or anything in written */
		if (s->injournal > 0 || s->written > 0)
			return -EAGAIN;
		/* case 2 */
		set_bit(STRIPE_R5C_CACHING, &sh->state);
	}

	/*
	 * When run in degraded mode, the array is set to write-through
	 * mode. This check helps drain pending writes safely in the
	 * transition to write-through mode.
	 *
	 * When a stripe is syncing, the write is also handled in
	 * write-through mode.
	 */
	if (s->failed || test_bit(STRIPE_SYNCING, &sh->state)) {
		r5c_make_stripe_write_out(sh);
		return -EAGAIN;
	}

	for (i = disks; i--; ) {
		dev = &sh->dev[i];
		/* if non-overwrite, use the writing-out phase */
		if (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags) &&
		    !test_bit(R5_InJournal, &dev->flags)) {
			r5c_make_stripe_write_out(sh);
			return -EAGAIN;
		}
	}

	/* if the stripe is not counted in big_stripe_tree, add it now */
	if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) &&
	    !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
		tree_index = r5c_tree_index(conf, sh->sector);
		spin_lock(&log->tree_lock);
		pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
					       tree_index);
		if (pslot) {
			refcount = (uintptr_t)radix_tree_deref_slot_protected(
				pslot, &log->tree_lock) >>
				R5C_RADIX_COUNT_SHIFT;
			radix_tree_replace_slot(
				&log->big_stripe_tree, pslot,
				(void *)((refcount + 1) << R5C_RADIX_COUNT_SHIFT));
		} else {
			/*
			 * this radix_tree_insert can fail safely, so no
			 * need to call radix_tree_preload()
			 */
			ret = radix_tree_insert(
				&log->big_stripe_tree, tree_index,
				(void *)(1 << R5C_RADIX_COUNT_SHIFT));
			if (ret) {
				spin_unlock(&log->tree_lock);
				r5c_make_stripe_write_out(sh);
				return -EAGAIN;
			}
		}
		spin_unlock(&log->tree_lock);

		/*
		 * set STRIPE_R5C_PARTIAL_STRIPE, this shows the stripe is
		 * counted in the radix tree
		 */
		set_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state);
		atomic_inc(&conf->r5c_cached_partial_stripes);
	}

	for (i = disks; i--; ) {
		dev = &sh->dev[i];
		if (dev->towrite) {
			set_bit(R5_Wantwrite, &dev->flags);
			set_bit(R5_Wantdrain, &dev->flags);
			set_bit(R5_LOCKED, &dev->flags);
			to_cache++;
		}
	}

	if (to_cache) {
		set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
		/*
		 * set STRIPE_LOG_TRAPPED, which triggers r5c_cache_data()
		 * in ops_run_io(). STRIPE_LOG_TRAPPED will be cleared in
		 * r5c_handle_data_cached()
		 */
		set_bit(STRIPE_LOG_TRAPPED, &sh->state);
	}

	return 0;
}
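/*
 * For reference, the refcount-in-slot encoding used above and in
 * r5c_finish_stripe_write_out(), written out as (hypothetical) helpers;
 * the count lives in the high bits so that the low
 * R5C_RADIX_COUNT_SHIFT bits of the slot value stay clear of the bits
 * the radix tree reserves for its own use.
 *
 *	static inline void *r5c_count_to_slot(uintptr_t count)
 *	{
 *		return (void *)(count << R5C_RADIX_COUNT_SHIFT);
 *	}
 *
 *	static inline uintptr_t r5c_slot_to_count(void *slot)
 *	{
 *		return (uintptr_t)slot >> R5C_RADIX_COUNT_SHIFT;
 *	}
 */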

/*
 * free extra pages (orig_page) we allocated for prexor
 */
void r5c_release_extra_page(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	int i;
	bool using_disk_info_extra_page;

	using_disk_info_extra_page =
		sh->dev[0].orig_page == conf->disks[0].extra_page;

	for (i = sh->disks; i--; )
		if (sh->dev[i].page != sh->dev[i].orig_page) {
			struct page *p = sh->dev[i].orig_page;

			sh->dev[i].orig_page = sh->dev[i].page;
			clear_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags);

			if (!using_disk_info_extra_page)
				put_page(p);
		}

	if (using_disk_info_extra_page) {
		clear_bit(R5C_EXTRA_PAGE_IN_USE, &conf->cache_state);
		md_wakeup_thread(conf->mddev->thread);
	}
}

void r5c_use_extra_page(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	int i;
	struct r5dev *dev;

	for (i = sh->disks; i--; ) {
		dev = &sh->dev[i];
		if (dev->orig_page != dev->page)
			put_page(dev->orig_page);
		dev->orig_page = conf->disks[i].extra_page;
	}
}

/*
 * clean up the stripe (clear R5_InJournal for dev[pd_idx] etc.) after
 * the stripe is committed to the RAID disks.
 */
void r5c_finish_stripe_write_out(struct r5conf *conf,
				 struct stripe_head *sh,
				 struct stripe_head_state *s)
{
	struct r5l_log *log = conf->log;
	int i;
	int do_wakeup = 0;
	sector_t tree_index;
	void **pslot;
	uintptr_t refcount;

	if (!log || !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags))
		return;

	WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
	clear_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
		return;

	for (i = sh->disks; i--; ) {
		clear_bit(R5_InJournal, &sh->dev[i].flags);
		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
			do_wakeup = 1;
	}

	/*
	 * analyse_stripe() runs before r5c_finish_stripe_write_out(); we
	 * updated R5_InJournal above, so we also need to update
	 * s->injournal.
	 */
	s->injournal = 0;

	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
		if (atomic_dec_and_test(&conf->pending_full_writes))
			md_wakeup_thread(conf->mddev->thread);

	if (do_wakeup)
		wake_up(&conf->wait_for_overlap);

	spin_lock_irq(&log->stripe_in_journal_lock);
	list_del_init(&sh->r5c);
	spin_unlock_irq(&log->stripe_in_journal_lock);
	sh->log_start = MaxSector;

	atomic_dec(&log->stripe_in_journal_count);
	r5c_update_log_state(log);

	/* stop counting this stripe in big_stripe_tree */
	if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) ||
	    test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
		tree_index = r5c_tree_index(conf, sh->sector);
		spin_lock(&log->tree_lock);
		pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
					       tree_index);
		BUG_ON(pslot == NULL);
		refcount = (uintptr_t)radix_tree_deref_slot_protected(
			pslot, &log->tree_lock) >>
			R5C_RADIX_COUNT_SHIFT;
		if (refcount == 1)
			radix_tree_delete(&log->big_stripe_tree, tree_index);
		else
			radix_tree_replace_slot(
				&log->big_stripe_tree, pslot,
				(void *)((refcount - 1) << R5C_RADIX_COUNT_SHIFT));
		spin_unlock(&log->tree_lock);
	}

	if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) {
		BUG_ON(atomic_read(&conf->r5c_cached_partial_stripes) == 0);
		atomic_dec(&conf->r5c_flushing_partial_stripes);
		atomic_dec(&conf->r5c_cached_partial_stripes);
	}

	if (test_and_clear_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
		BUG_ON(atomic_read(&conf->r5c_cached_full_stripes) == 0);
		atomic_dec(&conf->r5c_flushing_full_stripes);
		atomic_dec(&conf->r5c_cached_full_stripes);
	}

	r5l_append_flush_payload(log, sh->sector);
	/* stripe is flushed to the raid disks, we can do resync now */
	if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
		set_bit(STRIPE_HANDLE, &sh->state);
}

int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	int pages = 0;
	int reserve;
	int i;
	int ret = 0;

	BUG_ON(!log);

	for (i = 0; i < sh->disks; i++) {
		void *addr;

		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
			continue;
		addr = kmap_atomic(sh->dev[i].page);
		sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
						    addr, PAGE_SIZE);
		kunmap_atomic(addr);
		pages++;
	}
	WARN_ON(pages == 0);

	/*
	 * The stripe must enter state machine again to call endio, so
	 * don't delay.
	 */
	clear_bit(STRIPE_DELAYED, &sh->state);
	atomic_inc(&sh->count);

	mutex_lock(&log->io_mutex);
	/* meta + data */
	reserve = (1 + pages) << (PAGE_SHIFT - 9);

	if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
	    sh->log_start == MaxSector)
		r5l_add_no_space_stripe(log, sh);
	else if (!r5l_has_free_space(log, reserve)) {
		if (sh->log_start == log->last_checkpoint)
			BUG();
		else
			r5l_add_no_space_stripe(log, sh);
	} else {
		ret = r5l_log_stripe(log, sh, pages, 0);
		if (ret) {
			spin_lock_irq(&log->io_list_lock);
			list_add_tail(&sh->log_list, &log->no_mem_stripes);
			spin_unlock_irq(&log->io_list_lock);
		}
	}

	mutex_unlock(&log->io_mutex);
	return 0;
}
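/*
 * Reserve accounting example: with 4 KiB pages, PAGE_SHIFT - 9 == 3, so
 * a stripe caching 4 data pages reserves (1 + 4) << 3 == 40 sectors in
 * the log: one 4 KiB meta block plus four 4 KiB data blocks, each
 * BLOCK_SECTORS (8) sectors long.
 */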

/* check whether this big stripe is in write back cache. */
bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect)
{
	struct r5l_log *log = conf->log;
	sector_t tree_index;
	void *slot;

	if (!log)
		return false;

	WARN_ON_ONCE(!rcu_read_lock_held());
	tree_index = r5c_tree_index(conf, sect);
	slot = radix_tree_lookup(&log->big_stripe_tree, tree_index);
	return slot != NULL;
}
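/*
 * Minimal caller sketch (illustrative): as the WARN_ON_ONCE above
 * asserts, the lookup must run under rcu_read_lock().
 *
 *	bool cached;
 *
 *	rcu_read_lock();
 *	cached = r5c_big_stripe_cached(conf, sect);
 *	rcu_read_unlock();
 */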

static int r5l_load_log(struct r5l_log *log)
{
	struct md_rdev *rdev = log->rdev;
	struct page *page;
	struct r5l_meta_block *mb;
	sector_t cp = log->rdev->journal_tail;
	u32 stored_crc, expected_crc;
	bool create_super = false;
	int ret = 0;

	/* Make sure it's valid */
	if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
		cp = 0;
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, 0, false)) {
		ret = -EIO;
		goto ioerr;
	}
	mb = page_address(page);

	if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
	    mb->version != R5LOG_VERSION) {
		create_super = true;
		goto create;
	}
	stored_crc = le32_to_cpu(mb->checksum);
	mb->checksum = 0;
	expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
	if (stored_crc != expected_crc) {
		create_super = true;
		goto create;
	}
	if (le64_to_cpu(mb->position) != cp) {
		create_super = true;
		goto create;
	}
create:
	if (create_super) {
		log->last_cp_seq = prandom_u32();
		cp = 0;
		r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq);
		/*
		 * Make sure the super points to the correct address. The
		 * log might have data very soon. If the super doesn't
		 * have the correct log tail address, recovery can't find
		 * the log.
		 */
		r5l_write_super(log, cp);
	} else
		log->last_cp_seq = le64_to_cpu(mb->seq);

	log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
	log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
	if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
		log->max_free_space = RECLAIM_MAX_FREE_SPACE;
	log->last_checkpoint = cp;

	__free_page(page);

	if (create_super) {
		log->log_start = r5l_ring_add(log, cp, BLOCK_SECTORS);
		log->seq = log->last_cp_seq + 1;
		log->next_checkpoint = cp;
	} else
		ret = r5l_recovery_log(log);

	r5c_update_log_state(log);
	return ret;
ioerr:
	__free_page(page);
	return ret;
}
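/*
 * The three "goto create" checks above, condensed into one predicate
 * (an illustrative helper, not part of this file): a stored meta block
 * is trusted only when magic, version, checksum, and recorded position
 * all match.
 *
 *	static bool r5l_meta_block_valid(struct r5l_log *log,
 *					 struct r5l_meta_block *mb,
 *					 sector_t cp)
 *	{
 *		u32 stored = le32_to_cpu(mb->checksum);
 *
 *		if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
 *		    mb->version != R5LOG_VERSION)
 *			return false;
 *		mb->checksum = 0;
 *		return stored == crc32c_le(log->uuid_checksum, mb,
 *					   PAGE_SIZE) &&
 *		       le64_to_cpu(mb->position) == cp;
 *	}
 */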

int r5l_start(struct r5l_log *log)
{
	int ret;

	if (!log)
		return 0;

	ret = r5l_load_log(log);
	if (ret) {
		struct mddev *mddev = log->rdev->mddev;
		struct r5conf *conf = mddev->private;

		r5l_exit_log(conf);
	}
	return ret;
}

void r5c_update_on_rdev_error(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r5conf *conf = mddev->private;
	struct r5l_log *log = conf->log;

	if (!log)
		return;

	if ((raid5_calc_degraded(conf) > 0 ||
	     test_bit(Journal, &rdev->flags)) &&
	    conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
		schedule_work(&log->disable_writeback_work);
}

int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
{
	struct request_queue *q = bdev_get_queue(rdev->bdev);
	struct r5l_log *log;
	char b[BDEVNAME_SIZE];

	pr_debug("md/raid:%s: using device %s as journal\n",
		 mdname(conf->mddev), bdevname(rdev->bdev, b));

	if (PAGE_SIZE != 4096)
		return -EINVAL;

	/*
	 * PAGE_SIZE must be big enough to hold 1 r5l_meta_block and
	 * raid_disks r5l_payload_data_parity entries (plus one __le32
	 * checksum each). The write journal and cache therefore do not
	 * work for very big arrays: with 4096-byte pages, the largest
	 * raid_disks for which the sum below still fits is 203.
	 */
	if (sizeof(struct r5l_meta_block) +
	    ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) *
	     conf->raid_disks) > PAGE_SIZE) {
		pr_err("md/raid:%s: write journal/cache doesn't work for array with %d disks\n",
		       mdname(conf->mddev), conf->raid_disks);
		return -EINVAL;
	}

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		return -ENOMEM;
	log->rdev = rdev;

	log->need_cache_flush = test_bit(QUEUE_FLAG_WC, &q->queue_flags) != 0;

	log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
				       sizeof(rdev->mddev->uuid));

	mutex_init(&log->io_mutex);

	spin_lock_init(&log->io_list_lock);
	INIT_LIST_HEAD(&log->running_ios);
	INIT_LIST_HEAD(&log->io_end_ios);
	INIT_LIST_HEAD(&log->flushing_ios);
	INIT_LIST_HEAD(&log->finished_ios);
	bio_init(&log->flush_bio, NULL, 0);

	log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
	if (!log->io_kc)
		goto io_kc;

	log->io_pool = mempool_create_slab_pool(R5L_POOL_SIZE, log->io_kc);
	if (!log->io_pool)
		goto io_pool;

	log->bs = bioset_create(R5L_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	if (!log->bs)
		goto io_bs;

	log->meta_pool = mempool_create_page_pool(R5L_POOL_SIZE, 0);
	if (!log->meta_pool)
		goto out_mempool;

	spin_lock_init(&log->tree_lock);
	INIT_RADIX_TREE(&log->big_stripe_tree, GFP_NOWAIT | __GFP_NOWARN);

	log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
						 log->rdev->mddev, "reclaim");
	if (!log->reclaim_thread)
		goto reclaim_thread;
	log->reclaim_thread->timeout = R5C_RECLAIM_WAKEUP_INTERVAL;

	init_waitqueue_head(&log->iounit_wait);

	INIT_LIST_HEAD(&log->no_mem_stripes);

	INIT_LIST_HEAD(&log->no_space_stripes);
	spin_lock_init(&log->no_space_stripes_lock);

	INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
	INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async);

	log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
	INIT_LIST_HEAD(&log->stripe_in_journal_list);
	spin_lock_init(&log->stripe_in_journal_lock);
	atomic_set(&log->stripe_in_journal_count, 0);

	rcu_assign_pointer(conf->log, log);

	set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
	return 0;

reclaim_thread:
	mempool_destroy(log->meta_pool);
out_mempool:
	bioset_free(log->bs);
io_bs:
	mempool_destroy(log->io_pool);
io_pool:
	kmem_cache_destroy(log->io_kc);
io_kc:
	kfree(log);
	return -EINVAL;
}

void r5l_exit_log(struct r5conf *conf)
{
	struct r5l_log *log = conf->log;

	conf->log = NULL;
	synchronize_rcu();

	/* Ensure disable_writeback_work wakes up and exits */
	wake_up(&conf->mddev->sb_wait);
	flush_work(&log->disable_writeback_work);
	md_unregister_thread(&log->reclaim_thread);
	mempool_destroy(log->meta_pool);
	bioset_free(log->bs);
	mempool_destroy(log->io_pool);
	kmem_cache_destroy(log->io_kc);
	kfree(log);
}
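/*
 * Note that the teardown above mirrors r5l_init_log() in reverse: clear
 * conf->log and synchronize_rcu() first, so no new reader can observe
 * the log, then stop the asynchronous workers, and only then free the
 * pools and the log structure itself.
 */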