1/*
2 * buffered writeback throttling. loosely based on CoDel. We can't drop
3 * packets for IO scheduling, so the logic is something like this:
4 *
5 * - Monitor latencies in a defined window of time.
6 * - If the minimum latency in the above window exceeds some target, increment
7 * scaling step and scale down queue depth by a factor of 2x. The monitoring
8 * window is then shrunk to 100 / sqrt(scaling step + 1).
9 * - For any window where we don't have solid data on what the latencies
10 * look like, retain status quo.
11 * - If latencies look good, decrement scaling step.
12 * - If we're only doing writes, allow the scaling step to go negative. This
13 * will temporarily boost write performance, snapping back to a stable
14 * scaling step of 0 if reads show up or the heavy writers finish. Unlike
15 * positive scaling steps where we shrink the monitoring window, a negative
16 * scaling step retains the default step==0 window size.
17 *
18 * Copyright (C) 2016 Jens Axboe
19 *
20 */
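/*
 * As an illustration of the above: with the default depth of 16 and the
 * default 100msec window, two consecutive windows of excessive read
 * latency scale the write depth down to 8 and then 4, while the
 * monitoring window shrinks to roughly 71msec and then 58msec.
 */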
21#include <linux/kernel.h>
22#include <linux/blk_types.h>
23#include <linux/slab.h>
24#include <linux/backing-dev.h>
25#include <linux/swap.h>
26
27#include "blk-wbt.h"
28
29#define CREATE_TRACE_POINTS
30#include <trace/events/wbt.h>
31
32enum {
33 /*
34 * Default setting, we'll scale up (to 75% of QD max) or down (min 1)
35 * from here depending on device stats
36 */
37 RWB_DEF_DEPTH = 16,
38
39 /*
40 * 100msec window
41 */
42 RWB_WINDOW_NSEC = 100 * 1000 * 1000ULL,
43
44 /*
45 * Disregard stats, if we don't meet this minimum
46 */
47 RWB_MIN_WRITE_SAMPLES = 3,
48
49 /*
50 * If we have this number of consecutive windows with not enough
51 * information to scale up or down, scale up.
52 */
53 RWB_UNKNOWN_BUMP = 5,
54};
55
56static inline bool rwb_enabled(struct rq_wb *rwb)
57{
58 return rwb && rwb->wb_normal != 0;
59}
60
61/*
62 * Increment 'v', if 'v' is below 'below'. Returns true if we succeeded,
63 * false if 'v' + 1 would be bigger than 'below'.
64 */
65static bool atomic_inc_below(atomic_t *v, int below)
66{
67 int cur = atomic_read(v);
68
69 for (;;) {
70 int old;
71
72 if (cur >= below)
73 return false;
74 old = atomic_cmpxchg(v, cur, cur + 1);
75 if (old == cur)
76 break;
77 cur = old;
78 }
79
80 return true;
81}
82
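/*
 * Record the current jiffies in 'var', if wbt is enabled. Used to track
 * when we last issued and completed unthrottled IO.
 */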
83static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
84{
85 if (rwb_enabled(rwb)) {
86 const unsigned long cur = jiffies;
87
88 if (cur != *var)
89 *var = cur;
90 }
91}
92
93/*
94 * If a task was rate throttled in balance_dirty_pages() within the last
95 * second or so, use that to indicate a higher cleaning rate.
96 */
97static bool wb_recent_wait(struct rq_wb *rwb)
98{
99 struct bdi_writeback *wb = &rwb->queue->backing_dev_info->wb;
100
101 return time_before(jiffies, wb->dirty_sleep + HZ);
102}
103
104static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb, bool is_kswapd)
105{
106 return &rwb->rq_wait[is_kswapd];
107}
108
109static void rwb_wake_all(struct rq_wb *rwb)
110{
111 int i;
112
113 for (i = 0; i < WBT_NUM_RWQ; i++) {
114 struct rq_wait *rqw = &rwb->rq_wait[i];
115
116 if (waitqueue_active(&rqw->wait))
117 wake_up_all(&rqw->wait);
118 }
119}
120
121void __wbt_done(struct rq_wb *rwb, enum wbt_flags wb_acct)
122{
123 struct rq_wait *rqw;
124 int inflight, limit;
125
126 if (!(wb_acct & WBT_TRACKED))
127 return;
128
129 rqw = get_rq_wait(rwb, wb_acct & WBT_KSWAPD);
130 inflight = atomic_dec_return(&rqw->inflight);
131
132 /*
133 * wbt got disabled with IO in flight. Wake up any potential
134 * waiters, we don't have to do more than that.
135 */
136 if (unlikely(!rwb_enabled(rwb))) {
137 rwb_wake_all(rwb);
138 return;
139 }
140
141 /*
142 * If the device does write back caching, drop further down
143 * before we wake people up.
144 */
145 if (rwb->wc && !wb_recent_wait(rwb))
146 limit = 0;
147 else
148 limit = rwb->wb_normal;
149
150 /*
151 * Don't wake anyone up if we are above the normal limit.
152 */
153 if (inflight && inflight >= limit)
154 return;
155
156 if (waitqueue_active(&rqw->wait)) {
157 int diff = limit - inflight;
158
159 if (!inflight || diff >= rwb->wb_background / 2)
160 wake_up_all(&rqw->wait);
161 }
162}
163
/*
 * Called on completion of a request. Note that it's also called when
 * a request is merged, at which point the request gets freed.
 */
168void wbt_done(struct rq_wb *rwb, struct blk_issue_stat *stat)
169{
170 if (!rwb)
171 return;
172
173 if (!wbt_is_tracked(stat)) {
174 if (rwb->sync_cookie == stat) {
175 rwb->sync_issue = 0;
176 rwb->sync_cookie = NULL;
177 }
178
179 if (wbt_is_read(stat))
180 wb_timestamp(rwb, &rwb->last_comp);
181 } else {
182 WARN_ON_ONCE(stat == rwb->sync_cookie);
183 __wbt_done(rwb, wbt_stat_to_mask(stat));
184 }
185 wbt_clear_state(stat);
186}
187
188/*
189 * Return true, if we can't increase the depth further by scaling
190 */
191static bool calc_wb_limits(struct rq_wb *rwb)
192{
193 unsigned int depth;
194 bool ret = false;
195
196 if (!rwb->min_lat_nsec) {
197 rwb->wb_max = rwb->wb_normal = rwb->wb_background = 0;
198 return false;
199 }
200
201 /*
202 * For QD=1 devices, this is a special case. It's important for those
203 * to have one request ready when one completes, so force a depth of
204 * 2 for those devices. On the backend, it'll be a depth of 1 anyway,
205 * since the device can't have more than that in flight. If we're
206 * scaling down, then keep a setting of 1/1/1.
207 */
208 if (rwb->queue_depth == 1) {
209 if (rwb->scale_step > 0)
210 rwb->wb_max = rwb->wb_normal = 1;
211 else {
212 rwb->wb_max = rwb->wb_normal = 2;
213 ret = true;
214 }
215 rwb->wb_background = 1;
216 } else {
217 /*
218 * scale_step == 0 is our default state. If we have suffered
219 * latency spikes, step will be > 0, and we shrink the
220 * allowed write depths. If step is < 0, we're only doing
221 * writes, and we allow a temporarily higher depth to
222 * increase performance.
223 */
224 depth = min_t(unsigned int, RWB_DEF_DEPTH, rwb->queue_depth);
225 if (rwb->scale_step > 0)
226 depth = 1 + ((depth - 1) >> min(31, rwb->scale_step));
227 else if (rwb->scale_step < 0) {
228 unsigned int maxd = 3 * rwb->queue_depth / 4;
229
230 depth = 1 + ((depth - 1) << -rwb->scale_step);
231 if (depth > maxd) {
232 depth = maxd;
233 ret = true;
234 }
235 }
236
237 /*
238 * Set our max/normal/bg queue depths based on how far
239 * we have scaled down (->scale_step).
240 */
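		/*
		 * For example, with the default depth of 16: scale_step 1
		 * yields max/normal/background limits of 8/4/2, scale_step 2
		 * yields 4/2/1, and scale_step -1 (writes only) roughly
		 * doubles the depth, subject to the 3/4-of-queue-depth cap.
		 */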
241 rwb->wb_max = depth;
242 rwb->wb_normal = (rwb->wb_max + 1) / 2;
243 rwb->wb_background = (rwb->wb_max + 3) / 4;
244 }
245
246 return ret;
247}
248
249static inline bool stat_sample_valid(struct blk_rq_stat *stat)
250{
251 /*
252 * We need at least one read sample, and a minimum of
253 * RWB_MIN_WRITE_SAMPLES. We require some write samples to know
254 * that it's writes impacting us, and not just some sole read on
255 * a device that is in a lower power state.
256 */
257 return (stat[READ].nr_samples >= 1 &&
258 stat[WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES);
259}
260
261static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
262{
263 u64 now, issue = READ_ONCE(rwb->sync_issue);
264
265 if (!issue || !rwb->sync_cookie)
266 return 0;
267
268 now = ktime_to_ns(ktime_get());
269 return now - issue;
270}
271
272enum {
273 LAT_OK = 1,
274 LAT_UNKNOWN,
275 LAT_UNKNOWN_WRITES,
276 LAT_EXCEEDED,
277};
278
279static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
280{
281 struct backing_dev_info *bdi = rwb->queue->backing_dev_info;
282 u64 thislat;
283
284 /*
285 * If our stored sync issue exceeds the window size, or it
286 * exceeds our min target AND we haven't logged any entries,
287 * flag the latency as exceeded. wbt works off completion latencies,
288 * but for a flooded device, a single sync IO can take a long time
289 * to complete after being issued. If this time exceeds our
290 * monitoring window AND we didn't see any other completions in that
291 * window, then count that sync IO as a violation of the latency.
292 */
293 thislat = rwb_sync_issue_lat(rwb);
294 if (thislat > rwb->cur_win_nsec ||
295 (thislat > rwb->min_lat_nsec && !stat[READ].nr_samples)) {
296 trace_wbt_lat(bdi, thislat);
297 return LAT_EXCEEDED;
298 }
299
300 /*
301 * No read/write mix, if stat isn't valid
302 */
303 if (!stat_sample_valid(stat)) {
		/*
		 * If we had writes in this stat window and the window is
		 * current, we're only doing writes. If a task recently
		 * waited or still has writes in flight, consider us doing
		 * just writes as well.
		 */
310 if (stat[WRITE].nr_samples || wb_recent_wait(rwb) ||
311 wbt_inflight(rwb))
312 return LAT_UNKNOWN_WRITES;
313 return LAT_UNKNOWN;
314 }
315
316 /*
317 * If the 'min' latency exceeds our target, step down.
318 */
319 if (stat[READ].min > rwb->min_lat_nsec) {
320 trace_wbt_lat(bdi, stat[READ].min);
321 trace_wbt_stat(bdi, stat);
322 return LAT_EXCEEDED;
323 }
324
325 if (rwb->scale_step)
326 trace_wbt_stat(bdi, stat);
327
328 return LAT_OK;
329}
330
331static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
332{
333 struct backing_dev_info *bdi = rwb->queue->backing_dev_info;
334
335 trace_wbt_step(bdi, msg, rwb->scale_step, rwb->cur_win_nsec,
336 rwb->wb_background, rwb->wb_normal, rwb->wb_max);
337}
338
339static void scale_up(struct rq_wb *rwb)
340{
341 /*
342 * Hit max in previous round, stop here
343 */
344 if (rwb->scaled_max)
345 return;
346
347 rwb->scale_step--;
348 rwb->unknown_cnt = 0;
349
350 rwb->scaled_max = calc_wb_limits(rwb);
351
352 rwb_wake_all(rwb);
353
354 rwb_trace_step(rwb, "step up");
355}
356
357/*
358 * Scale rwb down. If 'hard_throttle' is set, do it quicker, since we
359 * had a latency violation.
360 */
361static void scale_down(struct rq_wb *rwb, bool hard_throttle)
362{
363 /*
364 * Stop scaling down when we've hit the limit. This also prevents
365 * ->scale_step from going to crazy values, if the device can't
366 * keep up.
367 */
368 if (rwb->wb_max == 1)
369 return;
370
371 if (rwb->scale_step < 0 && hard_throttle)
372 rwb->scale_step = 0;
373 else
374 rwb->scale_step++;
375
376 rwb->scaled_max = false;
377 rwb->unknown_cnt = 0;
378 calc_wb_limits(rwb);
379 rwb_trace_step(rwb, "step down");
380}
381
382static void rwb_arm_timer(struct rq_wb *rwb)
383{
384 if (rwb->scale_step > 0) {
385 /*
386 * We should speed this up, using some variant of a fast
387 * integer inverse square root calculation. Since we only do
388 * this for every window expiration, it's not a huge deal,
389 * though.
390 */
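		/*
		 * The fixed point math below works out to
		 * win_nsec / sqrt(scale_step + 1): (win_nsec * 16) divided by
		 * (16 * sqrt(scale_step + 1)). For example, scale_step == 3
		 * halves the default 100msec window to 50msec.
		 */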
391 rwb->cur_win_nsec = div_u64(rwb->win_nsec << 4,
392 int_sqrt((rwb->scale_step + 1) << 8));
393 } else {
394 /*
395 * For step < 0, we don't want to increase/decrease the
396 * window size.
397 */
398 rwb->cur_win_nsec = rwb->win_nsec;
399 }
400
401 blk_stat_activate_nsecs(rwb->cb, rwb->cur_win_nsec);
402}
403
404static void wb_timer_fn(struct blk_stat_callback *cb)
405{
406 struct rq_wb *rwb = cb->data;
407 unsigned int inflight = wbt_inflight(rwb);
408 int status;
409
410 status = latency_exceeded(rwb, cb->stat);
411
412 trace_wbt_timer(rwb->queue->backing_dev_info, status, rwb->scale_step,
413 inflight);
414
415 /*
416 * If we exceeded the latency target, step down. If we did not,
417 * step one level up. If we don't know enough to say either exceeded
418 * or ok, then don't do anything.
419 */
420 switch (status) {
421 case LAT_EXCEEDED:
422 scale_down(rwb, true);
423 break;
424 case LAT_OK:
425 scale_up(rwb);
426 break;
427 case LAT_UNKNOWN_WRITES:
		/*
		 * We started at the center step, and don't have a valid
		 * read/write sample, but we do have writes going on.
		 * Allow step to go negative, to increase write perf.
		 */
433 scale_up(rwb);
434 break;
435 case LAT_UNKNOWN:
436 if (++rwb->unknown_cnt < RWB_UNKNOWN_BUMP)
437 break;
		/*
		 * We get here when we previously scaled the depth, and we
		 * currently don't have a valid read/write sample. For that
		 * case, slowly return to center state (step == 0).
		 */
443 if (rwb->scale_step > 0)
444 scale_up(rwb);
445 else if (rwb->scale_step < 0)
446 scale_down(rwb, false);
447 break;
448 default:
449 break;
450 }
451
452 /*
453 * Re-arm timer, if we have IO in flight
454 */
455 if (rwb->scale_step || inflight)
456 rwb_arm_timer(rwb);
457}
458
459void wbt_update_limits(struct rq_wb *rwb)
460{
461 rwb->scale_step = 0;
462 rwb->scaled_max = false;
463 calc_wb_limits(rwb);
464
465 rwb_wake_all(rwb);
466}
467
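/*
 * Return true if unrelated IO was issued or completed within the
 * last 100msec.
 */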
468static bool close_io(struct rq_wb *rwb)
469{
470 const unsigned long now = jiffies;
471
472 return time_before(now, rwb->last_issue + HZ / 10) ||
473 time_before(now, rwb->last_comp + HZ / 10);
474}
475
476#define REQ_HIPRIO (REQ_SYNC | REQ_META | REQ_PRIO)
477
478static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
479{
480 unsigned int limit;
481
482 /*
483 * At this point we know it's a buffered write. If this is
484 * kswapd trying to free memory, or REQ_SYNC is set, then
485 * it's WB_SYNC_ALL writeback, and we'll use the max limit for
486 * that. If the write is marked as a background write, then use
487 * the idle limit, or go to normal if we haven't had competing
488 * IO for a bit.
489 */
490 if ((rw & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
491 limit = rwb->wb_max;
492 else if ((rw & REQ_BACKGROUND) || close_io(rwb)) {
493 /*
494 * If less than 100ms since we completed unrelated IO,
495 * limit us to half the depth for background writeback.
496 */
497 limit = rwb->wb_background;
498 } else
499 limit = rwb->wb_normal;
500
501 return limit;
502}
503
504static inline bool may_queue(struct rq_wb *rwb, struct rq_wait *rqw,
505 wait_queue_entry_t *wait, unsigned long rw)
506{
507 /*
508 * inc it here even if disabled, since we'll dec it at completion.
509 * this only happens if the task was sleeping in __wbt_wait(),
510 * and someone turned it off at the same time.
511 */
512 if (!rwb_enabled(rwb)) {
513 atomic_inc(&rqw->inflight);
514 return true;
515 }
516
517 /*
518 * If the waitqueue is already active and we are not the next
519 * in line to be woken up, wait for our turn.
520 */
521 if (waitqueue_active(&rqw->wait) &&
522 rqw->wait.head.next != &wait->entry)
523 return false;
524
525 return atomic_inc_below(&rqw->inflight, get_limit(rwb, rw));
526}
527
528/*
529 * Block if we will exceed our limit, or if we are currently waiting for
530 * the timer to kick off queuing again.
531 */
532static void __wbt_wait(struct rq_wb *rwb, unsigned long rw, spinlock_t *lock)
533 __releases(lock)
534 __acquires(lock)
535{
536 struct rq_wait *rqw = get_rq_wait(rwb, current_is_kswapd());
537 DEFINE_WAIT(wait);
538
539 if (may_queue(rwb, rqw, &wait, rw))
540 return;
541
542 do {
543 prepare_to_wait_exclusive(&rqw->wait, &wait,
544 TASK_UNINTERRUPTIBLE);
545
546 if (may_queue(rwb, rqw, &wait, rw))
547 break;
548
549 if (lock) {
550 spin_unlock_irq(lock);
551 io_schedule();
552 spin_lock_irq(lock);
553 } else
554 io_schedule();
555 } while (1);
556
557 finish_wait(&rqw->wait, &wait);
558}
559
560static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
561{
562 const int op = bio_op(bio);
563
564 /*
565 * If not a WRITE, do nothing
566 */
567 if (op != REQ_OP_WRITE)
568 return false;
569
570 /*
571 * Don't throttle WRITE_ODIRECT
572 */
573 if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) == (REQ_SYNC | REQ_IDLE))
574 return false;
575
576 return true;
577}
578
/*
 * Returns the wbt flags to account the IO with; WBT_TRACKED is set if
 * the request should be accounted on completion.
 * May sleep, if we have exceeded the writeback limits. Caller can pass
 * in an irq held spinlock, if it holds one when calling this function.
 * If we do sleep, we'll release and re-grab it.
 */
585enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio, spinlock_t *lock)
586{
587 unsigned int ret = 0;
588
589 if (!rwb_enabled(rwb))
590 return 0;
591
592 if (bio_op(bio) == REQ_OP_READ)
593 ret = WBT_READ;
594
595 if (!wbt_should_throttle(rwb, bio)) {
596 if (ret & WBT_READ)
597 wb_timestamp(rwb, &rwb->last_issue);
598 return ret;
599 }
600
601 __wbt_wait(rwb, bio->bi_opf, lock);
602
603 if (!blk_stat_is_active(rwb->cb))
604 rwb_arm_timer(rwb);
605
606 if (current_is_kswapd())
607 ret |= WBT_KSWAPD;
608
609 return ret | WBT_TRACKED;
610}
611
612void wbt_issue(struct rq_wb *rwb, struct blk_issue_stat *stat)
613{
614 if (!rwb_enabled(rwb))
615 return;
616
	/*
	 * Track sync issue, so we can react quicker if a sync IO takes a
	 * long time to complete. Note that this is just a hint. 'stat' can
	 * go away when the request completes, so it's important we never
	 * dereference it. We only use the address to compare with, which is
	 * why we store the sync_issue time locally.
	 */
625 if (wbt_is_read(stat) && !rwb->sync_issue) {
626 rwb->sync_cookie = stat;
627 rwb->sync_issue = blk_stat_time(stat);
628 }
629}
630
631void wbt_requeue(struct rq_wb *rwb, struct blk_issue_stat *stat)
632{
633 if (!rwb_enabled(rwb))
634 return;
635 if (stat == rwb->sync_cookie) {
636 rwb->sync_issue = 0;
637 rwb->sync_cookie = NULL;
638 }
639}
640
641void wbt_set_queue_depth(struct rq_wb *rwb, unsigned int depth)
642{
643 if (rwb) {
644 rwb->queue_depth = depth;
645 wbt_update_limits(rwb);
646 }
647}
648
649void wbt_set_write_cache(struct rq_wb *rwb, bool write_cache_on)
650{
651 if (rwb)
652 rwb->wc = write_cache_on;
653}
654
655/*
656 * Disable wbt, if enabled by default.
657 */
658void wbt_disable_default(struct request_queue *q)
659{
660 struct rq_wb *rwb = q->rq_wb;
661
662 if (rwb && rwb->enable_state == WBT_STATE_ON_DEFAULT)
663 wbt_exit(q);
664}
665EXPORT_SYMBOL_GPL(wbt_disable_default);
666
667/*
668 * Enable wbt if defaults are configured that way
669 */
670void wbt_enable_default(struct request_queue *q)
671{
672 /* Throttling already enabled? */
673 if (q->rq_wb)
674 return;
675
676 /* Queue not registered? Maybe shutting down... */
677 if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
678 return;
679
680 if ((q->mq_ops && IS_ENABLED(CONFIG_BLK_WBT_MQ)) ||
681 (q->request_fn && IS_ENABLED(CONFIG_BLK_WBT_SQ)))
682 wbt_init(q);
683}
684EXPORT_SYMBOL_GPL(wbt_enable_default);
685
686u64 wbt_default_latency_nsec(struct request_queue *q)
687{
688 /*
689 * We default to 2msec for non-rotational storage, and 75msec
690 * for rotational storage.
691 */
692 if (blk_queue_nonrot(q))
693 return 2000000ULL;
694 else
695 return 75000000ULL;
696}
697
698static int wbt_data_dir(const struct request *rq)
699{
700 const int op = req_op(rq);
701
702 if (op == REQ_OP_READ)
703 return READ;
704 else if (op == REQ_OP_WRITE || op == REQ_OP_FLUSH)
705 return WRITE;
706
707 /* don't account */
708 return -1;
709}
710
711int wbt_init(struct request_queue *q)
712{
713 struct rq_wb *rwb;
714 int i;
715
716 BUILD_BUG_ON(WBT_NR_BITS > BLK_STAT_RES_BITS);
717
718 rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
719 if (!rwb)
720 return -ENOMEM;
721
722 rwb->cb = blk_stat_alloc_callback(wb_timer_fn, wbt_data_dir, 2, rwb);
723 if (!rwb->cb) {
724 kfree(rwb);
725 return -ENOMEM;
726 }
727
728 for (i = 0; i < WBT_NUM_RWQ; i++) {
729 atomic_set(&rwb->rq_wait[i].inflight, 0);
730 init_waitqueue_head(&rwb->rq_wait[i].wait);
731 }
732
733 rwb->last_comp = rwb->last_issue = jiffies;
734 rwb->queue = q;
735 rwb->win_nsec = RWB_WINDOW_NSEC;
736 rwb->enable_state = WBT_STATE_ON_DEFAULT;
737 wbt_update_limits(rwb);
738
739 /*
740 * Assign rwb and add the stats callback.
741 */
742 q->rq_wb = rwb;
743 blk_stat_add_callback(q, rwb->cb);
744
745 rwb->min_lat_nsec = wbt_default_latency_nsec(q);
746
747 wbt_set_queue_depth(rwb, blk_queue_depth(q));
748 wbt_set_write_cache(rwb, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
749
750 return 0;
751}
752
753void wbt_exit(struct request_queue *q)
754{
755 struct rq_wb *rwb = q->rq_wb;
756
757 if (rwb) {
758 blk_stat_remove_callback(q, rwb->cb);
759 blk_stat_free_callback(rwb->cb);
760 q->rq_wb = NULL;
761 kfree(rwb);
762 }
763}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * buffered writeback throttling. loosely based on CoDel. We can't drop
4 * packets for IO scheduling, so the logic is something like this:
5 *
6 * - Monitor latencies in a defined window of time.
7 * - If the minimum latency in the above window exceeds some target, increment
8 * scaling step and scale down queue depth by a factor of 2x. The monitoring
9 * window is then shrunk to 100 / sqrt(scaling step + 1).
10 * - For any window where we don't have solid data on what the latencies
11 * look like, retain status quo.
12 * - If latencies look good, decrement scaling step.
13 * - If we're only doing writes, allow the scaling step to go negative. This
14 * will temporarily boost write performance, snapping back to a stable
15 * scaling step of 0 if reads show up or the heavy writers finish. Unlike
16 * positive scaling steps where we shrink the monitoring window, a negative
17 * scaling step retains the default step==0 window size.
18 *
19 * Copyright (C) 2016 Jens Axboe
20 *
21 */
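/*
 * As an illustration of the above: with the default 100msec window, one
 * scaling step shrinks the monitoring window to roughly 71msec, and a
 * second step shrinks it to roughly 58msec.
 */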
22#include <linux/kernel.h>
23#include <linux/blk_types.h>
24#include <linux/slab.h>
25#include <linux/backing-dev.h>
26#include <linux/swap.h>
27
28#include "blk-wbt.h"
29#include "blk-rq-qos.h"
30
31#define CREATE_TRACE_POINTS
32#include <trace/events/wbt.h>
33
34static inline void wbt_clear_state(struct request *rq)
35{
36 rq->wbt_flags = 0;
37}
38
39static inline enum wbt_flags wbt_flags(struct request *rq)
40{
41 return rq->wbt_flags;
42}
43
44static inline bool wbt_is_tracked(struct request *rq)
45{
46 return rq->wbt_flags & WBT_TRACKED;
47}
48
49static inline bool wbt_is_read(struct request *rq)
50{
51 return rq->wbt_flags & WBT_READ;
52}
53
54enum {
55 /*
56 * Default setting, we'll scale up (to 75% of QD max) or down (min 1)
57 * from here depending on device stats
58 */
59 RWB_DEF_DEPTH = 16,
60
61 /*
62 * 100msec window
63 */
64 RWB_WINDOW_NSEC = 100 * 1000 * 1000ULL,
65
66 /*
67 * Disregard stats, if we don't meet this minimum
68 */
69 RWB_MIN_WRITE_SAMPLES = 3,
70
71 /*
72 * If we have this number of consecutive windows with not enough
73 * information to scale up or down, scale up.
74 */
75 RWB_UNKNOWN_BUMP = 5,
76};
77
78static inline bool rwb_enabled(struct rq_wb *rwb)
79{
80 return rwb && rwb->enable_state != WBT_STATE_OFF_DEFAULT &&
81 rwb->wb_normal != 0;
82}
83
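/*
 * Record the current jiffies in 'var', if wbt is enabled. Used to track
 * when we last issued and completed unthrottled IO.
 */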
84static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
85{
86 if (rwb_enabled(rwb)) {
87 const unsigned long cur = jiffies;
88
89 if (cur != *var)
90 *var = cur;
91 }
92}
93
94/*
95 * If a task was rate throttled in balance_dirty_pages() within the last
96 * second or so, use that to indicate a higher cleaning rate.
97 */
98static bool wb_recent_wait(struct rq_wb *rwb)
99{
100 struct bdi_writeback *wb = &rwb->rqos.q->backing_dev_info->wb;
101
102 return time_before(jiffies, wb->dirty_sleep + HZ);
103}
104
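/*
 * Select the wait queue for this IO: kswapd and discards each get a
 * dedicated queue, everything else shares the background writeback queue.
 */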
105static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
106 enum wbt_flags wb_acct)
107{
108 if (wb_acct & WBT_KSWAPD)
109 return &rwb->rq_wait[WBT_RWQ_KSWAPD];
110 else if (wb_acct & WBT_DISCARD)
111 return &rwb->rq_wait[WBT_RWQ_DISCARD];
112
113 return &rwb->rq_wait[WBT_RWQ_BG];
114}
115
116static void rwb_wake_all(struct rq_wb *rwb)
117{
118 int i;
119
120 for (i = 0; i < WBT_NUM_RWQ; i++) {
121 struct rq_wait *rqw = &rwb->rq_wait[i];
122
123 if (wq_has_sleeper(&rqw->wait))
124 wake_up_all(&rqw->wait);
125 }
126}
127
128static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
129 enum wbt_flags wb_acct)
130{
131 int inflight, limit;
132
133 inflight = atomic_dec_return(&rqw->inflight);
134
135 /*
136 * wbt got disabled with IO in flight. Wake up any potential
137 * waiters, we don't have to do more than that.
138 */
139 if (unlikely(!rwb_enabled(rwb))) {
140 rwb_wake_all(rwb);
141 return;
142 }
143
144 /*
145 * For discards, our limit is always the background. For writes, if
146 * the device does write back caching, drop further down before we
147 * wake people up.
148 */
149 if (wb_acct & WBT_DISCARD)
150 limit = rwb->wb_background;
151 else if (rwb->wc && !wb_recent_wait(rwb))
152 limit = 0;
153 else
154 limit = rwb->wb_normal;
155
156 /*
157 * Don't wake anyone up if we are above the normal limit.
158 */
159 if (inflight && inflight >= limit)
160 return;
161
162 if (wq_has_sleeper(&rqw->wait)) {
163 int diff = limit - inflight;
164
165 if (!inflight || diff >= rwb->wb_background / 2)
166 wake_up_all(&rqw->wait);
167 }
168}
169
170static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
171{
172 struct rq_wb *rwb = RQWB(rqos);
173 struct rq_wait *rqw;
174
175 if (!(wb_acct & WBT_TRACKED))
176 return;
177
178 rqw = get_rq_wait(rwb, wb_acct);
179 wbt_rqw_done(rwb, rqw, wb_acct);
180}
181
/*
 * Called on completion of a request. Note that it's also called when
 * a request is merged, at which point the request gets freed.
 */
186static void wbt_done(struct rq_qos *rqos, struct request *rq)
187{
188 struct rq_wb *rwb = RQWB(rqos);
189
190 if (!wbt_is_tracked(rq)) {
191 if (rwb->sync_cookie == rq) {
192 rwb->sync_issue = 0;
193 rwb->sync_cookie = NULL;
194 }
195
196 if (wbt_is_read(rq))
197 wb_timestamp(rwb, &rwb->last_comp);
198 } else {
199 WARN_ON_ONCE(rq == rwb->sync_cookie);
200 __wbt_done(rqos, wbt_flags(rq));
201 }
202 wbt_clear_state(rq);
203}
204
205static inline bool stat_sample_valid(struct blk_rq_stat *stat)
206{
207 /*
208 * We need at least one read sample, and a minimum of
209 * RWB_MIN_WRITE_SAMPLES. We require some write samples to know
210 * that it's writes impacting us, and not just some sole read on
211 * a device that is in a lower power state.
212 */
213 return (stat[READ].nr_samples >= 1 &&
214 stat[WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES);
215}
216
217static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
218{
219 u64 now, issue = READ_ONCE(rwb->sync_issue);
220
221 if (!issue || !rwb->sync_cookie)
222 return 0;
223
224 now = ktime_to_ns(ktime_get());
225 return now - issue;
226}
227
228enum {
229 LAT_OK = 1,
230 LAT_UNKNOWN,
231 LAT_UNKNOWN_WRITES,
232 LAT_EXCEEDED,
233};
234
235static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
236{
237 struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
238 struct rq_depth *rqd = &rwb->rq_depth;
239 u64 thislat;
240
241 /*
242 * If our stored sync issue exceeds the window size, or it
243 * exceeds our min target AND we haven't logged any entries,
244 * flag the latency as exceeded. wbt works off completion latencies,
245 * but for a flooded device, a single sync IO can take a long time
246 * to complete after being issued. If this time exceeds our
247 * monitoring window AND we didn't see any other completions in that
248 * window, then count that sync IO as a violation of the latency.
249 */
250 thislat = rwb_sync_issue_lat(rwb);
251 if (thislat > rwb->cur_win_nsec ||
252 (thislat > rwb->min_lat_nsec && !stat[READ].nr_samples)) {
253 trace_wbt_lat(bdi, thislat);
254 return LAT_EXCEEDED;
255 }
256
257 /*
258 * No read/write mix, if stat isn't valid
259 */
260 if (!stat_sample_valid(stat)) {
		/*
		 * If we had writes in this stat window and the window is
		 * current, we're only doing writes. If a task recently
		 * waited or still has writes in flight, consider us doing
		 * just writes as well.
		 */
267 if (stat[WRITE].nr_samples || wb_recent_wait(rwb) ||
268 wbt_inflight(rwb))
269 return LAT_UNKNOWN_WRITES;
270 return LAT_UNKNOWN;
271 }
272
273 /*
274 * If the 'min' latency exceeds our target, step down.
275 */
276 if (stat[READ].min > rwb->min_lat_nsec) {
277 trace_wbt_lat(bdi, stat[READ].min);
278 trace_wbt_stat(bdi, stat);
279 return LAT_EXCEEDED;
280 }
281
282 if (rqd->scale_step)
283 trace_wbt_stat(bdi, stat);
284
285 return LAT_OK;
286}
287
288static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
289{
290 struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
291 struct rq_depth *rqd = &rwb->rq_depth;
292
293 trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
294 rwb->wb_background, rwb->wb_normal, rqd->max_depth);
295}
296
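/*
 * Derive the normal and background write limits from the current max
 * depth. For example, a max depth of 16 gives a normal limit of 8 and a
 * background limit of 4.
 */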
297static void calc_wb_limits(struct rq_wb *rwb)
298{
299 if (rwb->min_lat_nsec == 0) {
300 rwb->wb_normal = rwb->wb_background = 0;
301 } else if (rwb->rq_depth.max_depth <= 2) {
302 rwb->wb_normal = rwb->rq_depth.max_depth;
303 rwb->wb_background = 1;
304 } else {
305 rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;
306 rwb->wb_background = (rwb->rq_depth.max_depth + 3) / 4;
307 }
308}
309
310static void scale_up(struct rq_wb *rwb)
311{
312 if (!rq_depth_scale_up(&rwb->rq_depth))
313 return;
314 calc_wb_limits(rwb);
315 rwb->unknown_cnt = 0;
316 rwb_wake_all(rwb);
317 rwb_trace_step(rwb, tracepoint_string("scale up"));
318}
319
320static void scale_down(struct rq_wb *rwb, bool hard_throttle)
321{
322 if (!rq_depth_scale_down(&rwb->rq_depth, hard_throttle))
323 return;
324 calc_wb_limits(rwb);
325 rwb->unknown_cnt = 0;
326 rwb_trace_step(rwb, tracepoint_string("scale down"));
327}
328
329static void rwb_arm_timer(struct rq_wb *rwb)
330{
331 struct rq_depth *rqd = &rwb->rq_depth;
332
333 if (rqd->scale_step > 0) {
334 /*
335 * We should speed this up, using some variant of a fast
336 * integer inverse square root calculation. Since we only do
337 * this for every window expiration, it's not a huge deal,
338 * though.
339 */
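		/*
		 * The fixed point math below works out to
		 * win_nsec / sqrt(scale_step + 1): (win_nsec * 16) divided by
		 * (16 * sqrt(scale_step + 1)). For example, scale_step == 3
		 * halves the default 100msec window to 50msec.
		 */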
340 rwb->cur_win_nsec = div_u64(rwb->win_nsec << 4,
341 int_sqrt((rqd->scale_step + 1) << 8));
342 } else {
343 /*
344 * For step < 0, we don't want to increase/decrease the
345 * window size.
346 */
347 rwb->cur_win_nsec = rwb->win_nsec;
348 }
349
350 blk_stat_activate_nsecs(rwb->cb, rwb->cur_win_nsec);
351}
352
353static void wb_timer_fn(struct blk_stat_callback *cb)
354{
355 struct rq_wb *rwb = cb->data;
356 struct rq_depth *rqd = &rwb->rq_depth;
357 unsigned int inflight = wbt_inflight(rwb);
358 int status;
359
360 status = latency_exceeded(rwb, cb->stat);
361
362 trace_wbt_timer(rwb->rqos.q->backing_dev_info, status, rqd->scale_step,
363 inflight);
364
365 /*
366 * If we exceeded the latency target, step down. If we did not,
367 * step one level up. If we don't know enough to say either exceeded
368 * or ok, then don't do anything.
369 */
370 switch (status) {
371 case LAT_EXCEEDED:
372 scale_down(rwb, true);
373 break;
374 case LAT_OK:
375 scale_up(rwb);
376 break;
377 case LAT_UNKNOWN_WRITES:
		/*
		 * We started at the center step, and don't have a valid
		 * read/write sample, but we do have writes going on.
		 * Allow step to go negative, to increase write perf.
		 */
383 scale_up(rwb);
384 break;
385 case LAT_UNKNOWN:
386 if (++rwb->unknown_cnt < RWB_UNKNOWN_BUMP)
387 break;
		/*
		 * We get here when we previously scaled the depth, and we
		 * currently don't have a valid read/write sample. For that
		 * case, slowly return to center state (step == 0).
		 */
393 if (rqd->scale_step > 0)
394 scale_up(rwb);
395 else if (rqd->scale_step < 0)
396 scale_down(rwb, false);
397 break;
398 default:
399 break;
400 }
401
402 /*
403 * Re-arm timer, if we have IO in flight
404 */
405 if (rqd->scale_step || inflight)
406 rwb_arm_timer(rwb);
407}
408
409static void wbt_update_limits(struct rq_wb *rwb)
410{
411 struct rq_depth *rqd = &rwb->rq_depth;
412
413 rqd->scale_step = 0;
414 rqd->scaled_max = false;
415
416 rq_depth_calc_max_depth(rqd);
417 calc_wb_limits(rwb);
418
419 rwb_wake_all(rwb);
420}
421
422u64 wbt_get_min_lat(struct request_queue *q)
423{
424 struct rq_qos *rqos = wbt_rq_qos(q);
425 if (!rqos)
426 return 0;
427 return RQWB(rqos)->min_lat_nsec;
428}
429
430void wbt_set_min_lat(struct request_queue *q, u64 val)
431{
432 struct rq_qos *rqos = wbt_rq_qos(q);
433 if (!rqos)
434 return;
435 RQWB(rqos)->min_lat_nsec = val;
436 RQWB(rqos)->enable_state = WBT_STATE_ON_MANUAL;
437 wbt_update_limits(RQWB(rqos));
438}
439
440
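/*
 * Return true if unrelated IO was issued or completed within the
 * last 100msec.
 */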
441static bool close_io(struct rq_wb *rwb)
442{
443 const unsigned long now = jiffies;
444
445 return time_before(now, rwb->last_issue + HZ / 10) ||
446 time_before(now, rwb->last_comp + HZ / 10);
447}
448
449#define REQ_HIPRIO (REQ_SYNC | REQ_META | REQ_PRIO)
450
451static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
452{
453 unsigned int limit;
454
455 /*
456 * If we got disabled, just return UINT_MAX. This ensures that
457 * we'll properly inc a new IO, and dec+wakeup at the end.
458 */
459 if (!rwb_enabled(rwb))
460 return UINT_MAX;
461
462 if ((rw & REQ_OP_MASK) == REQ_OP_DISCARD)
463 return rwb->wb_background;
464
465 /*
466 * At this point we know it's a buffered write. If this is
467 * kswapd trying to free memory, or REQ_SYNC is set, then
468 * it's WB_SYNC_ALL writeback, and we'll use the max limit for
469 * that. If the write is marked as a background write, then use
470 * the idle limit, or go to normal if we haven't had competing
471 * IO for a bit.
472 */
473 if ((rw & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
474 limit = rwb->rq_depth.max_depth;
475 else if ((rw & REQ_BACKGROUND) || close_io(rwb)) {
476 /*
477 * If less than 100ms since we completed unrelated IO,
478 * limit us to half the depth for background writeback.
479 */
480 limit = rwb->wb_background;
481 } else
482 limit = rwb->wb_normal;
483
484 return limit;
485}
486
487struct wbt_wait_data {
488 struct rq_wb *rwb;
489 enum wbt_flags wb_acct;
490 unsigned long rw;
491};
492
493static bool wbt_inflight_cb(struct rq_wait *rqw, void *private_data)
494{
495 struct wbt_wait_data *data = private_data;
496 return rq_wait_inc_below(rqw, get_limit(data->rwb, data->rw));
497}
498
499static void wbt_cleanup_cb(struct rq_wait *rqw, void *private_data)
500{
501 struct wbt_wait_data *data = private_data;
502 wbt_rqw_done(data->rwb, rqw, data->wb_acct);
503}
504
505/*
506 * Block if we will exceed our limit, or if we are currently waiting for
507 * the timer to kick off queuing again.
508 */
509static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
510 unsigned long rw)
511{
512 struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
513 struct wbt_wait_data data = {
514 .rwb = rwb,
515 .wb_acct = wb_acct,
516 .rw = rw,
517 };
518
519 rq_qos_wait(rqw, &data, wbt_inflight_cb, wbt_cleanup_cb);
520}
521
522static inline bool wbt_should_throttle(struct bio *bio)
523{
524 switch (bio_op(bio)) {
525 case REQ_OP_WRITE:
526 /*
527 * Don't throttle WRITE_ODIRECT
528 */
529 if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) ==
530 (REQ_SYNC | REQ_IDLE))
531 return false;
532 fallthrough;
533 case REQ_OP_DISCARD:
534 return true;
535 default:
536 return false;
537 }
538}
539
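/*
 * Map a bio to the wbt accounting flags: reads are marked WBT_READ, and
 * throttled writes and discards are marked WBT_TRACKED, with WBT_KSWAPD
 * and WBT_DISCARD added as appropriate.
 */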
540static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
541{
542 enum wbt_flags flags = 0;
543
544 if (!rwb_enabled(rwb))
545 return 0;
546
547 if (bio_op(bio) == REQ_OP_READ) {
548 flags = WBT_READ;
549 } else if (wbt_should_throttle(bio)) {
550 if (current_is_kswapd())
551 flags |= WBT_KSWAPD;
552 if (bio_op(bio) == REQ_OP_DISCARD)
553 flags |= WBT_DISCARD;
554 flags |= WBT_TRACKED;
555 }
556 return flags;
557}
558
559static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio)
560{
561 struct rq_wb *rwb = RQWB(rqos);
562 enum wbt_flags flags = bio_to_wbt_flags(rwb, bio);
563 __wbt_done(rqos, flags);
564}
565
/*
 * May sleep, if we have exceeded the writeback limits.
 */
571static void wbt_wait(struct rq_qos *rqos, struct bio *bio)
572{
573 struct rq_wb *rwb = RQWB(rqos);
574 enum wbt_flags flags;
575
576 flags = bio_to_wbt_flags(rwb, bio);
577 if (!(flags & WBT_TRACKED)) {
578 if (flags & WBT_READ)
579 wb_timestamp(rwb, &rwb->last_issue);
580 return;
581 }
582
583 __wbt_wait(rwb, flags, bio->bi_opf);
584
585 if (!blk_stat_is_active(rwb->cb))
586 rwb_arm_timer(rwb);
587}
588
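/*
 * Carry the wbt flags for this bio over to the request it is attached
 * to, so they can be found again at completion time.
 */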
589static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
590{
591 struct rq_wb *rwb = RQWB(rqos);
592 rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
593}
594
595static void wbt_issue(struct rq_qos *rqos, struct request *rq)
596{
597 struct rq_wb *rwb = RQWB(rqos);
598
599 if (!rwb_enabled(rwb))
600 return;
601
	/*
	 * Track sync issue, so we can react quicker if a sync IO takes a
	 * long time to complete. Note that this is just a hint. The request
	 * can go away when it completes, so it's important we never
	 * dereference it. We only use the address to compare with, which is
	 * why we store the sync_issue time locally.
	 */
609 if (wbt_is_read(rq) && !rwb->sync_issue) {
610 rwb->sync_cookie = rq;
611 rwb->sync_issue = rq->io_start_time_ns;
612 }
613}
614
615static void wbt_requeue(struct rq_qos *rqos, struct request *rq)
616{
617 struct rq_wb *rwb = RQWB(rqos);
618 if (!rwb_enabled(rwb))
619 return;
620 if (rq == rwb->sync_cookie) {
621 rwb->sync_issue = 0;
622 rwb->sync_cookie = NULL;
623 }
624}
625
626void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
627{
628 struct rq_qos *rqos = wbt_rq_qos(q);
629 if (rqos)
630 RQWB(rqos)->wc = write_cache_on;
631}
632
633/*
634 * Enable wbt if defaults are configured that way
635 */
636void wbt_enable_default(struct request_queue *q)
637{
638 struct rq_qos *rqos = wbt_rq_qos(q);
639
640 /* Throttling already enabled? */
641 if (rqos) {
642 if (RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT)
643 RQWB(rqos)->enable_state = WBT_STATE_ON_DEFAULT;
644 return;
645 }
646
647 /* Queue not registered? Maybe shutting down... */
648 if (!blk_queue_registered(q))
649 return;
650
651 if (queue_is_mq(q) && IS_ENABLED(CONFIG_BLK_WBT_MQ))
652 wbt_init(q);
653}
654EXPORT_SYMBOL_GPL(wbt_enable_default);
655
656u64 wbt_default_latency_nsec(struct request_queue *q)
657{
658 /*
659 * We default to 2msec for non-rotational storage, and 75msec
660 * for rotational storage.
661 */
662 if (blk_queue_nonrot(q))
663 return 2000000ULL;
664 else
665 return 75000000ULL;
666}
667
668static int wbt_data_dir(const struct request *rq)
669{
670 const int op = req_op(rq);
671
672 if (op == REQ_OP_READ)
673 return READ;
674 else if (op_is_write(op))
675 return WRITE;
676
677 /* don't account */
678 return -1;
679}
680
681static void wbt_queue_depth_changed(struct rq_qos *rqos)
682{
683 RQWB(rqos)->rq_depth.queue_depth = blk_queue_depth(rqos->q);
684 wbt_update_limits(RQWB(rqos));
685}
686
687static void wbt_exit(struct rq_qos *rqos)
688{
689 struct rq_wb *rwb = RQWB(rqos);
690 struct request_queue *q = rqos->q;
691
692 blk_stat_remove_callback(q, rwb->cb);
693 blk_stat_free_callback(rwb->cb);
694 kfree(rwb);
695}
696
697/*
698 * Disable wbt, if enabled by default.
699 */
700void wbt_disable_default(struct request_queue *q)
701{
702 struct rq_qos *rqos = wbt_rq_qos(q);
703 struct rq_wb *rwb;
704 if (!rqos)
705 return;
706 rwb = RQWB(rqos);
707 if (rwb->enable_state == WBT_STATE_ON_DEFAULT) {
708 blk_stat_deactivate(rwb->cb);
709 rwb->enable_state = WBT_STATE_OFF_DEFAULT;
710 }
711}
712EXPORT_SYMBOL_GPL(wbt_disable_default);
713
714#ifdef CONFIG_BLK_DEBUG_FS
715static int wbt_curr_win_nsec_show(void *data, struct seq_file *m)
716{
717 struct rq_qos *rqos = data;
718 struct rq_wb *rwb = RQWB(rqos);
719
720 seq_printf(m, "%llu\n", rwb->cur_win_nsec);
721 return 0;
722}
723
724static int wbt_enabled_show(void *data, struct seq_file *m)
725{
726 struct rq_qos *rqos = data;
727 struct rq_wb *rwb = RQWB(rqos);
728
729 seq_printf(m, "%d\n", rwb->enable_state);
730 return 0;
731}
732
733static int wbt_id_show(void *data, struct seq_file *m)
734{
735 struct rq_qos *rqos = data;
736
737 seq_printf(m, "%u\n", rqos->id);
738 return 0;
739}
740
741static int wbt_inflight_show(void *data, struct seq_file *m)
742{
743 struct rq_qos *rqos = data;
744 struct rq_wb *rwb = RQWB(rqos);
745 int i;
746
747 for (i = 0; i < WBT_NUM_RWQ; i++)
748 seq_printf(m, "%d: inflight %d\n", i,
749 atomic_read(&rwb->rq_wait[i].inflight));
750 return 0;
751}
752
753static int wbt_min_lat_nsec_show(void *data, struct seq_file *m)
754{
755 struct rq_qos *rqos = data;
756 struct rq_wb *rwb = RQWB(rqos);
757
758 seq_printf(m, "%lu\n", rwb->min_lat_nsec);
759 return 0;
760}
761
762static int wbt_unknown_cnt_show(void *data, struct seq_file *m)
763{
764 struct rq_qos *rqos = data;
765 struct rq_wb *rwb = RQWB(rqos);
766
767 seq_printf(m, "%u\n", rwb->unknown_cnt);
768 return 0;
769}
770
771static int wbt_normal_show(void *data, struct seq_file *m)
772{
773 struct rq_qos *rqos = data;
774 struct rq_wb *rwb = RQWB(rqos);
775
776 seq_printf(m, "%u\n", rwb->wb_normal);
777 return 0;
778}
779
780static int wbt_background_show(void *data, struct seq_file *m)
781{
782 struct rq_qos *rqos = data;
783 struct rq_wb *rwb = RQWB(rqos);
784
785 seq_printf(m, "%u\n", rwb->wb_background);
786 return 0;
787}
788
789static const struct blk_mq_debugfs_attr wbt_debugfs_attrs[] = {
790 {"curr_win_nsec", 0400, wbt_curr_win_nsec_show},
791 {"enabled", 0400, wbt_enabled_show},
792 {"id", 0400, wbt_id_show},
793 {"inflight", 0400, wbt_inflight_show},
794 {"min_lat_nsec", 0400, wbt_min_lat_nsec_show},
795 {"unknown_cnt", 0400, wbt_unknown_cnt_show},
796 {"wb_normal", 0400, wbt_normal_show},
797 {"wb_background", 0400, wbt_background_show},
798 {},
799};
800#endif
801
802static struct rq_qos_ops wbt_rqos_ops = {
803 .throttle = wbt_wait,
804 .issue = wbt_issue,
805 .track = wbt_track,
806 .requeue = wbt_requeue,
807 .done = wbt_done,
808 .cleanup = wbt_cleanup,
809 .queue_depth_changed = wbt_queue_depth_changed,
810 .exit = wbt_exit,
811#ifdef CONFIG_BLK_DEBUG_FS
812 .debugfs_attrs = wbt_debugfs_attrs,
813#endif
814};
815
816int wbt_init(struct request_queue *q)
817{
818 struct rq_wb *rwb;
819 int i;
820
821 rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
822 if (!rwb)
823 return -ENOMEM;
824
825 rwb->cb = blk_stat_alloc_callback(wb_timer_fn, wbt_data_dir, 2, rwb);
826 if (!rwb->cb) {
827 kfree(rwb);
828 return -ENOMEM;
829 }
830
831 for (i = 0; i < WBT_NUM_RWQ; i++)
832 rq_wait_init(&rwb->rq_wait[i]);
833
834 rwb->rqos.id = RQ_QOS_WBT;
835 rwb->rqos.ops = &wbt_rqos_ops;
836 rwb->rqos.q = q;
837 rwb->last_comp = rwb->last_issue = jiffies;
838 rwb->win_nsec = RWB_WINDOW_NSEC;
839 rwb->enable_state = WBT_STATE_ON_DEFAULT;
840 rwb->wc = 1;
841 rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
842
843 /*
844 * Assign rwb and add the stats callback.
845 */
846 rq_qos_add(q, &rwb->rqos);
847 blk_stat_add_callback(q, rwb->cb);
848
849 rwb->min_lat_nsec = wbt_default_latency_nsec(q);
850
851 wbt_queue_depth_changed(&rwb->rqos);
852 wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
853
854 return 0;
855}