v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * buffered writeback throttling. loosely based on CoDel. We can't drop
  4 * packets for IO scheduling, so the logic is something like this:
  5 *
  6 * - Monitor latencies in a defined window of time.
  7 * - If the minimum latency in the above window exceeds some target, increment
  8 *   scaling step and scale down queue depth by a factor of 2x. The monitoring
  9 *   window is then shrunk to 100 / sqrt(scaling step + 1).
 10 * - For any window where we don't have solid data on what the latencies
 11 *   look like, retain status quo.
 12 * - If latencies look good, decrement scaling step.
 13 * - If we're only doing writes, allow the scaling step to go negative. This
 14 *   will temporarily boost write performance, snapping back to a stable
 15 *   scaling step of 0 if reads show up or the heavy writers finish. Unlike
 16 *   positive scaling steps where we shrink the monitoring window, a negative
 17 *   scaling step retains the default step==0 window size.
 18 *
 19 * Copyright (C) 2016 Jens Axboe
 20 *
 21 */
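/*
 * A rough worked example of the loop above, using the RWB_DEF_DEPTH and
 * RWB_WINDOW_NSEC defaults defined further down (16 requests, 100msec)
 * rather than any measured device numbers:
 *
 * - step 0: depth 16, 100msec monitoring window
 * - window min latency exceeds the target -> step 1: depth halved to 8,
 *   window shrunk to 100 / sqrt(2), roughly 70msec
 * - latencies come back under the target -> step decremented back toward 0,
 *   restoring depth and window
 * - write-only workload -> step may go to -1, temporarily allowing a deeper
 *   queue while the window stays at the default 100msec
 */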
 22#include <linux/kernel.h>
 23#include <linux/blk_types.h>
 24#include <linux/slab.h>
 25#include <linux/backing-dev.h>
 26#include <linux/swap.h>
 27
 28#include "blk-wbt.h"
 29#include "blk-rq-qos.h"
 30#include "elevator.h"
 31
 32#define CREATE_TRACE_POINTS
 33#include <trace/events/wbt.h>
 34
 35static inline void wbt_clear_state(struct request *rq)
 36{
 37	rq->wbt_flags = 0;
 38}
 39
 40static inline enum wbt_flags wbt_flags(struct request *rq)
 41{
 42	return rq->wbt_flags;
 43}
 44
 45static inline bool wbt_is_tracked(struct request *rq)
 46{
 47	return rq->wbt_flags & WBT_TRACKED;
 48}
 49
 50static inline bool wbt_is_read(struct request *rq)
 51{
 52	return rq->wbt_flags & WBT_READ;
 53}
 54
 55enum {
 56	/*
 57	 * Default setting, we'll scale up (to 75% of QD max) or down (min 1)
 58	 * from here depending on device stats
 59	 */
 60	RWB_DEF_DEPTH	= 16,
 61
 62	/*
 63	 * 100msec window
 64	 */
 65	RWB_WINDOW_NSEC		= 100 * 1000 * 1000ULL,
 66
 67	/*
 68	 * Disregard stats, if we don't meet this minimum
 69	 */
 70	RWB_MIN_WRITE_SAMPLES	= 3,
 71
 72	/*
 73	 * If we have this number of consecutive windows with not enough
 74	 * information to scale up or down, scale up.
 75	 */
 76	RWB_UNKNOWN_BUMP	= 5,
 77};
 78
 79static inline bool rwb_enabled(struct rq_wb *rwb)
 80{
 81	return rwb && rwb->enable_state != WBT_STATE_OFF_DEFAULT &&
 82		      rwb->wb_normal != 0;
 83}
 84
 85static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
 86{
 87	if (rwb_enabled(rwb)) {
 88		const unsigned long cur = jiffies;
 89
 90		if (cur != *var)
 91			*var = cur;
 92	}
 93}
 94
 95/*
 96 * If a task was rate throttled in balance_dirty_pages() within the last
 97 * second or so, use that to indicate a higher cleaning rate.
 98 */
 99static bool wb_recent_wait(struct rq_wb *rwb)
100{
101	struct bdi_writeback *wb = &rwb->rqos.q->disk->bdi->wb;
102
103	return time_before(jiffies, wb->dirty_sleep + HZ);
104}
105
106static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
107					  enum wbt_flags wb_acct)
108{
109	if (wb_acct & WBT_KSWAPD)
110		return &rwb->rq_wait[WBT_RWQ_KSWAPD];
111	else if (wb_acct & WBT_DISCARD)
112		return &rwb->rq_wait[WBT_RWQ_DISCARD];
113
114	return &rwb->rq_wait[WBT_RWQ_BG];
115}
116
117static void rwb_wake_all(struct rq_wb *rwb)
118{
119	int i;
120
121	for (i = 0; i < WBT_NUM_RWQ; i++) {
122		struct rq_wait *rqw = &rwb->rq_wait[i];
123
124		if (wq_has_sleeper(&rqw->wait))
125			wake_up_all(&rqw->wait);
126	}
127}
128
129static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
130			 enum wbt_flags wb_acct)
131{
132	int inflight, limit;
133
134	inflight = atomic_dec_return(&rqw->inflight);
135
136	/*
137	 * wbt got disabled with IO in flight. Wake up any potential
138	 * waiters, we don't have to do more than that.
139	 */
140	if (unlikely(!rwb_enabled(rwb))) {
141		rwb_wake_all(rwb);
142		return;
143	}
144
145	/*
146	 * For discards, our limit is always the background. For writes, if
147	 * the device does write back caching, drop further down before we
148	 * wake people up.
149	 */
150	if (wb_acct & WBT_DISCARD)
151		limit = rwb->wb_background;
152	else if (rwb->wc && !wb_recent_wait(rwb))
153		limit = 0;
154	else
155		limit = rwb->wb_normal;
156
157	/*
158	 * Don't wake anyone up if we are above the normal limit.
159	 */
160	if (inflight && inflight >= limit)
161		return;
162
163	if (wq_has_sleeper(&rqw->wait)) {
164		int diff = limit - inflight;
165
166		if (!inflight || diff >= rwb->wb_background / 2)
167			wake_up_all(&rqw->wait);
168	}
169}
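/*
 * Wake-up example for wbt_rqw_done() above, assuming the limit ends up
 * being wb_normal == 8 with wb_background == 4: a completion dropping
 * inflight from 8 to 7 wakes nobody (7 is below the limit, but diff == 1
 * is below wb_background / 2 == 2), while the next completion
 * (inflight 6, diff 2) wakes all waiters on the rq_wait queue.
 */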
170
171static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
172{
173	struct rq_wb *rwb = RQWB(rqos);
174	struct rq_wait *rqw;
175
176	if (!(wb_acct & WBT_TRACKED))
177		return;
178
179	rqw = get_rq_wait(rwb, wb_acct);
180	wbt_rqw_done(rwb, rqw, wb_acct);
181}
182
183/*
184 * Called on completion of a request. Note that it's also called when
 185 * a request is merged, at the point where the request gets freed.
186 */
187static void wbt_done(struct rq_qos *rqos, struct request *rq)
188{
189	struct rq_wb *rwb = RQWB(rqos);
190
191	if (!wbt_is_tracked(rq)) {
192		if (rwb->sync_cookie == rq) {
193			rwb->sync_issue = 0;
194			rwb->sync_cookie = NULL;
195		}
196
197		if (wbt_is_read(rq))
198			wb_timestamp(rwb, &rwb->last_comp);
199	} else {
200		WARN_ON_ONCE(rq == rwb->sync_cookie);
201		__wbt_done(rqos, wbt_flags(rq));
202	}
203	wbt_clear_state(rq);
204}
205
206static inline bool stat_sample_valid(struct blk_rq_stat *stat)
207{
208	/*
209	 * We need at least one read sample, and a minimum of
210	 * RWB_MIN_WRITE_SAMPLES. We require some write samples to know
211	 * that it's writes impacting us, and not just some sole read on
212	 * a device that is in a lower power state.
213	 */
214	return (stat[READ].nr_samples >= 1 &&
215		stat[WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES);
216}
217
218static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
219{
220	u64 now, issue = READ_ONCE(rwb->sync_issue);
221
222	if (!issue || !rwb->sync_cookie)
223		return 0;
224
225	now = ktime_to_ns(ktime_get());
226	return now - issue;
227}
228
229enum {
230	LAT_OK = 1,
231	LAT_UNKNOWN,
232	LAT_UNKNOWN_WRITES,
233	LAT_EXCEEDED,
234};
235
236static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
237{
238	struct backing_dev_info *bdi = rwb->rqos.q->disk->bdi;
239	struct rq_depth *rqd = &rwb->rq_depth;
240	u64 thislat;
241
242	/*
243	 * If our stored sync issue exceeds the window size, or it
244	 * exceeds our min target AND we haven't logged any entries,
245	 * flag the latency as exceeded. wbt works off completion latencies,
246	 * but for a flooded device, a single sync IO can take a long time
247	 * to complete after being issued. If this time exceeds our
248	 * monitoring window AND we didn't see any other completions in that
249	 * window, then count that sync IO as a violation of the latency.
250	 */
251	thislat = rwb_sync_issue_lat(rwb);
252	if (thislat > rwb->cur_win_nsec ||
253	    (thislat > rwb->min_lat_nsec && !stat[READ].nr_samples)) {
254		trace_wbt_lat(bdi, thislat);
255		return LAT_EXCEEDED;
256	}
257
258	/*
259	 * No read/write mix, if stat isn't valid
260	 */
261	if (!stat_sample_valid(stat)) {
262		/*
263		 * If we had writes in this stat window and the window is
264		 * current, we're only doing writes. If a task recently
 265		 * waited or still has writes in flight, consider us doing
266		 * just writes as well.
267		 */
268		if (stat[WRITE].nr_samples || wb_recent_wait(rwb) ||
269		    wbt_inflight(rwb))
270			return LAT_UNKNOWN_WRITES;
271		return LAT_UNKNOWN;
272	}
273
274	/*
275	 * If the 'min' latency exceeds our target, step down.
276	 */
277	if (stat[READ].min > rwb->min_lat_nsec) {
278		trace_wbt_lat(bdi, stat[READ].min);
279		trace_wbt_stat(bdi, stat);
280		return LAT_EXCEEDED;
281	}
282
283	if (rqd->scale_step)
284		trace_wbt_stat(bdi, stat);
285
286	return LAT_OK;
287}
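/*
 * Summarising latency_exceeded(): LAT_EXCEEDED if a sync IO has been
 * outstanding longer than the window (or longer than the target with no
 * read completions logged), or if the window's minimum read latency is
 * above min_lat_nsec; LAT_UNKNOWN_WRITES if the samples only show writes;
 * LAT_UNKNOWN if there isn't enough data either way; LAT_OK otherwise.
 */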
288
289static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
290{
291	struct backing_dev_info *bdi = rwb->rqos.q->disk->bdi;
292	struct rq_depth *rqd = &rwb->rq_depth;
293
294	trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
295			rwb->wb_background, rwb->wb_normal, rqd->max_depth);
296}
297
298static void calc_wb_limits(struct rq_wb *rwb)
299{
300	if (rwb->min_lat_nsec == 0) {
301		rwb->wb_normal = rwb->wb_background = 0;
302	} else if (rwb->rq_depth.max_depth <= 2) {
303		rwb->wb_normal = rwb->rq_depth.max_depth;
304		rwb->wb_background = 1;
305	} else {
306		rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;
307		rwb->wb_background = (rwb->rq_depth.max_depth + 3) / 4;
308	}
309}
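/*
 * Example limits from calc_wb_limits() above: max_depth == 16 gives
 * wb_normal == 8 and wb_background == 4; max_depth == 2 gives
 * wb_normal == 2 and wb_background == 1; min_lat_nsec == 0 (wbt disabled)
 * zeroes both limits.
 */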
310
311static void scale_up(struct rq_wb *rwb)
312{
313	if (!rq_depth_scale_up(&rwb->rq_depth))
314		return;
315	calc_wb_limits(rwb);
316	rwb->unknown_cnt = 0;
317	rwb_wake_all(rwb);
318	rwb_trace_step(rwb, tracepoint_string("scale up"));
319}
320
321static void scale_down(struct rq_wb *rwb, bool hard_throttle)
322{
323	if (!rq_depth_scale_down(&rwb->rq_depth, hard_throttle))
324		return;
325	calc_wb_limits(rwb);
326	rwb->unknown_cnt = 0;
327	rwb_trace_step(rwb, tracepoint_string("scale down"));
328}
329
330static void rwb_arm_timer(struct rq_wb *rwb)
331{
332	struct rq_depth *rqd = &rwb->rq_depth;
333
334	if (rqd->scale_step > 0) {
335		/*
336		 * We should speed this up, using some variant of a fast
337		 * integer inverse square root calculation. Since we only do
338		 * this for every window expiration, it's not a huge deal,
339		 * though.
340		 */
341		rwb->cur_win_nsec = div_u64(rwb->win_nsec << 4,
342					int_sqrt((rqd->scale_step + 1) << 8));
343	} else {
344		/*
345		 * For step < 0, we don't want to increase/decrease the
346		 * window size.
347		 */
348		rwb->cur_win_nsec = rwb->win_nsec;
349	}
350
351	blk_stat_activate_nsecs(rwb->cb, rwb->cur_win_nsec);
352}
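/*
 * The fixed-point expression in rwb_arm_timer() approximates
 * win_nsec / sqrt(scale_step + 1):
 *
 *   (win << 4) / int_sqrt((step + 1) << 8) == win * 16 / (16 * sqrt(step + 1))
 *
 * With the default 100msec window, step 1 yields roughly 70msec and
 * step 3 yields exactly 50msec.
 */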
353
354static void wb_timer_fn(struct blk_stat_callback *cb)
355{
356	struct rq_wb *rwb = cb->data;
357	struct rq_depth *rqd = &rwb->rq_depth;
358	unsigned int inflight = wbt_inflight(rwb);
359	int status;
360
361	if (!rwb->rqos.q->disk)
362		return;
363
364	status = latency_exceeded(rwb, cb->stat);
365
366	trace_wbt_timer(rwb->rqos.q->disk->bdi, status, rqd->scale_step,
367			inflight);
368
369	/*
370	 * If we exceeded the latency target, step down. If we did not,
371	 * step one level up. If we don't know enough to say either exceeded
372	 * or ok, then don't do anything.
373	 */
374	switch (status) {
375	case LAT_EXCEEDED:
376		scale_down(rwb, true);
377		break;
378	case LAT_OK:
379		scale_up(rwb);
380		break;
381	case LAT_UNKNOWN_WRITES:
382		/*
 383		 * We started at the center step, but don't have a valid
 384		 * read/write sample; we do have writes going on, though.
385		 * Allow step to go negative, to increase write perf.
386		 */
387		scale_up(rwb);
388		break;
389	case LAT_UNKNOWN:
390		if (++rwb->unknown_cnt < RWB_UNKNOWN_BUMP)
391			break;
392		/*
 393		 * We get here when we previously scaled down the depth, and we
394		 * currently don't have a valid read/write sample. For that
395		 * case, slowly return to center state (step == 0).
396		 */
397		if (rqd->scale_step > 0)
398			scale_up(rwb);
399		else if (rqd->scale_step < 0)
400			scale_down(rwb, false);
401		break;
402	default:
403		break;
404	}
405
406	/*
407	 * Re-arm timer, if we have IO in flight
408	 */
409	if (rqd->scale_step || inflight)
410		rwb_arm_timer(rwb);
411}
412
413static void wbt_update_limits(struct rq_wb *rwb)
414{
415	struct rq_depth *rqd = &rwb->rq_depth;
416
417	rqd->scale_step = 0;
418	rqd->scaled_max = false;
419
420	rq_depth_calc_max_depth(rqd);
421	calc_wb_limits(rwb);
422
423	rwb_wake_all(rwb);
424}
425
426bool wbt_disabled(struct request_queue *q)
427{
428	struct rq_qos *rqos = wbt_rq_qos(q);
429
430	return !rqos || RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT ||
431	       RQWB(rqos)->enable_state == WBT_STATE_OFF_MANUAL;
432}
433
434u64 wbt_get_min_lat(struct request_queue *q)
435{
436	struct rq_qos *rqos = wbt_rq_qos(q);
437	if (!rqos)
438		return 0;
439	return RQWB(rqos)->min_lat_nsec;
440}
441
442void wbt_set_min_lat(struct request_queue *q, u64 val)
443{
444	struct rq_qos *rqos = wbt_rq_qos(q);
445	if (!rqos)
446		return;
447
448	RQWB(rqos)->min_lat_nsec = val;
449	if (val)
450		RQWB(rqos)->enable_state = WBT_STATE_ON_MANUAL;
451	else
452		RQWB(rqos)->enable_state = WBT_STATE_OFF_MANUAL;
453
454	wbt_update_limits(RQWB(rqos));
455}
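/*
 * wbt_get_min_lat() and wbt_set_min_lat() are the helpers behind the
 * per-queue wbt_lat_usec sysfs attribute: writing 0 there arrives here as
 * val == 0 and flips the state to WBT_STATE_OFF_MANUAL, while any non-zero
 * latency switches it to WBT_STATE_ON_MANUAL.
 */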
456
457
458static bool close_io(struct rq_wb *rwb)
459{
460	const unsigned long now = jiffies;
461
462	return time_before(now, rwb->last_issue + HZ / 10) ||
463		time_before(now, rwb->last_comp + HZ / 10);
464}
465
466#define REQ_HIPRIO	(REQ_SYNC | REQ_META | REQ_PRIO)
467
468static inline unsigned int get_limit(struct rq_wb *rwb, blk_opf_t opf)
469{
470	unsigned int limit;
471
472	/*
473	 * If we got disabled, just return UINT_MAX. This ensures that
474	 * we'll properly inc a new IO, and dec+wakeup at the end.
475	 */
476	if (!rwb_enabled(rwb))
477		return UINT_MAX;
478
479	if ((opf & REQ_OP_MASK) == REQ_OP_DISCARD)
480		return rwb->wb_background;
481
482	/*
483	 * At this point we know it's a buffered write. If this is
484	 * kswapd trying to free memory, or REQ_SYNC is set, then
485	 * it's WB_SYNC_ALL writeback, and we'll use the max limit for
486	 * that. If the write is marked as a background write, then use
487	 * the idle limit, or go to normal if we haven't had competing
488	 * IO for a bit.
489	 */
490	if ((opf & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
491		limit = rwb->rq_depth.max_depth;
492	else if ((opf & REQ_BACKGROUND) || close_io(rwb)) {
493		/*
494		 * If less than 100ms since we completed unrelated IO,
495		 * limit us to half the depth for background writeback.
496		 */
497		limit = rwb->wb_background;
498	} else
499		limit = rwb->wb_normal;
500
501	return limit;
502}
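/*
 * In short, the depth budget handed out by get_limit() is: the background
 * limit for discards; max_depth for kswapd, REQ_SYNC/REQ_META/REQ_PRIO or
 * recently throttled writeback; the background limit again for
 * REQ_BACKGROUND writes or writes issued close to other IO; and wb_normal
 * for everything else.
 */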
503
504struct wbt_wait_data {
505	struct rq_wb *rwb;
506	enum wbt_flags wb_acct;
507	blk_opf_t opf;
508};
509
510static bool wbt_inflight_cb(struct rq_wait *rqw, void *private_data)
511{
512	struct wbt_wait_data *data = private_data;
513	return rq_wait_inc_below(rqw, get_limit(data->rwb, data->opf));
514}
515
516static void wbt_cleanup_cb(struct rq_wait *rqw, void *private_data)
517{
518	struct wbt_wait_data *data = private_data;
519	wbt_rqw_done(data->rwb, rqw, data->wb_acct);
520}
521
522/*
523 * Block if we will exceed our limit, or if we are currently waiting for
524 * the timer to kick off queuing again.
525 */
526static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
527		       blk_opf_t opf)
528{
529	struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
530	struct wbt_wait_data data = {
531		.rwb = rwb,
532		.wb_acct = wb_acct,
533		.opf = opf,
534	};
535
536	rq_qos_wait(rqw, &data, wbt_inflight_cb, wbt_cleanup_cb);
537}
538
539static inline bool wbt_should_throttle(struct bio *bio)
540{
541	switch (bio_op(bio)) {
542	case REQ_OP_WRITE:
543		/*
544		 * Don't throttle WRITE_ODIRECT
545		 */
546		if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) ==
547		    (REQ_SYNC | REQ_IDLE))
548			return false;
549		fallthrough;
550	case REQ_OP_DISCARD:
551		return true;
552	default:
553		return false;
554	}
555}
556
557static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
558{
559	enum wbt_flags flags = 0;
560
561	if (!rwb_enabled(rwb))
562		return 0;
563
564	if (bio_op(bio) == REQ_OP_READ) {
565		flags = WBT_READ;
566	} else if (wbt_should_throttle(bio)) {
567		if (current_is_kswapd())
568			flags |= WBT_KSWAPD;
569		if (bio_op(bio) == REQ_OP_DISCARD)
570			flags |= WBT_DISCARD;
571		flags |= WBT_TRACKED;
572	}
573	return flags;
574}
575
576static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio)
577{
578	struct rq_wb *rwb = RQWB(rqos);
579	enum wbt_flags flags = bio_to_wbt_flags(rwb, bio);
580	__wbt_done(rqos, flags);
581}
582
583/*
584 * May sleep, if we have exceeded the writeback limits. Caller can pass
585 * in an irq held spinlock, if it holds one when calling this function.
586 * If we do sleep, we'll release and re-grab it.
587 */
588static void wbt_wait(struct rq_qos *rqos, struct bio *bio)
589{
590	struct rq_wb *rwb = RQWB(rqos);
591	enum wbt_flags flags;
592
593	flags = bio_to_wbt_flags(rwb, bio);
594	if (!(flags & WBT_TRACKED)) {
595		if (flags & WBT_READ)
596			wb_timestamp(rwb, &rwb->last_issue);
597		return;
598	}
599
600	__wbt_wait(rwb, flags, bio->bi_opf);
601
602	if (!blk_stat_is_active(rwb->cb))
603		rwb_arm_timer(rwb);
604}
605
606static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
607{
608	struct rq_wb *rwb = RQWB(rqos);
609	rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
610}
611
612static void wbt_issue(struct rq_qos *rqos, struct request *rq)
613{
614	struct rq_wb *rwb = RQWB(rqos);
615
616	if (!rwb_enabled(rwb))
617		return;
618
619	/*
 620	 * Track sync issue time, so that we can react more quickly if a sync
 621	 * IO takes a long time to complete. Note
622	 * that this is just a hint. The request can go away when it completes,
623	 * so it's important we never dereference it. We only use the address to
624	 * compare with, which is why we store the sync_issue time locally.
625	 */
626	if (wbt_is_read(rq) && !rwb->sync_issue) {
627		rwb->sync_cookie = rq;
628		rwb->sync_issue = rq->io_start_time_ns;
629	}
630}
631
632static void wbt_requeue(struct rq_qos *rqos, struct request *rq)
633{
634	struct rq_wb *rwb = RQWB(rqos);
635	if (!rwb_enabled(rwb))
636		return;
637	if (rq == rwb->sync_cookie) {
638		rwb->sync_issue = 0;
639		rwb->sync_cookie = NULL;
640	}
641}
642
643void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
644{
645	struct rq_qos *rqos = wbt_rq_qos(q);
646	if (rqos)
647		RQWB(rqos)->wc = write_cache_on;
648}
649
650/*
651 * Enable wbt if defaults are configured that way
652 */
653void wbt_enable_default(struct request_queue *q)
654{
655	struct rq_qos *rqos;
656	bool disable_flag = q->elevator &&
657		    test_bit(ELEVATOR_FLAG_DISABLE_WBT, &q->elevator->flags);
658
659	/* Throttling already enabled? */
660	rqos = wbt_rq_qos(q);
661	if (rqos) {
662		if (!disable_flag &&
663		    RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT)
664			RQWB(rqos)->enable_state = WBT_STATE_ON_DEFAULT;
665		return;
666	}
667
668	/* Queue not registered? Maybe shutting down... */
669	if (!blk_queue_registered(q))
670		return;
671
672	if (queue_is_mq(q) && !disable_flag)
673		wbt_init(q);
674}
675EXPORT_SYMBOL_GPL(wbt_enable_default);
676
677u64 wbt_default_latency_nsec(struct request_queue *q)
678{
679	/*
680	 * We default to 2msec for non-rotational storage, and 75msec
681	 * for rotational storage.
682	 */
683	if (blk_queue_nonrot(q))
684		return 2000000ULL;
685	else
686		return 75000000ULL;
687}
688
689static int wbt_data_dir(const struct request *rq)
690{
691	const enum req_op op = req_op(rq);
692
693	if (op == REQ_OP_READ)
694		return READ;
695	else if (op_is_write(op))
696		return WRITE;
697
698	/* don't account */
699	return -1;
700}
701
702static void wbt_queue_depth_changed(struct rq_qos *rqos)
703{
704	RQWB(rqos)->rq_depth.queue_depth = blk_queue_depth(rqos->q);
705	wbt_update_limits(RQWB(rqos));
706}
707
708static void wbt_exit(struct rq_qos *rqos)
709{
710	struct rq_wb *rwb = RQWB(rqos);
711	struct request_queue *q = rqos->q;
712
713	blk_stat_remove_callback(q, rwb->cb);
714	blk_stat_free_callback(rwb->cb);
715	kfree(rwb);
716}
717
718/*
719 * Disable wbt, if enabled by default.
720 */
721void wbt_disable_default(struct request_queue *q)
722{
723	struct rq_qos *rqos = wbt_rq_qos(q);
724	struct rq_wb *rwb;
725	if (!rqos)
726		return;
727	rwb = RQWB(rqos);
728	if (rwb->enable_state == WBT_STATE_ON_DEFAULT) {
729		blk_stat_deactivate(rwb->cb);
730		rwb->enable_state = WBT_STATE_OFF_DEFAULT;
731	}
732}
733EXPORT_SYMBOL_GPL(wbt_disable_default);
734
735#ifdef CONFIG_BLK_DEBUG_FS
736static int wbt_curr_win_nsec_show(void *data, struct seq_file *m)
737{
738	struct rq_qos *rqos = data;
739	struct rq_wb *rwb = RQWB(rqos);
740
741	seq_printf(m, "%llu\n", rwb->cur_win_nsec);
742	return 0;
743}
744
745static int wbt_enabled_show(void *data, struct seq_file *m)
746{
747	struct rq_qos *rqos = data;
748	struct rq_wb *rwb = RQWB(rqos);
749
750	seq_printf(m, "%d\n", rwb->enable_state);
751	return 0;
752}
753
754static int wbt_id_show(void *data, struct seq_file *m)
755{
756	struct rq_qos *rqos = data;
757
758	seq_printf(m, "%u\n", rqos->id);
759	return 0;
760}
761
762static int wbt_inflight_show(void *data, struct seq_file *m)
763{
764	struct rq_qos *rqos = data;
765	struct rq_wb *rwb = RQWB(rqos);
766	int i;
767
768	for (i = 0; i < WBT_NUM_RWQ; i++)
769		seq_printf(m, "%d: inflight %d\n", i,
770			   atomic_read(&rwb->rq_wait[i].inflight));
771	return 0;
772}
773
774static int wbt_min_lat_nsec_show(void *data, struct seq_file *m)
775{
776	struct rq_qos *rqos = data;
777	struct rq_wb *rwb = RQWB(rqos);
778
779	seq_printf(m, "%lu\n", rwb->min_lat_nsec);
780	return 0;
781}
782
783static int wbt_unknown_cnt_show(void *data, struct seq_file *m)
784{
785	struct rq_qos *rqos = data;
786	struct rq_wb *rwb = RQWB(rqos);
787
788	seq_printf(m, "%u\n", rwb->unknown_cnt);
789	return 0;
790}
791
792static int wbt_normal_show(void *data, struct seq_file *m)
793{
794	struct rq_qos *rqos = data;
795	struct rq_wb *rwb = RQWB(rqos);
796
797	seq_printf(m, "%u\n", rwb->wb_normal);
798	return 0;
799}
800
801static int wbt_background_show(void *data, struct seq_file *m)
802{
803	struct rq_qos *rqos = data;
804	struct rq_wb *rwb = RQWB(rqos);
805
806	seq_printf(m, "%u\n", rwb->wb_background);
807	return 0;
808}
809
810static const struct blk_mq_debugfs_attr wbt_debugfs_attrs[] = {
811	{"curr_win_nsec", 0400, wbt_curr_win_nsec_show},
812	{"enabled", 0400, wbt_enabled_show},
813	{"id", 0400, wbt_id_show},
814	{"inflight", 0400, wbt_inflight_show},
815	{"min_lat_nsec", 0400, wbt_min_lat_nsec_show},
816	{"unknown_cnt", 0400, wbt_unknown_cnt_show},
817	{"wb_normal", 0400, wbt_normal_show},
818	{"wb_background", 0400, wbt_background_show},
819	{},
820};
821#endif
822
823static struct rq_qos_ops wbt_rqos_ops = {
824	.throttle = wbt_wait,
825	.issue = wbt_issue,
826	.track = wbt_track,
827	.requeue = wbt_requeue,
828	.done = wbt_done,
829	.cleanup = wbt_cleanup,
830	.queue_depth_changed = wbt_queue_depth_changed,
831	.exit = wbt_exit,
832#ifdef CONFIG_BLK_DEBUG_FS
833	.debugfs_attrs = wbt_debugfs_attrs,
834#endif
835};
836
837int wbt_init(struct request_queue *q)
838{
839	struct rq_wb *rwb;
840	int i;
841	int ret;
842
843	rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
844	if (!rwb)
845		return -ENOMEM;
846
847	rwb->cb = blk_stat_alloc_callback(wb_timer_fn, wbt_data_dir, 2, rwb);
848	if (!rwb->cb) {
849		kfree(rwb);
850		return -ENOMEM;
851	}
852
853	for (i = 0; i < WBT_NUM_RWQ; i++)
854		rq_wait_init(&rwb->rq_wait[i]);
855
856	rwb->rqos.id = RQ_QOS_WBT;
857	rwb->rqos.ops = &wbt_rqos_ops;
858	rwb->rqos.q = q;
859	rwb->last_comp = rwb->last_issue = jiffies;
860	rwb->win_nsec = RWB_WINDOW_NSEC;
861	rwb->enable_state = WBT_STATE_ON_DEFAULT;
862	rwb->wc = test_bit(QUEUE_FLAG_WC, &q->queue_flags);
863	rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
864	rwb->min_lat_nsec = wbt_default_latency_nsec(q);
865
866	wbt_queue_depth_changed(&rwb->rqos);
867
868	/*
869	 * Assign rwb and add the stats callback.
870	 */
871	ret = rq_qos_add(q, &rwb->rqos);
872	if (ret)
873		goto err_free;
874
875	blk_stat_add_callback(q, rwb->cb);
876
877	return 0;
878
879err_free:
880	blk_stat_free_callback(rwb->cb);
881	kfree(rwb);
882	return ret;
883
884}
v5.9
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * buffered writeback throttling. loosely based on CoDel. We can't drop
  4 * packets for IO scheduling, so the logic is something like this:
  5 *
  6 * - Monitor latencies in a defined window of time.
  7 * - If the minimum latency in the above window exceeds some target, increment
  8 *   scaling step and scale down queue depth by a factor of 2x. The monitoring
  9 *   window is then shrunk to 100 / sqrt(scaling step + 1).
 10 * - For any window where we don't have solid data on what the latencies
 11 *   look like, retain status quo.
 12 * - If latencies look good, decrement scaling step.
 13 * - If we're only doing writes, allow the scaling step to go negative. This
 14 *   will temporarily boost write performance, snapping back to a stable
 15 *   scaling step of 0 if reads show up or the heavy writers finish. Unlike
 16 *   positive scaling steps where we shrink the monitoring window, a negative
 17 *   scaling step retains the default step==0 window size.
 18 *
 19 * Copyright (C) 2016 Jens Axboe
 20 *
 21 */
 22#include <linux/kernel.h>
 23#include <linux/blk_types.h>
 24#include <linux/slab.h>
 25#include <linux/backing-dev.h>
 26#include <linux/swap.h>
 27
 28#include "blk-wbt.h"
 29#include "blk-rq-qos.h"
 30
 31#define CREATE_TRACE_POINTS
 32#include <trace/events/wbt.h>
 33
 34static inline void wbt_clear_state(struct request *rq)
 35{
 36	rq->wbt_flags = 0;
 37}
 38
 39static inline enum wbt_flags wbt_flags(struct request *rq)
 40{
 41	return rq->wbt_flags;
 42}
 43
 44static inline bool wbt_is_tracked(struct request *rq)
 45{
 46	return rq->wbt_flags & WBT_TRACKED;
 47}
 48
 49static inline bool wbt_is_read(struct request *rq)
 50{
 51	return rq->wbt_flags & WBT_READ;
 52}
 53
 54enum {
 55	/*
 56	 * Default setting, we'll scale up (to 75% of QD max) or down (min 1)
 57	 * from here depending on device stats
 58	 */
 59	RWB_DEF_DEPTH	= 16,
 60
 61	/*
 62	 * 100msec window
 63	 */
 64	RWB_WINDOW_NSEC		= 100 * 1000 * 1000ULL,
 65
 66	/*
 67	 * Disregard stats, if we don't meet this minimum
 68	 */
 69	RWB_MIN_WRITE_SAMPLES	= 3,
 70
 71	/*
 72	 * If we have this number of consecutive windows with not enough
 73	 * information to scale up or down, scale up.
 74	 */
 75	RWB_UNKNOWN_BUMP	= 5,
 76};
 77
 78static inline bool rwb_enabled(struct rq_wb *rwb)
 79{
 80	return rwb && rwb->wb_normal != 0;
 81}
 82
 83static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
 84{
 85	if (rwb_enabled(rwb)) {
 86		const unsigned long cur = jiffies;
 87
 88		if (cur != *var)
 89			*var = cur;
 90	}
 91}
 92
 93/*
 94 * If a task was rate throttled in balance_dirty_pages() within the last
 95 * second or so, use that to indicate a higher cleaning rate.
 96 */
 97static bool wb_recent_wait(struct rq_wb *rwb)
 98{
 99	struct bdi_writeback *wb = &rwb->rqos.q->backing_dev_info->wb;
100
101	return time_before(jiffies, wb->dirty_sleep + HZ);
102}
103
104static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
105					  enum wbt_flags wb_acct)
106{
107	if (wb_acct & WBT_KSWAPD)
108		return &rwb->rq_wait[WBT_RWQ_KSWAPD];
109	else if (wb_acct & WBT_DISCARD)
110		return &rwb->rq_wait[WBT_RWQ_DISCARD];
111
112	return &rwb->rq_wait[WBT_RWQ_BG];
113}
114
115static void rwb_wake_all(struct rq_wb *rwb)
116{
117	int i;
118
119	for (i = 0; i < WBT_NUM_RWQ; i++) {
120		struct rq_wait *rqw = &rwb->rq_wait[i];
121
122		if (wq_has_sleeper(&rqw->wait))
123			wake_up_all(&rqw->wait);
124	}
125}
126
127static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
128			 enum wbt_flags wb_acct)
129{
130	int inflight, limit;
131
132	inflight = atomic_dec_return(&rqw->inflight);
133
134	/*
135	 * wbt got disabled with IO in flight. Wake up any potential
136	 * waiters, we don't have to do more than that.
137	 */
138	if (unlikely(!rwb_enabled(rwb))) {
139		rwb_wake_all(rwb);
140		return;
141	}
142
143	/*
144	 * For discards, our limit is always the background. For writes, if
145	 * the device does write back caching, drop further down before we
146	 * wake people up.
147	 */
148	if (wb_acct & WBT_DISCARD)
149		limit = rwb->wb_background;
150	else if (rwb->wc && !wb_recent_wait(rwb))
151		limit = 0;
152	else
153		limit = rwb->wb_normal;
154
155	/*
156	 * Don't wake anyone up if we are above the normal limit.
157	 */
158	if (inflight && inflight >= limit)
159		return;
160
161	if (wq_has_sleeper(&rqw->wait)) {
162		int diff = limit - inflight;
163
164		if (!inflight || diff >= rwb->wb_background / 2)
165			wake_up_all(&rqw->wait);
166	}
167}
168
169static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
170{
171	struct rq_wb *rwb = RQWB(rqos);
172	struct rq_wait *rqw;
173
174	if (!(wb_acct & WBT_TRACKED))
175		return;
176
177	rqw = get_rq_wait(rwb, wb_acct);
178	wbt_rqw_done(rwb, rqw, wb_acct);
179}
180
181/*
182 * Called on completion of a request. Note that it's also called when
 183 * a request is merged, at the point where the request gets freed.
184 */
185static void wbt_done(struct rq_qos *rqos, struct request *rq)
186{
187	struct rq_wb *rwb = RQWB(rqos);
188
189	if (!wbt_is_tracked(rq)) {
190		if (rwb->sync_cookie == rq) {
191			rwb->sync_issue = 0;
192			rwb->sync_cookie = NULL;
193		}
194
195		if (wbt_is_read(rq))
196			wb_timestamp(rwb, &rwb->last_comp);
197	} else {
198		WARN_ON_ONCE(rq == rwb->sync_cookie);
199		__wbt_done(rqos, wbt_flags(rq));
200	}
201	wbt_clear_state(rq);
202}
203
204static inline bool stat_sample_valid(struct blk_rq_stat *stat)
205{
206	/*
207	 * We need at least one read sample, and a minimum of
208	 * RWB_MIN_WRITE_SAMPLES. We require some write samples to know
209	 * that it's writes impacting us, and not just some sole read on
210	 * a device that is in a lower power state.
211	 */
212	return (stat[READ].nr_samples >= 1 &&
213		stat[WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES);
214}
215
216static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
217{
218	u64 now, issue = READ_ONCE(rwb->sync_issue);
219
220	if (!issue || !rwb->sync_cookie)
221		return 0;
222
223	now = ktime_to_ns(ktime_get());
224	return now - issue;
225}
226
227enum {
228	LAT_OK = 1,
229	LAT_UNKNOWN,
230	LAT_UNKNOWN_WRITES,
231	LAT_EXCEEDED,
232};
233
234static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
235{
236	struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
237	struct rq_depth *rqd = &rwb->rq_depth;
238	u64 thislat;
239
240	/*
241	 * If our stored sync issue exceeds the window size, or it
242	 * exceeds our min target AND we haven't logged any entries,
243	 * flag the latency as exceeded. wbt works off completion latencies,
244	 * but for a flooded device, a single sync IO can take a long time
245	 * to complete after being issued. If this time exceeds our
246	 * monitoring window AND we didn't see any other completions in that
247	 * window, then count that sync IO as a violation of the latency.
248	 */
249	thislat = rwb_sync_issue_lat(rwb);
250	if (thislat > rwb->cur_win_nsec ||
251	    (thislat > rwb->min_lat_nsec && !stat[READ].nr_samples)) {
252		trace_wbt_lat(bdi, thislat);
253		return LAT_EXCEEDED;
254	}
255
256	/*
257	 * No read/write mix, if stat isn't valid
258	 */
259	if (!stat_sample_valid(stat)) {
260		/*
261		 * If we had writes in this stat window and the window is
262		 * current, we're only doing writes. If a task recently
 263		 * waited or still has writes in flight, consider us doing
264		 * just writes as well.
265		 */
266		if (stat[WRITE].nr_samples || wb_recent_wait(rwb) ||
267		    wbt_inflight(rwb))
268			return LAT_UNKNOWN_WRITES;
269		return LAT_UNKNOWN;
270	}
271
272	/*
273	 * If the 'min' latency exceeds our target, step down.
274	 */
275	if (stat[READ].min > rwb->min_lat_nsec) {
276		trace_wbt_lat(bdi, stat[READ].min);
277		trace_wbt_stat(bdi, stat);
278		return LAT_EXCEEDED;
279	}
280
281	if (rqd->scale_step)
282		trace_wbt_stat(bdi, stat);
283
284	return LAT_OK;
285}
286
287static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
288{
289	struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
290	struct rq_depth *rqd = &rwb->rq_depth;
291
292	trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
293			rwb->wb_background, rwb->wb_normal, rqd->max_depth);
294}
295
296static void calc_wb_limits(struct rq_wb *rwb)
297{
298	if (rwb->min_lat_nsec == 0) {
299		rwb->wb_normal = rwb->wb_background = 0;
300	} else if (rwb->rq_depth.max_depth <= 2) {
301		rwb->wb_normal = rwb->rq_depth.max_depth;
302		rwb->wb_background = 1;
303	} else {
304		rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;
305		rwb->wb_background = (rwb->rq_depth.max_depth + 3) / 4;
306	}
307}
308
309static void scale_up(struct rq_wb *rwb)
310{
311	if (!rq_depth_scale_up(&rwb->rq_depth))
312		return;
313	calc_wb_limits(rwb);
314	rwb->unknown_cnt = 0;
315	rwb_wake_all(rwb);
316	rwb_trace_step(rwb, tracepoint_string("scale up"));
317}
318
319static void scale_down(struct rq_wb *rwb, bool hard_throttle)
320{
321	if (!rq_depth_scale_down(&rwb->rq_depth, hard_throttle))
322		return;
323	calc_wb_limits(rwb);
324	rwb->unknown_cnt = 0;
325	rwb_trace_step(rwb, tracepoint_string("scale down"));
326}
327
328static void rwb_arm_timer(struct rq_wb *rwb)
329{
330	struct rq_depth *rqd = &rwb->rq_depth;
331
332	if (rqd->scale_step > 0) {
333		/*
334		 * We should speed this up, using some variant of a fast
335		 * integer inverse square root calculation. Since we only do
336		 * this for every window expiration, it's not a huge deal,
337		 * though.
338		 */
339		rwb->cur_win_nsec = div_u64(rwb->win_nsec << 4,
340					int_sqrt((rqd->scale_step + 1) << 8));
341	} else {
342		/*
343		 * For step < 0, we don't want to increase/decrease the
344		 * window size.
345		 */
346		rwb->cur_win_nsec = rwb->win_nsec;
347	}
348
349	blk_stat_activate_nsecs(rwb->cb, rwb->cur_win_nsec);
350}
351
352static void wb_timer_fn(struct blk_stat_callback *cb)
353{
354	struct rq_wb *rwb = cb->data;
355	struct rq_depth *rqd = &rwb->rq_depth;
356	unsigned int inflight = wbt_inflight(rwb);
357	int status;
358
359	status = latency_exceeded(rwb, cb->stat);
360
361	trace_wbt_timer(rwb->rqos.q->backing_dev_info, status, rqd->scale_step,
362			inflight);
363
364	/*
365	 * If we exceeded the latency target, step down. If we did not,
366	 * step one level up. If we don't know enough to say either exceeded
367	 * or ok, then don't do anything.
368	 */
369	switch (status) {
370	case LAT_EXCEEDED:
371		scale_down(rwb, true);
372		break;
373	case LAT_OK:
374		scale_up(rwb);
375		break;
376	case LAT_UNKNOWN_WRITES:
377		/*
 378		 * We started at the center step, but don't have a valid
 379		 * read/write sample; we do have writes going on, though.
380		 * Allow step to go negative, to increase write perf.
381		 */
382		scale_up(rwb);
383		break;
384	case LAT_UNKNOWN:
385		if (++rwb->unknown_cnt < RWB_UNKNOWN_BUMP)
386			break;
387		/*
 388		 * We get here when we previously scaled down the depth, and we
389		 * currently don't have a valid read/write sample. For that
390		 * case, slowly return to center state (step == 0).
391		 */
392		if (rqd->scale_step > 0)
393			scale_up(rwb);
394		else if (rqd->scale_step < 0)
395			scale_down(rwb, false);
396		break;
397	default:
398		break;
399	}
400
401	/*
402	 * Re-arm timer, if we have IO in flight
403	 */
404	if (rqd->scale_step || inflight)
405		rwb_arm_timer(rwb);
406}
407
408static void wbt_update_limits(struct rq_wb *rwb)
409{
410	struct rq_depth *rqd = &rwb->rq_depth;
411
412	rqd->scale_step = 0;
413	rqd->scaled_max = false;
414
415	rq_depth_calc_max_depth(rqd);
416	calc_wb_limits(rwb);
417
418	rwb_wake_all(rwb);
419}
420
421u64 wbt_get_min_lat(struct request_queue *q)
422{
423	struct rq_qos *rqos = wbt_rq_qos(q);
424	if (!rqos)
425		return 0;
426	return RQWB(rqos)->min_lat_nsec;
427}
428
429void wbt_set_min_lat(struct request_queue *q, u64 val)
430{
431	struct rq_qos *rqos = wbt_rq_qos(q);
432	if (!rqos)
433		return;
434	RQWB(rqos)->min_lat_nsec = val;
435	RQWB(rqos)->enable_state = WBT_STATE_ON_MANUAL;
436	wbt_update_limits(RQWB(rqos));
437}
438
439
440static bool close_io(struct rq_wb *rwb)
441{
442	const unsigned long now = jiffies;
443
444	return time_before(now, rwb->last_issue + HZ / 10) ||
445		time_before(now, rwb->last_comp + HZ / 10);
446}
447
448#define REQ_HIPRIO	(REQ_SYNC | REQ_META | REQ_PRIO)
449
450static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
451{
452	unsigned int limit;
453
454	/*
455	 * If we got disabled, just return UINT_MAX. This ensures that
456	 * we'll properly inc a new IO, and dec+wakeup at the end.
457	 */
458	if (!rwb_enabled(rwb))
459		return UINT_MAX;
460
461	if ((rw & REQ_OP_MASK) == REQ_OP_DISCARD)
462		return rwb->wb_background;
463
464	/*
465	 * At this point we know it's a buffered write. If this is
466	 * kswapd trying to free memory, or REQ_SYNC is set, then
467	 * it's WB_SYNC_ALL writeback, and we'll use the max limit for
468	 * that. If the write is marked as a background write, then use
469	 * the idle limit, or go to normal if we haven't had competing
470	 * IO for a bit.
471	 */
472	if ((rw & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
473		limit = rwb->rq_depth.max_depth;
474	else if ((rw & REQ_BACKGROUND) || close_io(rwb)) {
475		/*
476		 * If less than 100ms since we completed unrelated IO,
477		 * limit us to half the depth for background writeback.
478		 */
479		limit = rwb->wb_background;
480	} else
481		limit = rwb->wb_normal;
482
483	return limit;
484}
485
486struct wbt_wait_data {
487	struct rq_wb *rwb;
488	enum wbt_flags wb_acct;
489	unsigned long rw;
490};
491
492static bool wbt_inflight_cb(struct rq_wait *rqw, void *private_data)
493{
494	struct wbt_wait_data *data = private_data;
495	return rq_wait_inc_below(rqw, get_limit(data->rwb, data->rw));
496}
497
498static void wbt_cleanup_cb(struct rq_wait *rqw, void *private_data)
499{
500	struct wbt_wait_data *data = private_data;
501	wbt_rqw_done(data->rwb, rqw, data->wb_acct);
502}
503
504/*
505 * Block if we will exceed our limit, or if we are currently waiting for
506 * the timer to kick off queuing again.
507 */
508static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
509		       unsigned long rw)
510{
511	struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
512	struct wbt_wait_data data = {
513		.rwb = rwb,
514		.wb_acct = wb_acct,
515		.rw = rw,
516	};
517
518	rq_qos_wait(rqw, &data, wbt_inflight_cb, wbt_cleanup_cb);
519}
520
521static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
522{
523	switch (bio_op(bio)) {
524	case REQ_OP_WRITE:
525		/*
526		 * Don't throttle WRITE_ODIRECT
527		 */
528		if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) ==
529		    (REQ_SYNC | REQ_IDLE))
530			return false;
531		fallthrough;
532	case REQ_OP_DISCARD:
533		return true;
534	default:
535		return false;
536	}
537}
538
539static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
540{
541	enum wbt_flags flags = 0;
542
543	if (!rwb_enabled(rwb))
544		return 0;
545
546	if (bio_op(bio) == REQ_OP_READ) {
547		flags = WBT_READ;
548	} else if (wbt_should_throttle(rwb, bio)) {
549		if (current_is_kswapd())
550			flags |= WBT_KSWAPD;
551		if (bio_op(bio) == REQ_OP_DISCARD)
552			flags |= WBT_DISCARD;
553		flags |= WBT_TRACKED;
554	}
555	return flags;
556}
557
558static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio)
559{
560	struct rq_wb *rwb = RQWB(rqos);
561	enum wbt_flags flags = bio_to_wbt_flags(rwb, bio);
562	__wbt_done(rqos, flags);
563}
564
565/*
566 * Returns true if the IO request should be accounted, false if not.
567 * May sleep, if we have exceeded the writeback limits. Caller can pass
568 * in an irq held spinlock, if it holds one when calling this function.
569 * If we do sleep, we'll release and re-grab it.
570 */
571static void wbt_wait(struct rq_qos *rqos, struct bio *bio)
572{
573	struct rq_wb *rwb = RQWB(rqos);
574	enum wbt_flags flags;
575
576	flags = bio_to_wbt_flags(rwb, bio);
577	if (!(flags & WBT_TRACKED)) {
578		if (flags & WBT_READ)
579			wb_timestamp(rwb, &rwb->last_issue);
580		return;
581	}
582
583	__wbt_wait(rwb, flags, bio->bi_opf);
584
585	if (!blk_stat_is_active(rwb->cb))
586		rwb_arm_timer(rwb);
587}
588
589static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
590{
591	struct rq_wb *rwb = RQWB(rqos);
592	rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
593}
594
595static void wbt_issue(struct rq_qos *rqos, struct request *rq)
596{
597	struct rq_wb *rwb = RQWB(rqos);
598
599	if (!rwb_enabled(rwb))
600		return;
601
602	/*
 603	 * Track sync issue time, so that we can react more quickly if a sync
 604	 * IO takes a long time to complete. Note
605	 * that this is just a hint. The request can go away when it completes,
606	 * so it's important we never dereference it. We only use the address to
607	 * compare with, which is why we store the sync_issue time locally.
608	 */
609	if (wbt_is_read(rq) && !rwb->sync_issue) {
610		rwb->sync_cookie = rq;
611		rwb->sync_issue = rq->io_start_time_ns;
612	}
613}
614
615static void wbt_requeue(struct rq_qos *rqos, struct request *rq)
616{
617	struct rq_wb *rwb = RQWB(rqos);
618	if (!rwb_enabled(rwb))
619		return;
620	if (rq == rwb->sync_cookie) {
621		rwb->sync_issue = 0;
622		rwb->sync_cookie = NULL;
623	}
624}
625
626void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
627{
628	struct rq_qos *rqos = wbt_rq_qos(q);
629	if (rqos)
630		RQWB(rqos)->wc = write_cache_on;
631}
632
633/*
634 * Enable wbt if defaults are configured that way
635 */
636void wbt_enable_default(struct request_queue *q)
637{
638	struct rq_qos *rqos = wbt_rq_qos(q);
639	/* Throttling already enabled? */
640	if (rqos)
641		return;
642
643	/* Queue not registered? Maybe shutting down... */
644	if (!blk_queue_registered(q))
645		return;
646
647	if (queue_is_mq(q) && IS_ENABLED(CONFIG_BLK_WBT_MQ))
648		wbt_init(q);
649}
650EXPORT_SYMBOL_GPL(wbt_enable_default);
651
652u64 wbt_default_latency_nsec(struct request_queue *q)
653{
654	/*
655	 * We default to 2msec for non-rotational storage, and 75msec
656	 * for rotational storage.
657	 */
658	if (blk_queue_nonrot(q))
659		return 2000000ULL;
660	else
661		return 75000000ULL;
662}
663
664static int wbt_data_dir(const struct request *rq)
665{
666	const int op = req_op(rq);
667
668	if (op == REQ_OP_READ)
669		return READ;
670	else if (op_is_write(op))
671		return WRITE;
672
673	/* don't account */
674	return -1;
675}
676
677static void wbt_queue_depth_changed(struct rq_qos *rqos)
678{
679	RQWB(rqos)->rq_depth.queue_depth = blk_queue_depth(rqos->q);
680	wbt_update_limits(RQWB(rqos));
681}
682
683static void wbt_exit(struct rq_qos *rqos)
684{
685	struct rq_wb *rwb = RQWB(rqos);
686	struct request_queue *q = rqos->q;
687
688	blk_stat_remove_callback(q, rwb->cb);
689	blk_stat_free_callback(rwb->cb);
690	kfree(rwb);
691}
692
693/*
694 * Disable wbt, if enabled by default.
695 */
696void wbt_disable_default(struct request_queue *q)
697{
698	struct rq_qos *rqos = wbt_rq_qos(q);
699	struct rq_wb *rwb;
700	if (!rqos)
701		return;
702	rwb = RQWB(rqos);
703	if (rwb->enable_state == WBT_STATE_ON_DEFAULT) {
704		blk_stat_deactivate(rwb->cb);
705		rwb->wb_normal = 0;
706	}
707}
708EXPORT_SYMBOL_GPL(wbt_disable_default);
709
710#ifdef CONFIG_BLK_DEBUG_FS
711static int wbt_curr_win_nsec_show(void *data, struct seq_file *m)
712{
713	struct rq_qos *rqos = data;
714	struct rq_wb *rwb = RQWB(rqos);
715
716	seq_printf(m, "%llu\n", rwb->cur_win_nsec);
717	return 0;
718}
719
720static int wbt_enabled_show(void *data, struct seq_file *m)
721{
722	struct rq_qos *rqos = data;
723	struct rq_wb *rwb = RQWB(rqos);
724
725	seq_printf(m, "%d\n", rwb->enable_state);
726	return 0;
727}
728
729static int wbt_id_show(void *data, struct seq_file *m)
730{
731	struct rq_qos *rqos = data;
732
733	seq_printf(m, "%u\n", rqos->id);
734	return 0;
735}
736
737static int wbt_inflight_show(void *data, struct seq_file *m)
738{
739	struct rq_qos *rqos = data;
740	struct rq_wb *rwb = RQWB(rqos);
741	int i;
742
743	for (i = 0; i < WBT_NUM_RWQ; i++)
744		seq_printf(m, "%d: inflight %d\n", i,
745			   atomic_read(&rwb->rq_wait[i].inflight));
746	return 0;
747}
748
749static int wbt_min_lat_nsec_show(void *data, struct seq_file *m)
750{
751	struct rq_qos *rqos = data;
752	struct rq_wb *rwb = RQWB(rqos);
753
754	seq_printf(m, "%lu\n", rwb->min_lat_nsec);
755	return 0;
756}
757
758static int wbt_unknown_cnt_show(void *data, struct seq_file *m)
759{
760	struct rq_qos *rqos = data;
761	struct rq_wb *rwb = RQWB(rqos);
762
763	seq_printf(m, "%u\n", rwb->unknown_cnt);
764	return 0;
765}
766
767static int wbt_normal_show(void *data, struct seq_file *m)
768{
769	struct rq_qos *rqos = data;
770	struct rq_wb *rwb = RQWB(rqos);
771
772	seq_printf(m, "%u\n", rwb->wb_normal);
773	return 0;
774}
775
776static int wbt_background_show(void *data, struct seq_file *m)
777{
778	struct rq_qos *rqos = data;
779	struct rq_wb *rwb = RQWB(rqos);
780
781	seq_printf(m, "%u\n", rwb->wb_background);
782	return 0;
783}
784
785static const struct blk_mq_debugfs_attr wbt_debugfs_attrs[] = {
786	{"curr_win_nsec", 0400, wbt_curr_win_nsec_show},
787	{"enabled", 0400, wbt_enabled_show},
788	{"id", 0400, wbt_id_show},
789	{"inflight", 0400, wbt_inflight_show},
790	{"min_lat_nsec", 0400, wbt_min_lat_nsec_show},
791	{"unknown_cnt", 0400, wbt_unknown_cnt_show},
792	{"wb_normal", 0400, wbt_normal_show},
793	{"wb_background", 0400, wbt_background_show},
794	{},
795};
796#endif
797
798static struct rq_qos_ops wbt_rqos_ops = {
799	.throttle = wbt_wait,
800	.issue = wbt_issue,
801	.track = wbt_track,
802	.requeue = wbt_requeue,
803	.done = wbt_done,
804	.cleanup = wbt_cleanup,
805	.queue_depth_changed = wbt_queue_depth_changed,
806	.exit = wbt_exit,
807#ifdef CONFIG_BLK_DEBUG_FS
808	.debugfs_attrs = wbt_debugfs_attrs,
809#endif
810};
811
812int wbt_init(struct request_queue *q)
813{
814	struct rq_wb *rwb;
815	int i;
816
817	rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
818	if (!rwb)
819		return -ENOMEM;
820
821	rwb->cb = blk_stat_alloc_callback(wb_timer_fn, wbt_data_dir, 2, rwb);
822	if (!rwb->cb) {
823		kfree(rwb);
824		return -ENOMEM;
825	}
826
827	for (i = 0; i < WBT_NUM_RWQ; i++)
828		rq_wait_init(&rwb->rq_wait[i]);
829
830	rwb->rqos.id = RQ_QOS_WBT;
831	rwb->rqos.ops = &wbt_rqos_ops;
832	rwb->rqos.q = q;
833	rwb->last_comp = rwb->last_issue = jiffies;
834	rwb->win_nsec = RWB_WINDOW_NSEC;
835	rwb->enable_state = WBT_STATE_ON_DEFAULT;
836	rwb->wc = 1;
837	rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
838	wbt_update_limits(rwb);
839
840	/*
841	 * Assign rwb and add the stats callback.
842	 */
843	rq_qos_add(q, &rwb->rqos);
844	blk_stat_add_callback(q, rwb->cb);
845
846	rwb->min_lat_nsec = wbt_default_latency_nsec(q);
847
848	wbt_queue_depth_changed(&rwb->rqos);
849	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
850
851	return 0;
852}