// SPDX-License-Identifier: GPL-2.0
/*
 * The Kyber I/O scheduler. Controls latency by throttling queue depths using
 * scalable techniques.
 *
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/module.h>
#include <linux/sbitmap.h>

#include <trace/events/block.h>

#include "elevator.h"
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kyber.h>

/*
 * Scheduling domains: the device is divided into multiple domains based on the
 * request type.
 */
enum {
        KYBER_READ,
        KYBER_WRITE,
        KYBER_DISCARD,
        KYBER_OTHER,
        KYBER_NUM_DOMAINS,
};

static const char *kyber_domain_names[] = {
        [KYBER_READ] = "READ",
        [KYBER_WRITE] = "WRITE",
        [KYBER_DISCARD] = "DISCARD",
        [KYBER_OTHER] = "OTHER",
};

enum {
        /*
         * In order to prevent starvation of synchronous requests by a flood of
         * asynchronous requests, we reserve 25% of requests for synchronous
         * operations.
         */
        KYBER_ASYNC_PERCENT = 75,
};

/*
 * Maximum device-wide depth for each scheduling domain.
 *
 * Even for fast devices with lots of tags like NVMe, you can saturate the
 * device with only a fraction of the maximum possible queue depth. So, we cap
 * these to a reasonable value.
 */
static const unsigned int kyber_depth[] = {
        [KYBER_READ] = 256,
        [KYBER_WRITE] = 128,
        [KYBER_DISCARD] = 64,
        [KYBER_OTHER] = 16,
};

/*
 * Default latency targets for each scheduling domain.
 */
static const u64 kyber_latency_targets[] = {
        [KYBER_READ] = 2ULL * NSEC_PER_MSEC,
        [KYBER_WRITE] = 10ULL * NSEC_PER_MSEC,
        [KYBER_DISCARD] = 5ULL * NSEC_PER_SEC,
};

/*
 * Batch size (number of requests we'll dispatch in a row) for each scheduling
 * domain.
 */
static const unsigned int kyber_batch_size[] = {
        [KYBER_READ] = 16,
        [KYBER_WRITE] = 8,
        [KYBER_DISCARD] = 1,
        [KYBER_OTHER] = 1,
};
/*
 * Request latencies are recorded in a histogram with buckets defined relative
 * to the target latency:
 *
 * <= 1/4 * target latency
 * <= 1/2 * target latency
 * <= 3/4 * target latency
 * <= target latency
 * <= 1 1/4 * target latency
 * <= 1 1/2 * target latency
 * <= 1 3/4 * target latency
 * > 1 3/4 * target latency
 */
enum {
        /*
         * The width of the latency histogram buckets is
         * 1 / (1 << KYBER_LATENCY_SHIFT) * target latency.
         */
        KYBER_LATENCY_SHIFT = 2,
        /*
         * The first (1 << KYBER_LATENCY_SHIFT) buckets are <= target latency,
         * thus, "good".
         */
        KYBER_GOOD_BUCKETS = 1 << KYBER_LATENCY_SHIFT,
        /* There are also (1 << KYBER_LATENCY_SHIFT) "bad" buckets. */
        KYBER_LATENCY_BUCKETS = 2 << KYBER_LATENCY_SHIFT,
};
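
/*
 * For example, with the default 2 ms read target the bucket width is
 * 2 ms / (1 << KYBER_LATENCY_SHIFT) = 500 us, so the eight buckets cover
 * <= 0.5, <= 1.0, <= 1.5, <= 2.0, <= 2.5, <= 3.0, <= 3.5, and > 3.5 ms.
 * Buckets 0-3 are the "good" buckets (within the target); buckets 4-7 are
 * "bad".
 */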

/*
 * We measure both the total latency and the I/O latency (i.e., latency after
 * submitting to the device).
 */
enum {
        KYBER_TOTAL_LATENCY,
        KYBER_IO_LATENCY,
};

static const char *kyber_latency_type_names[] = {
        [KYBER_TOTAL_LATENCY] = "total",
        [KYBER_IO_LATENCY] = "I/O",
};

/*
 * Per-cpu latency histograms: total latency and I/O latency for each scheduling
 * domain except for KYBER_OTHER.
 */
struct kyber_cpu_latency {
        atomic_t buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS];
};

/*
 * The mapping between kcq and khd mirrors the mapping between ctx and hctx:
 * we use request->mq_ctx->index_hw to index the kcq within the khd.
 */
struct kyber_ctx_queue {
        /*
         * Used to ensure that operations on rq_list and kcq_map are atomic.
         * Also protects the requests on rq_list during merging.
         */
        spinlock_t lock;
        struct list_head rq_list[KYBER_NUM_DOMAINS];
} ____cacheline_aligned_in_smp;

struct kyber_queue_data {
        struct request_queue *q;
        dev_t dev;

        /*
         * Each scheduling domain has a limited number of in-flight requests
         * device-wide, limited by these tokens.
         */
        struct sbitmap_queue domain_tokens[KYBER_NUM_DOMAINS];

        /*
         * Async request percentage, converted to per-word depth for
         * sbitmap_get_shallow().
         */
        unsigned int async_depth;

        struct kyber_cpu_latency __percpu *cpu_latency;

        /* Timer for stats aggregation and adjusting domain tokens. */
        struct timer_list timer;

        unsigned int latency_buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS];

        unsigned long latency_timeout[KYBER_OTHER];

        int domain_p99[KYBER_OTHER];

        /* Target latencies in nanoseconds. */
        u64 latency_targets[KYBER_OTHER];
};

struct kyber_hctx_data {
        spinlock_t lock;
        struct list_head rqs[KYBER_NUM_DOMAINS];
        unsigned int cur_domain;
        unsigned int batching;
        struct kyber_ctx_queue *kcqs;
        struct sbitmap kcq_map[KYBER_NUM_DOMAINS];
        struct sbq_wait domain_wait[KYBER_NUM_DOMAINS];
        struct sbq_wait_state *domain_ws[KYBER_NUM_DOMAINS];
        atomic_t wait_index[KYBER_NUM_DOMAINS];
};

static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
                             void *key);

static unsigned int kyber_sched_domain(blk_opf_t opf)
{
        switch (opf & REQ_OP_MASK) {
        case REQ_OP_READ:
                return KYBER_READ;
        case REQ_OP_WRITE:
                return KYBER_WRITE;
        case REQ_OP_DISCARD:
                return KYBER_DISCARD;
        default:
                return KYBER_OTHER;
        }
}
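
/*
 * Note that anything that is neither a read, a write, nor a discard (e.g.,
 * flushes or write-zeroes operations) falls through to KYBER_OTHER, which has
 * no latency target and never has its depth adjusted.
 */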

static void flush_latency_buckets(struct kyber_queue_data *kqd,
                                  struct kyber_cpu_latency *cpu_latency,
                                  unsigned int sched_domain, unsigned int type)
{
        unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
        atomic_t *cpu_buckets = cpu_latency->buckets[sched_domain][type];
        unsigned int bucket;

        for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)
                buckets[bucket] += atomic_xchg(&cpu_buckets[bucket], 0);
}

/*
 * Calculate the histogram bucket with the given percentile rank, or -1 if there
 * aren't enough samples yet.
 */
static int calculate_percentile(struct kyber_queue_data *kqd,
                                unsigned int sched_domain, unsigned int type,
                                unsigned int percentile)
{
        unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
        unsigned int bucket, samples = 0, percentile_samples;

        for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)
                samples += buckets[bucket];

        if (!samples)
                return -1;

        /*
         * We do the calculation once we have 500 samples or one second passes
         * since the first sample was recorded, whichever comes first.
         */
        if (!kqd->latency_timeout[sched_domain])
                kqd->latency_timeout[sched_domain] = max(jiffies + HZ, 1UL);
        if (samples < 500 &&
            time_is_after_jiffies(kqd->latency_timeout[sched_domain])) {
                return -1;
        }
        kqd->latency_timeout[sched_domain] = 0;

        percentile_samples = DIV_ROUND_UP(samples * percentile, 100);
        for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS - 1; bucket++) {
                if (buckets[bucket] >= percentile_samples)
                        break;
                percentile_samples -= buckets[bucket];
        }
        memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));

        trace_kyber_latency(kqd->dev, kyber_domain_names[sched_domain],
                            kyber_latency_type_names[type], percentile,
                            bucket + 1, 1 << KYBER_LATENCY_SHIFT, samples);

        return bucket;
}
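
/*
 * Worked example: if the flushed histogram is {50, 30, 10, 5, 3, 1, 1, 0},
 * then samples = 100 and the p90 needs DIV_ROUND_UP(100 * 90, 100) = 90
 * samples. Bucket 0 covers 50 of them and bucket 1 another 30, leaving 10,
 * which bucket 2 covers, so the p90 lands in bucket 2 ("<= 3/4 * target"),
 * a "good" bucket.
 */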

static void kyber_resize_domain(struct kyber_queue_data *kqd,
                                unsigned int sched_domain, unsigned int depth)
{
        depth = clamp(depth, 1U, kyber_depth[sched_domain]);
        if (depth != kqd->domain_tokens[sched_domain].sb.depth) {
                sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth);
                trace_kyber_adjust(kqd->dev, kyber_domain_names[sched_domain],
                                   depth);
        }
}

static void kyber_timer_fn(struct timer_list *t)
{
        struct kyber_queue_data *kqd = from_timer(kqd, t, timer);
        unsigned int sched_domain;
        int cpu;
        bool bad = false;

        /* Sum all of the per-cpu latency histograms. */
        for_each_online_cpu(cpu) {
                struct kyber_cpu_latency *cpu_latency;

                cpu_latency = per_cpu_ptr(kqd->cpu_latency, cpu);
                for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
                        flush_latency_buckets(kqd, cpu_latency, sched_domain,
                                              KYBER_TOTAL_LATENCY);
                        flush_latency_buckets(kqd, cpu_latency, sched_domain,
                                              KYBER_IO_LATENCY);
                }
        }

        /*
         * Check if any domains have a high I/O latency, which might indicate
         * congestion in the device. Note that we use the p90; we don't want to
         * be too sensitive to outliers here.
         */
        for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
                int p90;

                p90 = calculate_percentile(kqd, sched_domain, KYBER_IO_LATENCY,
                                           90);
                if (p90 >= KYBER_GOOD_BUCKETS)
                        bad = true;
        }

        /*
         * Adjust the scheduling domain depths. If we determined that there was
         * congestion, we throttle all domains with good latencies. Either way,
         * we ease up on throttling domains with bad latencies.
         */
        for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
                unsigned int orig_depth, depth;
                int p99;

                p99 = calculate_percentile(kqd, sched_domain,
                                           KYBER_TOTAL_LATENCY, 99);
                /*
                 * This is kind of subtle: different domains will not
                 * necessarily have enough samples to calculate the latency
                 * percentiles during the same window, so we have to remember
                 * the p99 for the next time we observe congestion; once we do,
                 * we don't want to throttle again until we get more data, so we
                 * reset it to -1.
                 */
                if (bad) {
                        if (p99 < 0)
                                p99 = kqd->domain_p99[sched_domain];
                        kqd->domain_p99[sched_domain] = -1;
                } else if (p99 >= 0) {
                        kqd->domain_p99[sched_domain] = p99;
                }
                if (p99 < 0)
                        continue;

                /*
                 * If this domain has bad latency, throttle less. Otherwise,
                 * throttle more iff we determined that there is congestion.
                 *
                 * The new depth is scaled linearly with the p99 latency vs the
                 * latency target. E.g., if the p99 is 3/4 of the target, then
                 * we throttle down to 3/4 of the current depth, and if the p99
                 * is 2x the target, then we double the depth.
                 */
                if (bad || p99 >= KYBER_GOOD_BUCKETS) {
                        orig_depth = kqd->domain_tokens[sched_domain].sb.depth;
                        depth = (orig_depth * (p99 + 1)) >> KYBER_LATENCY_SHIFT;
                        kyber_resize_domain(kqd, sched_domain, depth);
                }
        }
}
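
/*
 * Worked example of the depth scaling above: with a current read depth of 256,
 * a p99 in bucket 2 ("<= 3/4 * target") scales the depth to
 * (256 * 3) >> 2 = 192, while a p99 in the last bucket (7, "> 1 3/4 * target")
 * asks for (256 * 8) >> 2 = 512, which kyber_resize_domain() clamps back to
 * the kyber_depth[] maximum of 256.
 */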

static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
{
        struct kyber_queue_data *kqd;
        int ret = -ENOMEM;
        int i;

        kqd = kzalloc_node(sizeof(*kqd), GFP_KERNEL, q->node);
        if (!kqd)
                goto err;

        kqd->q = q;
        kqd->dev = disk_devt(q->disk);

        kqd->cpu_latency = alloc_percpu_gfp(struct kyber_cpu_latency,
                                            GFP_KERNEL | __GFP_ZERO);
        if (!kqd->cpu_latency)
                goto err_kqd;

        timer_setup(&kqd->timer, kyber_timer_fn, 0);

        for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
                WARN_ON(!kyber_depth[i]);
                WARN_ON(!kyber_batch_size[i]);
                ret = sbitmap_queue_init_node(&kqd->domain_tokens[i],
                                              kyber_depth[i], -1, false,
                                              GFP_KERNEL, q->node);
                if (ret) {
                        while (--i >= 0)
                                sbitmap_queue_free(&kqd->domain_tokens[i]);
                        goto err_buckets;
                }
        }

        for (i = 0; i < KYBER_OTHER; i++) {
                kqd->domain_p99[i] = -1;
                kqd->latency_targets[i] = kyber_latency_targets[i];
        }

        return kqd;

err_buckets:
        free_percpu(kqd->cpu_latency);
err_kqd:
        kfree(kqd);
err:
        return ERR_PTR(ret);
}

static int kyber_init_sched(struct request_queue *q, struct elevator_type *e)
{
        struct kyber_queue_data *kqd;
        struct elevator_queue *eq;

        eq = elevator_alloc(q, e);
        if (!eq)
                return -ENOMEM;

        kqd = kyber_queue_data_alloc(q);
        if (IS_ERR(kqd)) {
                kobject_put(&eq->kobj);
                return PTR_ERR(kqd);
        }

        blk_stat_enable_accounting(q);

        blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);

        eq->elevator_data = kqd;
        q->elevator = eq;

        return 0;
}

static void kyber_exit_sched(struct elevator_queue *e)
{
        struct kyber_queue_data *kqd = e->elevator_data;
        int i;

        timer_shutdown_sync(&kqd->timer);
        blk_stat_disable_accounting(kqd->q);

        for (i = 0; i < KYBER_NUM_DOMAINS; i++)
                sbitmap_queue_free(&kqd->domain_tokens[i]);
        free_percpu(kqd->cpu_latency);
        kfree(kqd);
}

static void kyber_ctx_queue_init(struct kyber_ctx_queue *kcq)
{
        unsigned int i;

        spin_lock_init(&kcq->lock);
        for (i = 0; i < KYBER_NUM_DOMAINS; i++)
                INIT_LIST_HEAD(&kcq->rq_list[i]);
}

static void kyber_depth_updated(struct blk_mq_hw_ctx *hctx)
{
        struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
        struct blk_mq_tags *tags = hctx->sched_tags;
        unsigned int shift = tags->bitmap_tags.sb.shift;

        kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;

        sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, kqd->async_depth);
}
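
/*
 * For example, with 64-bit sbitmap words (shift == 6), async_depth is
 * 64 * 75 / 100 = 48: asynchronous requests may claim at most 48 of the 64
 * tags in each sbitmap word, leaving at least 16 per word that only
 * synchronous requests can take.
 */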

static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
        struct kyber_hctx_data *khd;
        int i;

        khd = kmalloc_node(sizeof(*khd), GFP_KERNEL, hctx->numa_node);
        if (!khd)
                return -ENOMEM;

        khd->kcqs = kmalloc_array_node(hctx->nr_ctx,
                                       sizeof(struct kyber_ctx_queue),
                                       GFP_KERNEL, hctx->numa_node);
        if (!khd->kcqs)
                goto err_khd;

        for (i = 0; i < hctx->nr_ctx; i++)
                kyber_ctx_queue_init(&khd->kcqs[i]);

        for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
                if (sbitmap_init_node(&khd->kcq_map[i], hctx->nr_ctx,
                                      ilog2(8), GFP_KERNEL, hctx->numa_node,
                                      false, false)) {
                        while (--i >= 0)
                                sbitmap_free(&khd->kcq_map[i]);
                        goto err_kcqs;
                }
        }

        spin_lock_init(&khd->lock);

        for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
                INIT_LIST_HEAD(&khd->rqs[i]);
                khd->domain_wait[i].sbq = NULL;
                init_waitqueue_func_entry(&khd->domain_wait[i].wait,
                                          kyber_domain_wake);
                khd->domain_wait[i].wait.private = hctx;
                INIT_LIST_HEAD(&khd->domain_wait[i].wait.entry);
                atomic_set(&khd->wait_index[i], 0);
        }

        khd->cur_domain = 0;
        khd->batching = 0;

        hctx->sched_data = khd;
        kyber_depth_updated(hctx);

        return 0;

err_kcqs:
        kfree(khd->kcqs);
err_khd:
        kfree(khd);
        return -ENOMEM;
}

static void kyber_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
        struct kyber_hctx_data *khd = hctx->sched_data;
        int i;

        for (i = 0; i < KYBER_NUM_DOMAINS; i++)
                sbitmap_free(&khd->kcq_map[i]);
        kfree(khd->kcqs);
        kfree(hctx->sched_data);
}

static int rq_get_domain_token(struct request *rq)
{
        return (long)rq->elv.priv[0];
}

static void rq_set_domain_token(struct request *rq, int token)
{
        rq->elv.priv[0] = (void *)(long)token;
}
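
/*
 * The domain token is stashed directly in the elevator private pointer by
 * casting it through (long); -1 is the "no token" sentinel that
 * kyber_prepare_request() sets and rq_clear_domain_token() checks, so a
 * request that never received a token is not erroneously freed back to the
 * token sbitmap.
 */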

static void rq_clear_domain_token(struct kyber_queue_data *kqd,
                                  struct request *rq)
{
        unsigned int sched_domain;
        int nr;

        nr = rq_get_domain_token(rq);
        if (nr != -1) {
                sched_domain = kyber_sched_domain(rq->cmd_flags);
                sbitmap_queue_clear(&kqd->domain_tokens[sched_domain], nr,
                                    rq->mq_ctx->cpu);
        }
}

static void kyber_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
{
        /*
         * We use the scheduler tags as per-hardware queue queueing tokens.
         * Async requests can be limited at this stage.
         */
        if (!op_is_sync(opf)) {
                struct kyber_queue_data *kqd = data->q->elevator->elevator_data;

                data->shallow_depth = kqd->async_depth;
        }
}

static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
                            unsigned int nr_segs)
{
        struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
        struct kyber_hctx_data *khd = hctx->sched_data;
        struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
        unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
        struct list_head *rq_list = &kcq->rq_list[sched_domain];
        bool merged;

        spin_lock(&kcq->lock);
        merged = blk_bio_list_merge(hctx->queue, rq_list, bio, nr_segs);
        spin_unlock(&kcq->lock);

        return merged;
}

static void kyber_prepare_request(struct request *rq)
{
        rq_set_domain_token(rq, -1);
}

static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
                                  struct list_head *rq_list, bool at_head)
{
        struct kyber_hctx_data *khd = hctx->sched_data;
        struct request *rq, *next;

        list_for_each_entry_safe(rq, next, rq_list, queuelist) {
                unsigned int sched_domain = kyber_sched_domain(rq->cmd_flags);
                struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw[hctx->type]];
                struct list_head *head = &kcq->rq_list[sched_domain];

                spin_lock(&kcq->lock);
                trace_block_rq_insert(rq);
                if (at_head)
                        list_move(&rq->queuelist, head);
                else
                        list_move_tail(&rq->queuelist, head);
                sbitmap_set_bit(&khd->kcq_map[sched_domain],
                                rq->mq_ctx->index_hw[hctx->type]);
                spin_unlock(&kcq->lock);
        }
}

static void kyber_finish_request(struct request *rq)
{
        struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;

        rq_clear_domain_token(kqd, rq);
}

static void add_latency_sample(struct kyber_cpu_latency *cpu_latency,
                               unsigned int sched_domain, unsigned int type,
                               u64 target, u64 latency)
{
        unsigned int bucket;
        u64 divisor;

        if (latency > 0) {
                divisor = max_t(u64, target >> KYBER_LATENCY_SHIFT, 1);
                bucket = min_t(unsigned int, div64_u64(latency - 1, divisor),
                               KYBER_LATENCY_BUCKETS - 1);
        } else {
                bucket = 0;
        }

        atomic_inc(&cpu_latency->buckets[sched_domain][type][bucket]);
}
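
/*
 * For example, with the 2 ms read target the divisor is 500000 ns, so a
 * latency of 1.2 ms yields bucket div64_u64(1200000 - 1, 500000) = 2, i.e.
 * "<= 3/4 * target". The "latency - 1" keeps exact multiples of the bucket
 * width in the lower bucket: exactly 500 us lands in bucket 0, not bucket 1.
 */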

static void kyber_completed_request(struct request *rq, u64 now)
{
        struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
        struct kyber_cpu_latency *cpu_latency;
        unsigned int sched_domain;
        u64 target;

        sched_domain = kyber_sched_domain(rq->cmd_flags);
        if (sched_domain == KYBER_OTHER)
                return;

        cpu_latency = get_cpu_ptr(kqd->cpu_latency);
        target = kqd->latency_targets[sched_domain];
        add_latency_sample(cpu_latency, sched_domain, KYBER_TOTAL_LATENCY,
                           target, now - rq->start_time_ns);
        add_latency_sample(cpu_latency, sched_domain, KYBER_IO_LATENCY, target,
                           now - rq->io_start_time_ns);
        put_cpu_ptr(kqd->cpu_latency);

        timer_reduce(&kqd->timer, jiffies + HZ / 10);
}
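
/*
 * timer_reduce() only ever moves the expiry earlier, so after any completion
 * the stats timer is guaranteed to fire within 100 ms (HZ / 10) without
 * repeatedly pushing back an expiry that is already sooner.
 */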

struct flush_kcq_data {
        struct kyber_hctx_data *khd;
        unsigned int sched_domain;
        struct list_head *list;
};

static bool flush_busy_kcq(struct sbitmap *sb, unsigned int bitnr, void *data)
{
        struct flush_kcq_data *flush_data = data;
        struct kyber_ctx_queue *kcq = &flush_data->khd->kcqs[bitnr];

        spin_lock(&kcq->lock);
        list_splice_tail_init(&kcq->rq_list[flush_data->sched_domain],
                              flush_data->list);
        sbitmap_clear_bit(sb, bitnr);
        spin_unlock(&kcq->lock);

        return true;
}

static void kyber_flush_busy_kcqs(struct kyber_hctx_data *khd,
                                  unsigned int sched_domain,
                                  struct list_head *list)
{
        struct flush_kcq_data data = {
                .khd = khd,
                .sched_domain = sched_domain,
                .list = list,
        };

        sbitmap_for_each_set(&khd->kcq_map[sched_domain],
                             flush_busy_kcq, &data);
}

static int kyber_domain_wake(wait_queue_entry_t *wqe, unsigned mode, int flags,
                             void *key)
{
        struct blk_mq_hw_ctx *hctx = READ_ONCE(wqe->private);
        struct sbq_wait *wait = container_of(wqe, struct sbq_wait, wait);

        sbitmap_del_wait_queue(wait);
        blk_mq_run_hw_queue(hctx, true);
        return 1;
}

static int kyber_get_domain_token(struct kyber_queue_data *kqd,
                                  struct kyber_hctx_data *khd,
                                  struct blk_mq_hw_ctx *hctx)
{
        unsigned int sched_domain = khd->cur_domain;
        struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
        struct sbq_wait *wait = &khd->domain_wait[sched_domain];
        struct sbq_wait_state *ws;
        int nr;

        nr = __sbitmap_queue_get(domain_tokens);

        /*
         * If we failed to get a domain token, make sure the hardware queue is
         * run when one becomes available. Note that this is serialized on
         * khd->lock, but we still need to be careful about the waker.
         */
        if (nr < 0 && list_empty_careful(&wait->wait.entry)) {
                ws = sbq_wait_ptr(domain_tokens,
                                  &khd->wait_index[sched_domain]);
                khd->domain_ws[sched_domain] = ws;
                sbitmap_add_wait_queue(domain_tokens, ws, wait);

                /*
                 * Try again in case a token was freed before we got on the wait
                 * queue.
                 */
                nr = __sbitmap_queue_get(domain_tokens);
        }

        /*
         * If we got a token while we were on the wait queue, remove ourselves
         * from the wait queue to ensure that all wake ups make forward
         * progress. It's possible that the waker already deleted the entry
         * between the !list_empty_careful() check and us grabbing the lock, but
         * list_del_init() is okay with that.
         */
        if (nr >= 0 && !list_empty_careful(&wait->wait.entry)) {
                ws = khd->domain_ws[sched_domain];
                spin_lock_irq(&ws->wait.lock);
                sbitmap_del_wait_queue(wait);
                spin_unlock_irq(&ws->wait.lock);
        }

        return nr;
}
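
/*
 * The second __sbitmap_queue_get() above closes a lost-wakeup window: a token
 * freed after the first failed attempt but before sbitmap_add_wait_queue()
 * generates no wakeup for this waiter, so without the retry the hardware
 * queue might never be run again for this domain.
 */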

static struct request *
kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
                          struct kyber_hctx_data *khd,
                          struct blk_mq_hw_ctx *hctx)
{
        struct list_head *rqs;
        struct request *rq;
        int nr;

        rqs = &khd->rqs[khd->cur_domain];

        /*
         * If we already have a flushed request, then we just need to get a
         * token for it. Otherwise, if there are pending requests in the kcqs,
         * flush the kcqs, but only if we can get a token. If not, we should
         * leave the requests in the kcqs so that they can be merged. Note that
         * khd->lock serializes the flushes, so if we observed any bit set in
         * the kcq_map, we will always get a request.
         */
        rq = list_first_entry_or_null(rqs, struct request, queuelist);
        if (rq) {
                nr = kyber_get_domain_token(kqd, khd, hctx);
                if (nr >= 0) {
                        khd->batching++;
                        rq_set_domain_token(rq, nr);
                        list_del_init(&rq->queuelist);
                        return rq;
                } else {
                        trace_kyber_throttled(kqd->dev,
                                              kyber_domain_names[khd->cur_domain]);
                }
        } else if (sbitmap_any_bit_set(&khd->kcq_map[khd->cur_domain])) {
                nr = kyber_get_domain_token(kqd, khd, hctx);
                if (nr >= 0) {
                        kyber_flush_busy_kcqs(khd, khd->cur_domain, rqs);
                        rq = list_first_entry(rqs, struct request, queuelist);
                        khd->batching++;
                        rq_set_domain_token(rq, nr);
                        list_del_init(&rq->queuelist);
                        return rq;
                } else {
                        trace_kyber_throttled(kqd->dev,
                                              kyber_domain_names[khd->cur_domain]);
                }
        }

        /* There were either no pending requests or no tokens. */
        return NULL;
}

static struct request *kyber_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
        struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
        struct kyber_hctx_data *khd = hctx->sched_data;
        struct request *rq;
        int i;

        spin_lock(&khd->lock);

        /*
         * First, if we are still entitled to batch, try to dispatch a request
         * from the batch.
         */
        if (khd->batching < kyber_batch_size[khd->cur_domain]) {
                rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
                if (rq)
                        goto out;
        }

        /*
         * Either,
         * 1. We were no longer entitled to a batch.
         * 2. The domain we were batching didn't have any requests.
         * 3. The domain we were batching was out of tokens.
         *
         * Start another batch. Note that this wraps back around to the original
         * domain if no other domains have requests or tokens.
         */
        khd->batching = 0;
        for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
                if (khd->cur_domain == KYBER_NUM_DOMAINS - 1)
                        khd->cur_domain = 0;
                else
                        khd->cur_domain++;

                rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
                if (rq)
                        goto out;
        }

        rq = NULL;
out:
        spin_unlock(&khd->lock);
        return rq;
}
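
/*
 * For example, if cur_domain is KYBER_WRITE when a batch ends, the loop above
 * tries KYBER_DISCARD, KYBER_OTHER, and KYBER_READ in turn and finally wraps
 * back around to KYBER_WRITE itself, so every domain (including the one that
 * just stalled) gets one more chance before we give up and return NULL.
 */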

static bool kyber_has_work(struct blk_mq_hw_ctx *hctx)
{
        struct kyber_hctx_data *khd = hctx->sched_data;
        int i;

        for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
                if (!list_empty_careful(&khd->rqs[i]) ||
                    sbitmap_any_bit_set(&khd->kcq_map[i]))
                        return true;
        }

        return false;
}

#define KYBER_LAT_SHOW_STORE(domain, name)                                \
static ssize_t kyber_##name##_lat_show(struct elevator_queue *e,         \
                                       char *page)                       \
{                                                                         \
        struct kyber_queue_data *kqd = e->elevator_data;                  \
                                                                          \
        return sprintf(page, "%llu\n", kqd->latency_targets[domain]);     \
}                                                                         \
                                                                          \
static ssize_t kyber_##name##_lat_store(struct elevator_queue *e,        \
                                        const char *page, size_t count)  \
{                                                                         \
        struct kyber_queue_data *kqd = e->elevator_data;                  \
        unsigned long long nsec;                                          \
        int ret;                                                          \
                                                                          \
        ret = kstrtoull(page, 10, &nsec);                                 \
        if (ret)                                                          \
                return ret;                                               \
                                                                          \
        kqd->latency_targets[domain] = nsec;                              \
                                                                          \
        return count;                                                     \
}
KYBER_LAT_SHOW_STORE(KYBER_READ, read);
KYBER_LAT_SHOW_STORE(KYBER_WRITE, write);
#undef KYBER_LAT_SHOW_STORE

#define KYBER_LAT_ATTR(op) __ATTR(op##_lat_nsec, 0644, kyber_##op##_lat_show, kyber_##op##_lat_store)
static struct elv_fs_entry kyber_sched_attrs[] = {
        KYBER_LAT_ATTR(read),
        KYBER_LAT_ATTR(write),
        __ATTR_NULL
};
#undef KYBER_LAT_ATTR

#ifdef CONFIG_BLK_DEBUG_FS
#define KYBER_DEBUGFS_DOMAIN_ATTRS(domain, name)                          \
static int kyber_##name##_tokens_show(void *data, struct seq_file *m)    \
{                                                                         \
        struct request_queue *q = data;                                   \
        struct kyber_queue_data *kqd = q->elevator->elevator_data;        \
                                                                          \
        sbitmap_queue_show(&kqd->domain_tokens[domain], m);               \
        return 0;                                                         \
}                                                                         \
                                                                          \
static void *kyber_##name##_rqs_start(struct seq_file *m, loff_t *pos)   \
        __acquires(&khd->lock)                                            \
{                                                                         \
        struct blk_mq_hw_ctx *hctx = m->private;                          \
        struct kyber_hctx_data *khd = hctx->sched_data;                   \
                                                                          \
        spin_lock(&khd->lock);                                            \
        return seq_list_start(&khd->rqs[domain], *pos);                   \
}                                                                         \
                                                                          \
static void *kyber_##name##_rqs_next(struct seq_file *m, void *v,        \
                                     loff_t *pos)                         \
{                                                                         \
        struct blk_mq_hw_ctx *hctx = m->private;                          \
        struct kyber_hctx_data *khd = hctx->sched_data;                   \
                                                                          \
        return seq_list_next(v, &khd->rqs[domain], pos);                  \
}                                                                         \
                                                                          \
static void kyber_##name##_rqs_stop(struct seq_file *m, void *v)         \
        __releases(&khd->lock)                                            \
{                                                                         \
        struct blk_mq_hw_ctx *hctx = m->private;                          \
        struct kyber_hctx_data *khd = hctx->sched_data;                   \
                                                                          \
        spin_unlock(&khd->lock);                                          \
}                                                                         \
                                                                          \
static const struct seq_operations kyber_##name##_rqs_seq_ops = {        \
        .start = kyber_##name##_rqs_start,                                \
        .next = kyber_##name##_rqs_next,                                  \
        .stop = kyber_##name##_rqs_stop,                                  \
        .show = blk_mq_debugfs_rq_show,                                   \
};                                                                        \
                                                                          \
static int kyber_##name##_waiting_show(void *data, struct seq_file *m)   \
{                                                                         \
        struct blk_mq_hw_ctx *hctx = data;                                \
        struct kyber_hctx_data *khd = hctx->sched_data;                   \
        wait_queue_entry_t *wait = &khd->domain_wait[domain].wait;        \
                                                                          \
        seq_printf(m, "%d\n", !list_empty_careful(&wait->entry));         \
        return 0;                                                         \
}
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_READ, read)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_WRITE, write)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_DISCARD, discard)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_OTHER, other)
#undef KYBER_DEBUGFS_DOMAIN_ATTRS

static int kyber_async_depth_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        struct kyber_queue_data *kqd = q->elevator->elevator_data;

        seq_printf(m, "%u\n", kqd->async_depth);
        return 0;
}

static int kyber_cur_domain_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct kyber_hctx_data *khd = hctx->sched_data;

        seq_printf(m, "%s\n", kyber_domain_names[khd->cur_domain]);
        return 0;
}

static int kyber_batching_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct kyber_hctx_data *khd = hctx->sched_data;

        seq_printf(m, "%u\n", khd->batching);
        return 0;
}

#define KYBER_QUEUE_DOMAIN_ATTRS(name)                                    \
        {#name "_tokens", 0400, kyber_##name##_tokens_show}
static const struct blk_mq_debugfs_attr kyber_queue_debugfs_attrs[] = {
        KYBER_QUEUE_DOMAIN_ATTRS(read),
        KYBER_QUEUE_DOMAIN_ATTRS(write),
        KYBER_QUEUE_DOMAIN_ATTRS(discard),
        KYBER_QUEUE_DOMAIN_ATTRS(other),
        {"async_depth", 0400, kyber_async_depth_show},
        {},
};
#undef KYBER_QUEUE_DOMAIN_ATTRS

#define KYBER_HCTX_DOMAIN_ATTRS(name)                                     \
        {#name "_rqs", 0400, .seq_ops = &kyber_##name##_rqs_seq_ops},     \
        {#name "_waiting", 0400, kyber_##name##_waiting_show}
static const struct blk_mq_debugfs_attr kyber_hctx_debugfs_attrs[] = {
        KYBER_HCTX_DOMAIN_ATTRS(read),
        KYBER_HCTX_DOMAIN_ATTRS(write),
        KYBER_HCTX_DOMAIN_ATTRS(discard),
        KYBER_HCTX_DOMAIN_ATTRS(other),
        {"cur_domain", 0400, kyber_cur_domain_show},
        {"batching", 0400, kyber_batching_show},
        {},
};
#undef KYBER_HCTX_DOMAIN_ATTRS
#endif

static struct elevator_type kyber_sched = {
        .ops = {
                .init_sched = kyber_init_sched,
                .exit_sched = kyber_exit_sched,
                .init_hctx = kyber_init_hctx,
                .exit_hctx = kyber_exit_hctx,
                .limit_depth = kyber_limit_depth,
                .bio_merge = kyber_bio_merge,
                .prepare_request = kyber_prepare_request,
                .insert_requests = kyber_insert_requests,
                .finish_request = kyber_finish_request,
                .requeue_request = kyber_finish_request,
                .completed_request = kyber_completed_request,
                .dispatch_request = kyber_dispatch_request,
                .has_work = kyber_has_work,
                .depth_updated = kyber_depth_updated,
        },
#ifdef CONFIG_BLK_DEBUG_FS
        .queue_debugfs_attrs = kyber_queue_debugfs_attrs,
        .hctx_debugfs_attrs = kyber_hctx_debugfs_attrs,
#endif
        .elevator_attrs = kyber_sched_attrs,
        .elevator_name = "kyber",
        .elevator_owner = THIS_MODULE,
};

static int __init kyber_init(void)
{
        return elv_register(&kyber_sched);
}

static void __exit kyber_exit(void)
{
        elv_unregister(&kyber_sched);
}

module_init(kyber_init);
module_exit(kyber_exit);

MODULE_AUTHOR("Omar Sandoval");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Kyber I/O scheduler");