block/kyber-iosched.c (Linux v6.2)
// SPDX-License-Identifier: GPL-2.0
/*
 * The Kyber I/O scheduler. Controls latency by throttling queue depths using
 * scalable techniques.
 *
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/module.h>
#include <linux/sbitmap.h>

#include <trace/events/block.h>

#include "elevator.h"
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kyber.h>

/*
 * Scheduling domains: the device is divided into multiple domains based on the
 * request type.
 */
enum {
	KYBER_READ,
	KYBER_WRITE,
	KYBER_DISCARD,
	KYBER_OTHER,
	KYBER_NUM_DOMAINS,
};

static const char *kyber_domain_names[] = {
	[KYBER_READ] = "READ",
	[KYBER_WRITE] = "WRITE",
	[KYBER_DISCARD] = "DISCARD",
	[KYBER_OTHER] = "OTHER",
};

enum {
	/*
	 * In order to prevent starvation of synchronous requests by a flood of
	 * asynchronous requests, we reserve 25% of requests for synchronous
	 * operations.
	 */
	KYBER_ASYNC_PERCENT = 75,
};

/*
 * Maximum device-wide depth for each scheduling domain.
 *
 * Even for fast devices with lots of tags like NVMe, you can saturate the
 * device with only a fraction of the maximum possible queue depth. So, we cap
 * these to a reasonable value.
 */
static const unsigned int kyber_depth[] = {
	[KYBER_READ] = 256,
	[KYBER_WRITE] = 128,
	[KYBER_DISCARD] = 64,
	[KYBER_OTHER] = 16,
};

/*
 * Default latency targets for each scheduling domain.
 */
static const u64 kyber_latency_targets[] = {
	[KYBER_READ] = 2ULL * NSEC_PER_MSEC,
	[KYBER_WRITE] = 10ULL * NSEC_PER_MSEC,
	[KYBER_DISCARD] = 5ULL * NSEC_PER_SEC,
};

/*
 * Batch size (number of requests we'll dispatch in a row) for each scheduling
 * domain.
 */
static const unsigned int kyber_batch_size[] = {
	[KYBER_READ] = 16,
	[KYBER_WRITE] = 8,
	[KYBER_DISCARD] = 1,
	[KYBER_OTHER] = 1,
};

/*
 * Request latencies are recorded in a histogram with buckets defined relative
 * to the target latency:
 *
 * <= 1/4 * target latency
 * <= 1/2 * target latency
 * <= 3/4 * target latency
 * <= target latency
 * <= 1 1/4 * target latency
 * <= 1 1/2 * target latency
 * <= 1 3/4 * target latency
 * > 1 3/4 * target latency
 */
enum {
	/*
	 * The width of the latency histogram buckets is
	 * 1 / (1 << KYBER_LATENCY_SHIFT) * target latency.
	 */
	KYBER_LATENCY_SHIFT = 2,
	/*
	 * The first (1 << KYBER_LATENCY_SHIFT) buckets are <= target latency,
	 * thus, "good".
	 */
	KYBER_GOOD_BUCKETS = 1 << KYBER_LATENCY_SHIFT,
	/* There are also (1 << KYBER_LATENCY_SHIFT) "bad" buckets. */
	KYBER_LATENCY_BUCKETS = 2 << KYBER_LATENCY_SHIFT,
};
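
/*
 * For example, with the default 2 ms KYBER_READ target and
 * KYBER_LATENCY_SHIFT = 2, each bucket is 2 ms / 4 = 500 us wide: a read
 * completing in 1.2 ms falls in the "<= 3/4 * target" bucket and still
 * counts as "good", while one completing in 2.3 ms falls in the first
 * "bad" bucket.
 */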

/*
 * We measure both the total latency and the I/O latency (i.e., latency after
 * submitting to the device).
 */
enum {
	KYBER_TOTAL_LATENCY,
	KYBER_IO_LATENCY,
};

static const char *kyber_latency_type_names[] = {
	[KYBER_TOTAL_LATENCY] = "total",
	[KYBER_IO_LATENCY] = "I/O",
};

/*
 * Per-cpu latency histograms: total latency and I/O latency for each scheduling
 * domain except for KYBER_OTHER.
 */
struct kyber_cpu_latency {
	atomic_t buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS];
};

/*
 * There is a one-to-one mapping between ctx & hctx and between kcq & khd; we
 * use request->mq_ctx->index_hw to index the kcq in khd.
 */
struct kyber_ctx_queue {
	/*
	 * Used to ensure that operations on rq_list and kcq_map are atomic.
	 * Also protects the requests on rq_list during merges.
	 */
	spinlock_t lock;
	struct list_head rq_list[KYBER_NUM_DOMAINS];
} ____cacheline_aligned_in_smp;

struct kyber_queue_data {
	struct request_queue *q;
	dev_t dev;

	/*
	 * Each scheduling domain has a limited number of in-flight requests
	 * device-wide, limited by these tokens.
	 */
	struct sbitmap_queue domain_tokens[KYBER_NUM_DOMAINS];

	/*
	 * Async request percentage, converted to per-word depth for
	 * sbitmap_get_shallow().
	 */
	unsigned int async_depth;

	struct kyber_cpu_latency __percpu *cpu_latency;

	/* Timer for stats aggregation and adjusting domain tokens. */
	struct timer_list timer;

	unsigned int latency_buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS];

	unsigned long latency_timeout[KYBER_OTHER];

	int domain_p99[KYBER_OTHER];

	/* Target latencies in nanoseconds. */
	u64 latency_targets[KYBER_OTHER];
};

struct kyber_hctx_data {
	spinlock_t lock;
	struct list_head rqs[KYBER_NUM_DOMAINS];
	unsigned int cur_domain;
	unsigned int batching;
	struct kyber_ctx_queue *kcqs;
	struct sbitmap kcq_map[KYBER_NUM_DOMAINS];
	struct sbq_wait domain_wait[KYBER_NUM_DOMAINS];
	struct sbq_wait_state *domain_ws[KYBER_NUM_DOMAINS];
	atomic_t wait_index[KYBER_NUM_DOMAINS];
};

static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
			     void *key);

static unsigned int kyber_sched_domain(blk_opf_t opf)
{
	switch (opf & REQ_OP_MASK) {
	case REQ_OP_READ:
		return KYBER_READ;
	case REQ_OP_WRITE:
		return KYBER_WRITE;
	case REQ_OP_DISCARD:
		return KYBER_DISCARD;
	default:
		return KYBER_OTHER;
	}
}
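
/*
 * For example, REQ_OP_FLUSH and REQ_OP_WRITE_ZEROES match none of the cases
 * above and land in KYBER_OTHER, which has no latency target and whose token
 * depth is never adjusted by the timer.
 */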

static void flush_latency_buckets(struct kyber_queue_data *kqd,
				  struct kyber_cpu_latency *cpu_latency,
				  unsigned int sched_domain, unsigned int type)
{
	unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
	atomic_t *cpu_buckets = cpu_latency->buckets[sched_domain][type];
	unsigned int bucket;

	for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)
		buckets[bucket] += atomic_xchg(&cpu_buckets[bucket], 0);
}

/*
 * Calculate the histogram bucket with the given percentile rank, or -1 if there
 * aren't enough samples yet.
 */
static int calculate_percentile(struct kyber_queue_data *kqd,
				unsigned int sched_domain, unsigned int type,
				unsigned int percentile)
{
	unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
	unsigned int bucket, samples = 0, percentile_samples;

	for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)
		samples += buckets[bucket];

	if (!samples)
		return -1;

	/*
	 * We do the calculation once we have 500 samples or one second passes
	 * since the first sample was recorded, whichever comes first.
	 */
	if (!kqd->latency_timeout[sched_domain])
		kqd->latency_timeout[sched_domain] = max(jiffies + HZ, 1UL);
	if (samples < 500 &&
	    time_is_after_jiffies(kqd->latency_timeout[sched_domain])) {
		return -1;
	}
	kqd->latency_timeout[sched_domain] = 0;

	percentile_samples = DIV_ROUND_UP(samples * percentile, 100);
	for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS - 1; bucket++) {
		if (buckets[bucket] >= percentile_samples)
			break;
		percentile_samples -= buckets[bucket];
	}
	memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));

	trace_kyber_latency(kqd->dev, kyber_domain_names[sched_domain],
			    kyber_latency_type_names[type], percentile,
			    bucket + 1, 1 << KYBER_LATENCY_SHIFT, samples);

	return bucket;
}
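
/*
 * For example, with 600 samples distributed {300, 200, 60, 40, 0, 0, 0, 0}
 * and percentile = 90, percentile_samples starts at 540: bucket 0 leaves
 * 240, bucket 1 leaves 40, and the walk stops at bucket 2 (60 >= 40), so
 * the p90 is bucket 2, i.e. <= 3/4 of the target latency.
 */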

static void kyber_resize_domain(struct kyber_queue_data *kqd,
				unsigned int sched_domain, unsigned int depth)
{
	depth = clamp(depth, 1U, kyber_depth[sched_domain]);
	if (depth != kqd->domain_tokens[sched_domain].sb.depth) {
		sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth);
		trace_kyber_adjust(kqd->dev, kyber_domain_names[sched_domain],
				   depth);
	}
}

static void kyber_timer_fn(struct timer_list *t)
{
	struct kyber_queue_data *kqd = from_timer(kqd, t, timer);
	unsigned int sched_domain;
	int cpu;
	bool bad = false;

	/* Sum all of the per-cpu latency histograms. */
	for_each_online_cpu(cpu) {
		struct kyber_cpu_latency *cpu_latency;

		cpu_latency = per_cpu_ptr(kqd->cpu_latency, cpu);
		for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
			flush_latency_buckets(kqd, cpu_latency, sched_domain,
					      KYBER_TOTAL_LATENCY);
			flush_latency_buckets(kqd, cpu_latency, sched_domain,
					      KYBER_IO_LATENCY);
		}
	}

	/*
	 * Check if any domains have a high I/O latency, which might indicate
	 * congestion in the device. Note that we use the p90; we don't want to
	 * be too sensitive to outliers here.
	 */
	for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
		int p90;

		p90 = calculate_percentile(kqd, sched_domain, KYBER_IO_LATENCY,
					   90);
		if (p90 >= KYBER_GOOD_BUCKETS)
			bad = true;
	}

	/*
	 * Adjust the scheduling domain depths. If we determined that there was
	 * congestion, we throttle all domains with good latencies. Either way,
	 * we ease up on throttling domains with bad latencies.
	 */
	for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
		unsigned int orig_depth, depth;
		int p99;

		p99 = calculate_percentile(kqd, sched_domain,
					   KYBER_TOTAL_LATENCY, 99);
		/*
		 * This is kind of subtle: different domains will not
		 * necessarily have enough samples to calculate the latency
		 * percentiles during the same window, so we have to remember
		 * the p99 for the next time we observe congestion; once we do,
		 * we don't want to throttle again until we get more data, so we
		 * reset it to -1.
		 */
		if (bad) {
			if (p99 < 0)
				p99 = kqd->domain_p99[sched_domain];
			kqd->domain_p99[sched_domain] = -1;
		} else if (p99 >= 0) {
			kqd->domain_p99[sched_domain] = p99;
		}
		if (p99 < 0)
			continue;

		/*
		 * If this domain has bad latency, throttle less. Otherwise,
		 * throttle more iff we determined that there is congestion.
		 *
		 * The new depth is scaled linearly with the p99 latency vs the
		 * latency target. E.g., if the p99 is 3/4 of the target, then
		 * we throttle down to 3/4 of the current depth, and if the p99
		 * is 2x the target, then we double the depth.
		 */
		if (bad || p99 >= KYBER_GOOD_BUCKETS) {
			orig_depth = kqd->domain_tokens[sched_domain].sb.depth;
			depth = (orig_depth * (p99 + 1)) >> KYBER_LATENCY_SHIFT;
			kyber_resize_domain(kqd, sched_domain, depth);
		}
	}
}
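
/*
 * For example, if the device is congested and a domain's p99 is in bucket 2
 * (<= 3/4 of the target), the new depth is orig_depth * 3 / 4; a p99 in the
 * last bucket (7) scales the depth to 8/4 = 2x, clamped to kyber_depth[] by
 * kyber_resize_domain().
 */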

static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
{
	struct kyber_queue_data *kqd;
	int ret = -ENOMEM;
	int i;

	kqd = kzalloc_node(sizeof(*kqd), GFP_KERNEL, q->node);
	if (!kqd)
		goto err;

	kqd->q = q;
	kqd->dev = disk_devt(q->disk);

	kqd->cpu_latency = alloc_percpu_gfp(struct kyber_cpu_latency,
					    GFP_KERNEL | __GFP_ZERO);
	if (!kqd->cpu_latency)
		goto err_kqd;

	timer_setup(&kqd->timer, kyber_timer_fn, 0);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		WARN_ON(!kyber_depth[i]);
		WARN_ON(!kyber_batch_size[i]);
		ret = sbitmap_queue_init_node(&kqd->domain_tokens[i],
					      kyber_depth[i], -1, false,
					      GFP_KERNEL, q->node);
		if (ret) {
			while (--i >= 0)
				sbitmap_queue_free(&kqd->domain_tokens[i]);
			goto err_buckets;
		}
	}

	for (i = 0; i < KYBER_OTHER; i++) {
		kqd->domain_p99[i] = -1;
		kqd->latency_targets[i] = kyber_latency_targets[i];
	}

	return kqd;

err_buckets:
	free_percpu(kqd->cpu_latency);
err_kqd:
	kfree(kqd);
err:
	return ERR_PTR(ret);
}

static int kyber_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct kyber_queue_data *kqd;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	kqd = kyber_queue_data_alloc(q);
	if (IS_ERR(kqd)) {
		kobject_put(&eq->kobj);
		return PTR_ERR(kqd);
	}

	blk_stat_enable_accounting(q);

	blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);

	eq->elevator_data = kqd;
	q->elevator = eq;

	return 0;
}

static void kyber_exit_sched(struct elevator_queue *e)
{
	struct kyber_queue_data *kqd = e->elevator_data;
	int i;

	timer_shutdown_sync(&kqd->timer);
	blk_stat_disable_accounting(kqd->q);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++)
		sbitmap_queue_free(&kqd->domain_tokens[i]);
	free_percpu(kqd->cpu_latency);
	kfree(kqd);
}

static void kyber_ctx_queue_init(struct kyber_ctx_queue *kcq)
{
	unsigned int i;

	spin_lock_init(&kcq->lock);
	for (i = 0; i < KYBER_NUM_DOMAINS; i++)
		INIT_LIST_HEAD(&kcq->rq_list[i]);
}

static void kyber_depth_updated(struct blk_mq_hw_ctx *hctx)
{
	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
	struct blk_mq_tags *tags = hctx->sched_tags;
	unsigned int shift = tags->bitmap_tags.sb.shift;

	kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;

	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, kqd->async_depth);
}
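
/*
 * For example, with a per-word shift of 6 (64 tags per sbitmap word),
 * async_depth = 64 * 75 / 100 = 48, so asynchronous requests can take at
 * most 48 of every 64 tags, reserving the rest for synchronous requests
 * (see kyber_limit_depth() below).
 */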

static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct kyber_hctx_data *khd;
	int i;

	khd = kmalloc_node(sizeof(*khd), GFP_KERNEL, hctx->numa_node);
	if (!khd)
		return -ENOMEM;

	khd->kcqs = kmalloc_array_node(hctx->nr_ctx,
				       sizeof(struct kyber_ctx_queue),
				       GFP_KERNEL, hctx->numa_node);
	if (!khd->kcqs)
		goto err_khd;

	for (i = 0; i < hctx->nr_ctx; i++)
		kyber_ctx_queue_init(&khd->kcqs[i]);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		if (sbitmap_init_node(&khd->kcq_map[i], hctx->nr_ctx,
				      ilog2(8), GFP_KERNEL, hctx->numa_node,
				      false, false)) {
			while (--i >= 0)
				sbitmap_free(&khd->kcq_map[i]);
			goto err_kcqs;
		}
	}

	spin_lock_init(&khd->lock);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		INIT_LIST_HEAD(&khd->rqs[i]);
		khd->domain_wait[i].sbq = NULL;
		init_waitqueue_func_entry(&khd->domain_wait[i].wait,
					  kyber_domain_wake);
		khd->domain_wait[i].wait.private = hctx;
		INIT_LIST_HEAD(&khd->domain_wait[i].wait.entry);
		atomic_set(&khd->wait_index[i], 0);
	}

	khd->cur_domain = 0;
	khd->batching = 0;

	hctx->sched_data = khd;
	kyber_depth_updated(hctx);

	return 0;

err_kcqs:
	kfree(khd->kcqs);
err_khd:
	kfree(khd);
	return -ENOMEM;
}

static void kyber_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct kyber_hctx_data *khd = hctx->sched_data;
	int i;

	for (i = 0; i < KYBER_NUM_DOMAINS; i++)
		sbitmap_free(&khd->kcq_map[i]);
	kfree(khd->kcqs);
	kfree(hctx->sched_data);
}

static int rq_get_domain_token(struct request *rq)
{
	return (long)rq->elv.priv[0];
}

static void rq_set_domain_token(struct request *rq, int token)
{
	rq->elv.priv[0] = (void *)(long)token;
}

static void rq_clear_domain_token(struct kyber_queue_data *kqd,
				  struct request *rq)
{
	unsigned int sched_domain;
	int nr;

	nr = rq_get_domain_token(rq);
	if (nr != -1) {
		sched_domain = kyber_sched_domain(rq->cmd_flags);
		sbitmap_queue_clear(&kqd->domain_tokens[sched_domain], nr,
				    rq->mq_ctx->cpu);
	}
}

static void kyber_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
{
	/*
	 * We use the scheduler tags as per-hardware queue queueing tokens.
	 * Async requests can be limited at this stage.
	 */
	if (!op_is_sync(opf)) {
		struct kyber_queue_data *kqd = data->q->elevator->elevator_data;

		data->shallow_depth = kqd->async_depth;
	}
}

static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
	struct kyber_hctx_data *khd = hctx->sched_data;
	struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
	unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
	struct list_head *rq_list = &kcq->rq_list[sched_domain];
	bool merged;

	spin_lock(&kcq->lock);
	merged = blk_bio_list_merge(hctx->queue, rq_list, bio, nr_segs);
	spin_unlock(&kcq->lock);

	return merged;
}

static void kyber_prepare_request(struct request *rq)
{
	rq_set_domain_token(rq, -1);
}

static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
				  struct list_head *rq_list, bool at_head)
{
	struct kyber_hctx_data *khd = hctx->sched_data;
	struct request *rq, *next;

	list_for_each_entry_safe(rq, next, rq_list, queuelist) {
		unsigned int sched_domain = kyber_sched_domain(rq->cmd_flags);
		struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw[hctx->type]];
		struct list_head *head = &kcq->rq_list[sched_domain];

		spin_lock(&kcq->lock);
		trace_block_rq_insert(rq);
		if (at_head)
			list_move(&rq->queuelist, head);
		else
			list_move_tail(&rq->queuelist, head);
		sbitmap_set_bit(&khd->kcq_map[sched_domain],
				rq->mq_ctx->index_hw[hctx->type]);
		spin_unlock(&kcq->lock);
	}
}

static void kyber_finish_request(struct request *rq)
{
	struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;

	rq_clear_domain_token(kqd, rq);
}

static void add_latency_sample(struct kyber_cpu_latency *cpu_latency,
			       unsigned int sched_domain, unsigned int type,
			       u64 target, u64 latency)
{
	unsigned int bucket;
	u64 divisor;

	if (latency > 0) {
		divisor = max_t(u64, target >> KYBER_LATENCY_SHIFT, 1);
		bucket = min_t(unsigned int, div64_u64(latency - 1, divisor),
			       KYBER_LATENCY_BUCKETS - 1);
	} else {
		bucket = 0;
	}

	atomic_inc(&cpu_latency->buckets[sched_domain][type][bucket]);
}
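
/*
 * For example, against the 10 ms write target the divisor is 2.5 ms, so a
 * 6 ms write increments bucket (6,000,000 - 1) / 2,500,000 = 2, and any
 * write slower than 17.5 ms saturates into the last bucket,
 * KYBER_LATENCY_BUCKETS - 1 = 7.
 */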

static void kyber_completed_request(struct request *rq, u64 now)
{
	struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
	struct kyber_cpu_latency *cpu_latency;
	unsigned int sched_domain;
	u64 target;

	sched_domain = kyber_sched_domain(rq->cmd_flags);
	if (sched_domain == KYBER_OTHER)
		return;

	cpu_latency = get_cpu_ptr(kqd->cpu_latency);
	target = kqd->latency_targets[sched_domain];
	add_latency_sample(cpu_latency, sched_domain, KYBER_TOTAL_LATENCY,
			   target, now - rq->start_time_ns);
	add_latency_sample(cpu_latency, sched_domain, KYBER_IO_LATENCY, target,
			   now - rq->io_start_time_ns);
	put_cpu_ptr(kqd->cpu_latency);

	timer_reduce(&kqd->timer, jiffies + HZ / 10);
}

struct flush_kcq_data {
	struct kyber_hctx_data *khd;
	unsigned int sched_domain;
	struct list_head *list;
};

static bool flush_busy_kcq(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_kcq_data *flush_data = data;
	struct kyber_ctx_queue *kcq = &flush_data->khd->kcqs[bitnr];

	spin_lock(&kcq->lock);
	list_splice_tail_init(&kcq->rq_list[flush_data->sched_domain],
			      flush_data->list);
	sbitmap_clear_bit(sb, bitnr);
	spin_unlock(&kcq->lock);

	return true;
}

static void kyber_flush_busy_kcqs(struct kyber_hctx_data *khd,
				  unsigned int sched_domain,
				  struct list_head *list)
{
	struct flush_kcq_data data = {
		.khd = khd,
		.sched_domain = sched_domain,
		.list = list,
	};

	sbitmap_for_each_set(&khd->kcq_map[sched_domain],
			     flush_busy_kcq, &data);
}

static int kyber_domain_wake(wait_queue_entry_t *wqe, unsigned mode, int flags,
			     void *key)
{
	struct blk_mq_hw_ctx *hctx = READ_ONCE(wqe->private);
	struct sbq_wait *wait = container_of(wqe, struct sbq_wait, wait);

	sbitmap_del_wait_queue(wait);
	blk_mq_run_hw_queue(hctx, true);
	return 1;
}

static int kyber_get_domain_token(struct kyber_queue_data *kqd,
				  struct kyber_hctx_data *khd,
				  struct blk_mq_hw_ctx *hctx)
{
	unsigned int sched_domain = khd->cur_domain;
	struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
	struct sbq_wait *wait = &khd->domain_wait[sched_domain];
	struct sbq_wait_state *ws;
	int nr;

	nr = __sbitmap_queue_get(domain_tokens);

	/*
	 * If we failed to get a domain token, make sure the hardware queue is
	 * run when one becomes available. Note that this is serialized on
	 * khd->lock, but we still need to be careful about the waker.
	 */
	if (nr < 0 && list_empty_careful(&wait->wait.entry)) {
		ws = sbq_wait_ptr(domain_tokens,
				  &khd->wait_index[sched_domain]);
		khd->domain_ws[sched_domain] = ws;
		sbitmap_add_wait_queue(domain_tokens, ws, wait);

		/*
		 * Try again in case a token was freed before we got on the wait
		 * queue.
		 */
		nr = __sbitmap_queue_get(domain_tokens);
	}

	/*
	 * If we got a token while we were on the wait queue, remove ourselves
	 * from the wait queue to ensure that all wake ups make forward
	 * progress. It's possible that the waker already deleted the entry
	 * between the !list_empty_careful() check and us grabbing the lock, but
	 * list_del_init() is okay with that.
	 */
	if (nr >= 0 && !list_empty_careful(&wait->wait.entry)) {
		ws = khd->domain_ws[sched_domain];
		spin_lock_irq(&ws->wait.lock);
		sbitmap_del_wait_queue(wait);
		spin_unlock_irq(&ws->wait.lock);
	}

	return nr;
}

static struct request *
kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
			  struct kyber_hctx_data *khd,
			  struct blk_mq_hw_ctx *hctx)
{
	struct list_head *rqs;
	struct request *rq;
	int nr;

	rqs = &khd->rqs[khd->cur_domain];

	/*
	 * If we already have a flushed request, then we just need to get a
	 * token for it. Otherwise, if there are pending requests in the kcqs,
	 * flush the kcqs, but only if we can get a token. If not, we should
	 * leave the requests in the kcqs so that they can be merged. Note that
	 * khd->lock serializes the flushes, so if we observed any bit set in
	 * the kcq_map, we will always get a request.
	 */
	rq = list_first_entry_or_null(rqs, struct request, queuelist);
	if (rq) {
		nr = kyber_get_domain_token(kqd, khd, hctx);
		if (nr >= 0) {
			khd->batching++;
			rq_set_domain_token(rq, nr);
			list_del_init(&rq->queuelist);
			return rq;
		} else {
			trace_kyber_throttled(kqd->dev,
					      kyber_domain_names[khd->cur_domain]);
		}
	} else if (sbitmap_any_bit_set(&khd->kcq_map[khd->cur_domain])) {
		nr = kyber_get_domain_token(kqd, khd, hctx);
		if (nr >= 0) {
			kyber_flush_busy_kcqs(khd, khd->cur_domain, rqs);
			rq = list_first_entry(rqs, struct request, queuelist);
			khd->batching++;
			rq_set_domain_token(rq, nr);
			list_del_init(&rq->queuelist);
			return rq;
		} else {
			trace_kyber_throttled(kqd->dev,
					      kyber_domain_names[khd->cur_domain]);
		}
	}

	/* There were either no pending requests or no tokens. */
	return NULL;
}

static struct request *kyber_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
	struct kyber_hctx_data *khd = hctx->sched_data;
	struct request *rq;
	int i;

	spin_lock(&khd->lock);

	/*
	 * First, if we are still entitled to batch, try to dispatch a request
	 * from the batch.
	 */
	if (khd->batching < kyber_batch_size[khd->cur_domain]) {
		rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
		if (rq)
			goto out;
	}

	/*
	 * Either,
	 * 1. We were no longer entitled to a batch.
	 * 2. The domain we were batching didn't have any requests.
	 * 3. The domain we were batching was out of tokens.
	 *
	 * Start another batch. Note that this wraps back around to the original
	 * domain if no other domains have requests or tokens.
	 */
	khd->batching = 0;
	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		if (khd->cur_domain == KYBER_NUM_DOMAINS - 1)
			khd->cur_domain = 0;
		else
			khd->cur_domain++;

		rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
		if (rq)
			goto out;
	}

	rq = NULL;
out:
	spin_unlock(&khd->lock);
	return rq;
}
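
/*
 * For example, if cur_domain was KYBER_WRITE when its batch ended, the loop
 * above tries KYBER_DISCARD, then KYBER_OTHER, then KYBER_READ, and finally
 * KYBER_WRITE again, dispatching from the first domain with both a request
 * and a token.
 */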

static bool kyber_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct kyber_hctx_data *khd = hctx->sched_data;
	int i;

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		if (!list_empty_careful(&khd->rqs[i]) ||
		    sbitmap_any_bit_set(&khd->kcq_map[i]))
			return true;
	}

	return false;
}

#define KYBER_LAT_SHOW_STORE(domain, name)				\
static ssize_t kyber_##name##_lat_show(struct elevator_queue *e,	\
				       char *page)			\
{									\
	struct kyber_queue_data *kqd = e->elevator_data;		\
									\
	return sprintf(page, "%llu\n", kqd->latency_targets[domain]);	\
}									\
									\
static ssize_t kyber_##name##_lat_store(struct elevator_queue *e,	\
					const char *page, size_t count)	\
{									\
	struct kyber_queue_data *kqd = e->elevator_data;		\
	unsigned long long nsec;					\
	int ret;							\
									\
	ret = kstrtoull(page, 10, &nsec);				\
	if (ret)							\
		return ret;						\
									\
	kqd->latency_targets[domain] = nsec;				\
									\
	return count;							\
}
KYBER_LAT_SHOW_STORE(KYBER_READ, read);
KYBER_LAT_SHOW_STORE(KYBER_WRITE, write);
#undef KYBER_LAT_SHOW_STORE
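
/*
 * These attributes appear under the queue's iosched directory in sysfs,
 * e.g. (device name for illustration only):
 *
 *   cat /sys/block/sda/queue/iosched/read_lat_nsec
 *   echo 20000000 > /sys/block/sda/queue/iosched/write_lat_nsec
 *
 * New targets apply to subsequently recorded samples; note the store
 * accepts any value, including 0, since there is no validation beyond
 * kstrtoull().
 */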

#define KYBER_LAT_ATTR(op) __ATTR(op##_lat_nsec, 0644, kyber_##op##_lat_show, kyber_##op##_lat_store)
static struct elv_fs_entry kyber_sched_attrs[] = {
	KYBER_LAT_ATTR(read),
	KYBER_LAT_ATTR(write),
	__ATTR_NULL
};
#undef KYBER_LAT_ATTR

#ifdef CONFIG_BLK_DEBUG_FS
#define KYBER_DEBUGFS_DOMAIN_ATTRS(domain, name)			\
static int kyber_##name##_tokens_show(void *data, struct seq_file *m)	\
{									\
	struct request_queue *q = data;					\
	struct kyber_queue_data *kqd = q->elevator->elevator_data;	\
									\
	sbitmap_queue_show(&kqd->domain_tokens[domain], m);		\
	return 0;							\
}									\
									\
static void *kyber_##name##_rqs_start(struct seq_file *m, loff_t *pos)	\
	__acquires(&khd->lock)						\
{									\
	struct blk_mq_hw_ctx *hctx = m->private;			\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
									\
	spin_lock(&khd->lock);						\
	return seq_list_start(&khd->rqs[domain], *pos);			\
}									\
									\
static void *kyber_##name##_rqs_next(struct seq_file *m, void *v,	\
				     loff_t *pos)			\
{									\
	struct blk_mq_hw_ctx *hctx = m->private;			\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
									\
	return seq_list_next(v, &khd->rqs[domain], pos);		\
}									\
									\
static void kyber_##name##_rqs_stop(struct seq_file *m, void *v)	\
	__releases(&khd->lock)						\
{									\
	struct blk_mq_hw_ctx *hctx = m->private;			\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
									\
	spin_unlock(&khd->lock);					\
}									\
									\
static const struct seq_operations kyber_##name##_rqs_seq_ops = {	\
	.start	= kyber_##name##_rqs_start,				\
	.next	= kyber_##name##_rqs_next,				\
	.stop	= kyber_##name##_rqs_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
};									\
									\
static int kyber_##name##_waiting_show(void *data, struct seq_file *m)	\
{									\
	struct blk_mq_hw_ctx *hctx = data;				\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
	wait_queue_entry_t *wait = &khd->domain_wait[domain].wait;	\
									\
	seq_printf(m, "%d\n", !list_empty_careful(&wait->entry));	\
	return 0;							\
}
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_READ, read)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_WRITE, write)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_DISCARD, discard)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_OTHER, other)
#undef KYBER_DEBUGFS_DOMAIN_ATTRS

static int kyber_async_depth_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct kyber_queue_data *kqd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", kqd->async_depth);
	return 0;
}

static int kyber_cur_domain_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct kyber_hctx_data *khd = hctx->sched_data;

	seq_printf(m, "%s\n", kyber_domain_names[khd->cur_domain]);
	return 0;
}

static int kyber_batching_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct kyber_hctx_data *khd = hctx->sched_data;

	seq_printf(m, "%u\n", khd->batching);
	return 0;
}

#define KYBER_QUEUE_DOMAIN_ATTRS(name)	\
	{#name "_tokens", 0400, kyber_##name##_tokens_show}
static const struct blk_mq_debugfs_attr kyber_queue_debugfs_attrs[] = {
	KYBER_QUEUE_DOMAIN_ATTRS(read),
	KYBER_QUEUE_DOMAIN_ATTRS(write),
	KYBER_QUEUE_DOMAIN_ATTRS(discard),
	KYBER_QUEUE_DOMAIN_ATTRS(other),
	{"async_depth", 0400, kyber_async_depth_show},
	{},
};
#undef KYBER_QUEUE_DOMAIN_ATTRS

#define KYBER_HCTX_DOMAIN_ATTRS(name)					\
	{#name "_rqs", 0400, .seq_ops = &kyber_##name##_rqs_seq_ops},	\
	{#name "_waiting", 0400, kyber_##name##_waiting_show}
static const struct blk_mq_debugfs_attr kyber_hctx_debugfs_attrs[] = {
	KYBER_HCTX_DOMAIN_ATTRS(read),
	KYBER_HCTX_DOMAIN_ATTRS(write),
	KYBER_HCTX_DOMAIN_ATTRS(discard),
	KYBER_HCTX_DOMAIN_ATTRS(other),
	{"cur_domain", 0400, kyber_cur_domain_show},
	{"batching", 0400, kyber_batching_show},
	{},
};
#undef KYBER_HCTX_DOMAIN_ATTRS
#endif

static struct elevator_type kyber_sched = {
	.ops = {
		.init_sched = kyber_init_sched,
		.exit_sched = kyber_exit_sched,
		.init_hctx = kyber_init_hctx,
		.exit_hctx = kyber_exit_hctx,
		.limit_depth = kyber_limit_depth,
		.bio_merge = kyber_bio_merge,
		.prepare_request = kyber_prepare_request,
		.insert_requests = kyber_insert_requests,
		.finish_request = kyber_finish_request,
		.requeue_request = kyber_finish_request,
		.completed_request = kyber_completed_request,
		.dispatch_request = kyber_dispatch_request,
		.has_work = kyber_has_work,
		.depth_updated = kyber_depth_updated,
	},
#ifdef CONFIG_BLK_DEBUG_FS
	.queue_debugfs_attrs = kyber_queue_debugfs_attrs,
	.hctx_debugfs_attrs = kyber_hctx_debugfs_attrs,
#endif
	.elevator_attrs = kyber_sched_attrs,
	.elevator_name = "kyber",
	.elevator_owner = THIS_MODULE,
};

static int __init kyber_init(void)
{
	return elv_register(&kyber_sched);
}

static void __exit kyber_exit(void)
{
	elv_unregister(&kyber_sched);
}

module_init(kyber_init);
module_exit(kyber_exit);

MODULE_AUTHOR("Omar Sandoval");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Kyber I/O scheduler");
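
/*
 * Kyber is selected per device through the standard blk-mq scheduler switch
 * (device name for illustration only):
 *
 *   echo kyber > /sys/block/sda/queue/scheduler
 */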
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * The Kyber I/O scheduler. Controls latency by throttling queue depths using
   4 * scalable techniques.
   5 *
   6 * Copyright (C) 2017 Facebook
   7 */
   8
   9#include <linux/kernel.h>
  10#include <linux/blkdev.h>
 
  11#include <linux/module.h>
  12#include <linux/sbitmap.h>
  13
  14#include <trace/events/block.h>
  15
  16#include "elevator.h"
  17#include "blk.h"
  18#include "blk-mq.h"
  19#include "blk-mq-debugfs.h"
  20#include "blk-mq-sched.h"
 
  21
  22#define CREATE_TRACE_POINTS
  23#include <trace/events/kyber.h>
  24
  25/*
  26 * Scheduling domains: the device is divided into multiple domains based on the
  27 * request type.
  28 */
  29enum {
  30	KYBER_READ,
  31	KYBER_WRITE,
  32	KYBER_DISCARD,
  33	KYBER_OTHER,
  34	KYBER_NUM_DOMAINS,
  35};
  36
  37static const char *kyber_domain_names[] = {
  38	[KYBER_READ] = "READ",
  39	[KYBER_WRITE] = "WRITE",
  40	[KYBER_DISCARD] = "DISCARD",
  41	[KYBER_OTHER] = "OTHER",
  42};
  43
  44enum {
  45	/*
  46	 * In order to prevent starvation of synchronous requests by a flood of
  47	 * asynchronous requests, we reserve 25% of requests for synchronous
  48	 * operations.
  49	 */
  50	KYBER_ASYNC_PERCENT = 75,
  51};
  52
  53/*
  54 * Maximum device-wide depth for each scheduling domain.
  55 *
  56 * Even for fast devices with lots of tags like NVMe, you can saturate the
  57 * device with only a fraction of the maximum possible queue depth. So, we cap
  58 * these to a reasonable value.
  59 */
  60static const unsigned int kyber_depth[] = {
  61	[KYBER_READ] = 256,
  62	[KYBER_WRITE] = 128,
  63	[KYBER_DISCARD] = 64,
  64	[KYBER_OTHER] = 16,
  65};
  66
  67/*
  68 * Default latency targets for each scheduling domain.
  69 */
  70static const u64 kyber_latency_targets[] = {
  71	[KYBER_READ] = 2ULL * NSEC_PER_MSEC,
  72	[KYBER_WRITE] = 10ULL * NSEC_PER_MSEC,
  73	[KYBER_DISCARD] = 5ULL * NSEC_PER_SEC,
  74};
  75
  76/*
  77 * Batch size (number of requests we'll dispatch in a row) for each scheduling
  78 * domain.
  79 */
  80static const unsigned int kyber_batch_size[] = {
  81	[KYBER_READ] = 16,
  82	[KYBER_WRITE] = 8,
  83	[KYBER_DISCARD] = 1,
  84	[KYBER_OTHER] = 1,
  85};
  86
  87/*
  88 * Requests latencies are recorded in a histogram with buckets defined relative
  89 * to the target latency:
  90 *
  91 * <= 1/4 * target latency
  92 * <= 1/2 * target latency
  93 * <= 3/4 * target latency
  94 * <= target latency
  95 * <= 1 1/4 * target latency
  96 * <= 1 1/2 * target latency
  97 * <= 1 3/4 * target latency
  98 * > 1 3/4 * target latency
  99 */
 100enum {
 101	/*
 102	 * The width of the latency histogram buckets is
 103	 * 1 / (1 << KYBER_LATENCY_SHIFT) * target latency.
 104	 */
 105	KYBER_LATENCY_SHIFT = 2,
 106	/*
 107	 * The first (1 << KYBER_LATENCY_SHIFT) buckets are <= target latency,
 108	 * thus, "good".
 109	 */
 110	KYBER_GOOD_BUCKETS = 1 << KYBER_LATENCY_SHIFT,
 111	/* There are also (1 << KYBER_LATENCY_SHIFT) "bad" buckets. */
 112	KYBER_LATENCY_BUCKETS = 2 << KYBER_LATENCY_SHIFT,
 113};
 114
 115/*
 116 * We measure both the total latency and the I/O latency (i.e., latency after
 117 * submitting to the device).
 118 */
 119enum {
 120	KYBER_TOTAL_LATENCY,
 121	KYBER_IO_LATENCY,
 122};
 123
 124static const char *kyber_latency_type_names[] = {
 125	[KYBER_TOTAL_LATENCY] = "total",
 126	[KYBER_IO_LATENCY] = "I/O",
 127};
 128
 129/*
 130 * Per-cpu latency histograms: total latency and I/O latency for each scheduling
 131 * domain except for KYBER_OTHER.
 132 */
 133struct kyber_cpu_latency {
 134	atomic_t buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS];
 135};
 136
 137/*
 138 * There is a same mapping between ctx & hctx and kcq & khd,
 139 * we use request->mq_ctx->index_hw to index the kcq in khd.
 140 */
 141struct kyber_ctx_queue {
 142	/*
 143	 * Used to ensure operations on rq_list and kcq_map to be an atmoic one.
 144	 * Also protect the rqs on rq_list when merge.
 145	 */
 146	spinlock_t lock;
 147	struct list_head rq_list[KYBER_NUM_DOMAINS];
 148} ____cacheline_aligned_in_smp;
 149
 150struct kyber_queue_data {
 151	struct request_queue *q;
 152	dev_t dev;
 153
 154	/*
 155	 * Each scheduling domain has a limited number of in-flight requests
 156	 * device-wide, limited by these tokens.
 157	 */
 158	struct sbitmap_queue domain_tokens[KYBER_NUM_DOMAINS];
 159
 160	/*
 161	 * Async request percentage, converted to per-word depth for
 162	 * sbitmap_get_shallow().
 163	 */
 164	unsigned int async_depth;
 165
 166	struct kyber_cpu_latency __percpu *cpu_latency;
 167
 168	/* Timer for stats aggregation and adjusting domain tokens. */
 169	struct timer_list timer;
 170
 171	unsigned int latency_buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS];
 172
 173	unsigned long latency_timeout[KYBER_OTHER];
 174
 175	int domain_p99[KYBER_OTHER];
 176
 177	/* Target latencies in nanoseconds. */
 178	u64 latency_targets[KYBER_OTHER];
 179};
 180
 181struct kyber_hctx_data {
 182	spinlock_t lock;
 183	struct list_head rqs[KYBER_NUM_DOMAINS];
 184	unsigned int cur_domain;
 185	unsigned int batching;
 186	struct kyber_ctx_queue *kcqs;
 187	struct sbitmap kcq_map[KYBER_NUM_DOMAINS];
 188	struct sbq_wait domain_wait[KYBER_NUM_DOMAINS];
 189	struct sbq_wait_state *domain_ws[KYBER_NUM_DOMAINS];
 190	atomic_t wait_index[KYBER_NUM_DOMAINS];
 191};
 192
 193static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
 194			     void *key);
 195
 196static unsigned int kyber_sched_domain(blk_opf_t opf)
 197{
 198	switch (opf & REQ_OP_MASK) {
 199	case REQ_OP_READ:
 200		return KYBER_READ;
 201	case REQ_OP_WRITE:
 202		return KYBER_WRITE;
 203	case REQ_OP_DISCARD:
 204		return KYBER_DISCARD;
 205	default:
 206		return KYBER_OTHER;
 207	}
 208}
 209
 210static void flush_latency_buckets(struct kyber_queue_data *kqd,
 211				  struct kyber_cpu_latency *cpu_latency,
 212				  unsigned int sched_domain, unsigned int type)
 213{
 214	unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
 215	atomic_t *cpu_buckets = cpu_latency->buckets[sched_domain][type];
 216	unsigned int bucket;
 217
 218	for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)
 219		buckets[bucket] += atomic_xchg(&cpu_buckets[bucket], 0);
 220}
 221
 222/*
 223 * Calculate the histogram bucket with the given percentile rank, or -1 if there
 224 * aren't enough samples yet.
 225 */
 226static int calculate_percentile(struct kyber_queue_data *kqd,
 227				unsigned int sched_domain, unsigned int type,
 228				unsigned int percentile)
 229{
 230	unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
 231	unsigned int bucket, samples = 0, percentile_samples;
 232
 233	for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)
 234		samples += buckets[bucket];
 235
 236	if (!samples)
 237		return -1;
 238
 239	/*
 240	 * We do the calculation once we have 500 samples or one second passes
 241	 * since the first sample was recorded, whichever comes first.
 242	 */
 243	if (!kqd->latency_timeout[sched_domain])
 244		kqd->latency_timeout[sched_domain] = max(jiffies + HZ, 1UL);
 245	if (samples < 500 &&
 246	    time_is_after_jiffies(kqd->latency_timeout[sched_domain])) {
 247		return -1;
 248	}
 249	kqd->latency_timeout[sched_domain] = 0;
 250
 251	percentile_samples = DIV_ROUND_UP(samples * percentile, 100);
 252	for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS - 1; bucket++) {
 253		if (buckets[bucket] >= percentile_samples)
 254			break;
 255		percentile_samples -= buckets[bucket];
 256	}
 257	memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));
 258
 259	trace_kyber_latency(kqd->dev, kyber_domain_names[sched_domain],
 260			    kyber_latency_type_names[type], percentile,
 261			    bucket + 1, 1 << KYBER_LATENCY_SHIFT, samples);
 262
 263	return bucket;
 264}
 265
 266static void kyber_resize_domain(struct kyber_queue_data *kqd,
 267				unsigned int sched_domain, unsigned int depth)
 268{
 269	depth = clamp(depth, 1U, kyber_depth[sched_domain]);
 270	if (depth != kqd->domain_tokens[sched_domain].sb.depth) {
 271		sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth);
 272		trace_kyber_adjust(kqd->dev, kyber_domain_names[sched_domain],
 273				   depth);
 274	}
 275}
 276
 277static void kyber_timer_fn(struct timer_list *t)
 278{
 279	struct kyber_queue_data *kqd = from_timer(kqd, t, timer);
 280	unsigned int sched_domain;
 281	int cpu;
 282	bool bad = false;
 283
 284	/* Sum all of the per-cpu latency histograms. */
 285	for_each_online_cpu(cpu) {
 286		struct kyber_cpu_latency *cpu_latency;
 287
 288		cpu_latency = per_cpu_ptr(kqd->cpu_latency, cpu);
 289		for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
 290			flush_latency_buckets(kqd, cpu_latency, sched_domain,
 291					      KYBER_TOTAL_LATENCY);
 292			flush_latency_buckets(kqd, cpu_latency, sched_domain,
 293					      KYBER_IO_LATENCY);
 294		}
 295	}
 296
 297	/*
 298	 * Check if any domains have a high I/O latency, which might indicate
 299	 * congestion in the device. Note that we use the p90; we don't want to
 300	 * be too sensitive to outliers here.
 301	 */
 302	for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
 303		int p90;
 304
 305		p90 = calculate_percentile(kqd, sched_domain, KYBER_IO_LATENCY,
 306					   90);
 307		if (p90 >= KYBER_GOOD_BUCKETS)
 308			bad = true;
 309	}
 310
 311	/*
 312	 * Adjust the scheduling domain depths. If we determined that there was
 313	 * congestion, we throttle all domains with good latencies. Either way,
 314	 * we ease up on throttling domains with bad latencies.
 315	 */
 316	for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
 317		unsigned int orig_depth, depth;
 318		int p99;
 319
 320		p99 = calculate_percentile(kqd, sched_domain,
 321					   KYBER_TOTAL_LATENCY, 99);
 322		/*
 323		 * This is kind of subtle: different domains will not
 324		 * necessarily have enough samples to calculate the latency
 325		 * percentiles during the same window, so we have to remember
 326		 * the p99 for the next time we observe congestion; once we do,
 327		 * we don't want to throttle again until we get more data, so we
 328		 * reset it to -1.
 329		 */
 330		if (bad) {
 331			if (p99 < 0)
 332				p99 = kqd->domain_p99[sched_domain];
 333			kqd->domain_p99[sched_domain] = -1;
 334		} else if (p99 >= 0) {
 335			kqd->domain_p99[sched_domain] = p99;
 336		}
 337		if (p99 < 0)
 338			continue;
 339
 340		/*
 341		 * If this domain has bad latency, throttle less. Otherwise,
 342		 * throttle more iff we determined that there is congestion.
 343		 *
 344		 * The new depth is scaled linearly with the p99 latency vs the
 345		 * latency target. E.g., if the p99 is 3/4 of the target, then
 346		 * we throttle down to 3/4 of the current depth, and if the p99
 347		 * is 2x the target, then we double the depth.
 348		 */
 349		if (bad || p99 >= KYBER_GOOD_BUCKETS) {
 350			orig_depth = kqd->domain_tokens[sched_domain].sb.depth;
 351			depth = (orig_depth * (p99 + 1)) >> KYBER_LATENCY_SHIFT;
 352			kyber_resize_domain(kqd, sched_domain, depth);
 353		}
 354	}
 355}
 356
 357static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
 358{
 359	struct kyber_queue_data *kqd;
 360	int ret = -ENOMEM;
 361	int i;
 362
 363	kqd = kzalloc_node(sizeof(*kqd), GFP_KERNEL, q->node);
 364	if (!kqd)
 365		goto err;
 366
 367	kqd->q = q;
 368	kqd->dev = disk_devt(q->disk);
 369
 370	kqd->cpu_latency = alloc_percpu_gfp(struct kyber_cpu_latency,
 371					    GFP_KERNEL | __GFP_ZERO);
 372	if (!kqd->cpu_latency)
 373		goto err_kqd;
 374
 375	timer_setup(&kqd->timer, kyber_timer_fn, 0);
 376
 377	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
 378		WARN_ON(!kyber_depth[i]);
 379		WARN_ON(!kyber_batch_size[i]);
 380		ret = sbitmap_queue_init_node(&kqd->domain_tokens[i],
 381					      kyber_depth[i], -1, false,
 382					      GFP_KERNEL, q->node);
 383		if (ret) {
 384			while (--i >= 0)
 385				sbitmap_queue_free(&kqd->domain_tokens[i]);
 386			goto err_buckets;
 387		}
 388	}
 389
 390	for (i = 0; i < KYBER_OTHER; i++) {
 391		kqd->domain_p99[i] = -1;
 392		kqd->latency_targets[i] = kyber_latency_targets[i];
 393	}
 394
 395	return kqd;
 396
 397err_buckets:
 398	free_percpu(kqd->cpu_latency);
 399err_kqd:
 400	kfree(kqd);
 401err:
 402	return ERR_PTR(ret);
 403}
 404
 405static int kyber_init_sched(struct request_queue *q, struct elevator_type *e)
 406{
 407	struct kyber_queue_data *kqd;
 408	struct elevator_queue *eq;
 409
 410	eq = elevator_alloc(q, e);
 411	if (!eq)
 412		return -ENOMEM;
 413
 414	kqd = kyber_queue_data_alloc(q);
 415	if (IS_ERR(kqd)) {
 416		kobject_put(&eq->kobj);
 417		return PTR_ERR(kqd);
 418	}
 419
 420	blk_stat_enable_accounting(q);
 421
 422	blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
 423
 424	eq->elevator_data = kqd;
 425	q->elevator = eq;
 426
 427	return 0;
 428}
 429
 430static void kyber_exit_sched(struct elevator_queue *e)
 431{
 432	struct kyber_queue_data *kqd = e->elevator_data;
 433	int i;
 434
 435	timer_shutdown_sync(&kqd->timer);
 436	blk_stat_disable_accounting(kqd->q);
 437
 438	for (i = 0; i < KYBER_NUM_DOMAINS; i++)
 439		sbitmap_queue_free(&kqd->domain_tokens[i]);
 440	free_percpu(kqd->cpu_latency);
 441	kfree(kqd);
 442}
 443
 444static void kyber_ctx_queue_init(struct kyber_ctx_queue *kcq)
 445{
 446	unsigned int i;
 447
 448	spin_lock_init(&kcq->lock);
 449	for (i = 0; i < KYBER_NUM_DOMAINS; i++)
 450		INIT_LIST_HEAD(&kcq->rq_list[i]);
 451}
 452
 453static void kyber_depth_updated(struct blk_mq_hw_ctx *hctx)
 454{
 455	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
 456	struct blk_mq_tags *tags = hctx->sched_tags;
 457	unsigned int shift = tags->bitmap_tags.sb.shift;
 458
 459	kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;
 460
 461	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, kqd->async_depth);
 462}
 463
 464static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 465{
 466	struct kyber_hctx_data *khd;
 467	int i;
 468
 469	khd = kmalloc_node(sizeof(*khd), GFP_KERNEL, hctx->numa_node);
 470	if (!khd)
 471		return -ENOMEM;
 472
 473	khd->kcqs = kmalloc_array_node(hctx->nr_ctx,
 474				       sizeof(struct kyber_ctx_queue),
 475				       GFP_KERNEL, hctx->numa_node);
 476	if (!khd->kcqs)
 477		goto err_khd;
 478
 479	for (i = 0; i < hctx->nr_ctx; i++)
 480		kyber_ctx_queue_init(&khd->kcqs[i]);
 481
 482	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
 483		if (sbitmap_init_node(&khd->kcq_map[i], hctx->nr_ctx,
 484				      ilog2(8), GFP_KERNEL, hctx->numa_node,
 485				      false, false)) {
 486			while (--i >= 0)
 487				sbitmap_free(&khd->kcq_map[i]);
 488			goto err_kcqs;
 489		}
 490	}
 491
 492	spin_lock_init(&khd->lock);
 493
 494	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
 495		INIT_LIST_HEAD(&khd->rqs[i]);
 496		khd->domain_wait[i].sbq = NULL;
 497		init_waitqueue_func_entry(&khd->domain_wait[i].wait,
 498					  kyber_domain_wake);
 499		khd->domain_wait[i].wait.private = hctx;
 500		INIT_LIST_HEAD(&khd->domain_wait[i].wait.entry);
 501		atomic_set(&khd->wait_index[i], 0);
 502	}
 503
 504	khd->cur_domain = 0;
 505	khd->batching = 0;
 506
 507	hctx->sched_data = khd;
 508	kyber_depth_updated(hctx);
 509
 510	return 0;
 511
 512err_kcqs:
 513	kfree(khd->kcqs);
 514err_khd:
 515	kfree(khd);
 516	return -ENOMEM;
 517}
 518
 519static void kyber_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 520{
 521	struct kyber_hctx_data *khd = hctx->sched_data;
 522	int i;
 523
 524	for (i = 0; i < KYBER_NUM_DOMAINS; i++)
 525		sbitmap_free(&khd->kcq_map[i]);
 526	kfree(khd->kcqs);
 527	kfree(hctx->sched_data);
 528}
 529
 530static int rq_get_domain_token(struct request *rq)
 531{
 532	return (long)rq->elv.priv[0];
 533}
 534
 535static void rq_set_domain_token(struct request *rq, int token)
 536{
 537	rq->elv.priv[0] = (void *)(long)token;
 538}
 539
 540static void rq_clear_domain_token(struct kyber_queue_data *kqd,
 541				  struct request *rq)
 542{
 543	unsigned int sched_domain;
 544	int nr;
 545
 546	nr = rq_get_domain_token(rq);
 547	if (nr != -1) {
 548		sched_domain = kyber_sched_domain(rq->cmd_flags);
 549		sbitmap_queue_clear(&kqd->domain_tokens[sched_domain], nr,
 550				    rq->mq_ctx->cpu);
 551	}
 552}
 553
 554static void kyber_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
 555{
 556	/*
 557	 * We use the scheduler tags as per-hardware queue queueing tokens.
 558	 * Async requests can be limited at this stage.
 559	 */
 560	if (!op_is_sync(opf)) {
 561		struct kyber_queue_data *kqd = data->q->elevator->elevator_data;
 562
 563		data->shallow_depth = kqd->async_depth;
 564	}
 565}
 566
 567static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
 568		unsigned int nr_segs)
 569{
 570	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 571	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
 572	struct kyber_hctx_data *khd = hctx->sched_data;
 573	struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
 574	unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
 575	struct list_head *rq_list = &kcq->rq_list[sched_domain];
 576	bool merged;
 577
 578	spin_lock(&kcq->lock);
 579	merged = blk_bio_list_merge(hctx->queue, rq_list, bio, nr_segs);
 580	spin_unlock(&kcq->lock);
 581
 582	return merged;
 583}
 584
 585static void kyber_prepare_request(struct request *rq)
 586{
 587	rq_set_domain_token(rq, -1);
 588}
 589
 590static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
 591				  struct list_head *rq_list,
 592				  blk_insert_t flags)
 593{
 594	struct kyber_hctx_data *khd = hctx->sched_data;
 595	struct request *rq, *next;
 596
 597	list_for_each_entry_safe(rq, next, rq_list, queuelist) {
 598		unsigned int sched_domain = kyber_sched_domain(rq->cmd_flags);
 599		struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw[hctx->type]];
 600		struct list_head *head = &kcq->rq_list[sched_domain];
 601
 602		spin_lock(&kcq->lock);
 603		trace_block_rq_insert(rq);
 604		if (flags & BLK_MQ_INSERT_AT_HEAD)
 605			list_move(&rq->queuelist, head);
 606		else
 607			list_move_tail(&rq->queuelist, head);
 608		sbitmap_set_bit(&khd->kcq_map[sched_domain],
 609				rq->mq_ctx->index_hw[hctx->type]);
 610		spin_unlock(&kcq->lock);
 611	}
 612}
 613
 614static void kyber_finish_request(struct request *rq)
 615{
 616	struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
 617
 618	rq_clear_domain_token(kqd, rq);
 619}
 620
 621static void add_latency_sample(struct kyber_cpu_latency *cpu_latency,
 622			       unsigned int sched_domain, unsigned int type,
 623			       u64 target, u64 latency)
 624{
 625	unsigned int bucket;
 626	u64 divisor;
 627
 628	if (latency > 0) {
 629		divisor = max_t(u64, target >> KYBER_LATENCY_SHIFT, 1);
 630		bucket = min_t(unsigned int, div64_u64(latency - 1, divisor),
 631			       KYBER_LATENCY_BUCKETS - 1);
 632	} else {
 633		bucket = 0;
 634	}
 635
 636	atomic_inc(&cpu_latency->buckets[sched_domain][type][bucket]);
 637}
 638
 639static void kyber_completed_request(struct request *rq, u64 now)
 640{
 641	struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
 642	struct kyber_cpu_latency *cpu_latency;
 643	unsigned int sched_domain;
 644	u64 target;
 645
 646	sched_domain = kyber_sched_domain(rq->cmd_flags);
 647	if (sched_domain == KYBER_OTHER)
 648		return;
 649
 650	cpu_latency = get_cpu_ptr(kqd->cpu_latency);
 651	target = kqd->latency_targets[sched_domain];
 652	add_latency_sample(cpu_latency, sched_domain, KYBER_TOTAL_LATENCY,
 653			   target, now - rq->start_time_ns);
 654	add_latency_sample(cpu_latency, sched_domain, KYBER_IO_LATENCY, target,
 655			   now - rq->io_start_time_ns);
 656	put_cpu_ptr(kqd->cpu_latency);
 657
 658	timer_reduce(&kqd->timer, jiffies + HZ / 10);
 659}
 660
 661struct flush_kcq_data {
 662	struct kyber_hctx_data *khd;
 663	unsigned int sched_domain;
 664	struct list_head *list;
 665};
 666
 667static bool flush_busy_kcq(struct sbitmap *sb, unsigned int bitnr, void *data)
 668{
 669	struct flush_kcq_data *flush_data = data;
 670	struct kyber_ctx_queue *kcq = &flush_data->khd->kcqs[bitnr];
 671
 672	spin_lock(&kcq->lock);
 673	list_splice_tail_init(&kcq->rq_list[flush_data->sched_domain],
 674			      flush_data->list);
 675	sbitmap_clear_bit(sb, bitnr);
 676	spin_unlock(&kcq->lock);
 677
 678	return true;
 679}
 680
 681static void kyber_flush_busy_kcqs(struct kyber_hctx_data *khd,
 682				  unsigned int sched_domain,
 683				  struct list_head *list)
 684{
 685	struct flush_kcq_data data = {
 686		.khd = khd,
 687		.sched_domain = sched_domain,
 688		.list = list,
 689	};
 690
 691	sbitmap_for_each_set(&khd->kcq_map[sched_domain],
 692			     flush_busy_kcq, &data);
 693}
 694
 695static int kyber_domain_wake(wait_queue_entry_t *wqe, unsigned mode, int flags,
 696			     void *key)
 697{
 698	struct blk_mq_hw_ctx *hctx = READ_ONCE(wqe->private);
 699	struct sbq_wait *wait = container_of(wqe, struct sbq_wait, wait);
 700
 701	sbitmap_del_wait_queue(wait);
 702	blk_mq_run_hw_queue(hctx, true);
 703	return 1;
 704}
 705
 706static int kyber_get_domain_token(struct kyber_queue_data *kqd,
 707				  struct kyber_hctx_data *khd,
 708				  struct blk_mq_hw_ctx *hctx)
 709{
 710	unsigned int sched_domain = khd->cur_domain;
 711	struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
 712	struct sbq_wait *wait = &khd->domain_wait[sched_domain];
 713	struct sbq_wait_state *ws;
 714	int nr;
 715
 716	nr = __sbitmap_queue_get(domain_tokens);
 717
 718	/*
 719	 * If we failed to get a domain token, make sure the hardware queue is
 720	 * run when one becomes available. Note that this is serialized on
 721	 * khd->lock, but we still need to be careful about the waker.
 722	 */
 723	if (nr < 0 && list_empty_careful(&wait->wait.entry)) {
 724		ws = sbq_wait_ptr(domain_tokens,
 725				  &khd->wait_index[sched_domain]);
 726		khd->domain_ws[sched_domain] = ws;
 727		sbitmap_add_wait_queue(domain_tokens, ws, wait);
 728
 729		/*
 730		 * Try again in case a token was freed before we got on the wait
 731		 * queue.
 732		 */
 733		nr = __sbitmap_queue_get(domain_tokens);
 734	}
 735
 736	/*
 737	 * If we got a token while we were on the wait queue, remove ourselves
 738	 * from the wait queue to ensure that all wake ups make forward
 739	 * progress. It's possible that the waker already deleted the entry
 740	 * between the !list_empty_careful() check and us grabbing the lock, but
 741	 * list_del_init() is okay with that.
 742	 */
 743	if (nr >= 0 && !list_empty_careful(&wait->wait.entry)) {
 744		ws = khd->domain_ws[sched_domain];
 745		spin_lock_irq(&ws->wait.lock);
 746		sbitmap_del_wait_queue(wait);
 747		spin_unlock_irq(&ws->wait.lock);
 748	}
 749
 750	return nr;
 751}
 752
static struct request *
kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
			  struct kyber_hctx_data *khd,
			  struct blk_mq_hw_ctx *hctx)
{
	struct list_head *rqs;
	struct request *rq;
	int nr;

	rqs = &khd->rqs[khd->cur_domain];

	/*
	 * If we already have a flushed request, then we just need to get a
	 * token for it. Otherwise, if there are pending requests in the kcqs,
	 * flush the kcqs, but only if we can get a token. If not, we should
	 * leave the requests in the kcqs so that they can be merged. Note that
	 * khd->lock serializes the flushes, so if we observed any bit set in
	 * the kcq_map, we will always get a request.
	 */
	rq = list_first_entry_or_null(rqs, struct request, queuelist);
	if (rq) {
		nr = kyber_get_domain_token(kqd, khd, hctx);
		if (nr >= 0) {
			khd->batching++;
			rq_set_domain_token(rq, nr);
			list_del_init(&rq->queuelist);
			return rq;
		} else {
			trace_kyber_throttled(kqd->dev,
					      kyber_domain_names[khd->cur_domain]);
		}
	} else if (sbitmap_any_bit_set(&khd->kcq_map[khd->cur_domain])) {
		nr = kyber_get_domain_token(kqd, khd, hctx);
		if (nr >= 0) {
			kyber_flush_busy_kcqs(khd, khd->cur_domain, rqs);
			rq = list_first_entry(rqs, struct request, queuelist);
			khd->batching++;
			rq_set_domain_token(rq, nr);
			list_del_init(&rq->queuelist);
			return rq;
		} else {
			trace_kyber_throttled(kqd->dev,
					      kyber_domain_names[khd->cur_domain]);
		}
	}

	/* There were either no pending requests or no tokens. */
	return NULL;
}

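/*
 * Main dispatch entry point: continue dispatching from the current
 * domain while we are entitled to a batch, otherwise rotate round-robin
 * through the domains and start a new batch on the first one that has
 * both a pending request and an available token.
 */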
static struct request *kyber_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
	struct kyber_hctx_data *khd = hctx->sched_data;
	struct request *rq;
	int i;

	spin_lock(&khd->lock);

	/*
	 * First, if we are still entitled to batch, try to dispatch a request
	 * from the batch.
	 */
	if (khd->batching < kyber_batch_size[khd->cur_domain]) {
		rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
		if (rq)
			goto out;
	}

	/*
	 * We get here because either:
	 *
	 * 1. We were no longer entitled to a batch.
	 * 2. The domain we were batching didn't have any requests.
	 * 3. The domain we were batching was out of tokens.
	 *
	 * Start another batch. Note that this wraps back around to the original
	 * domain if no other domains have requests or tokens.
	 */
	khd->batching = 0;
	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		if (khd->cur_domain == KYBER_NUM_DOMAINS - 1)
			khd->cur_domain = 0;
		else
			khd->cur_domain++;

		rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
		if (rq)
			goto out;
	}

	rq = NULL;
out:
	spin_unlock(&khd->lock);
	return rq;
}

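/*
 * We have work if any domain has requests that were already flushed to
 * khd->rqs or still sitting in a kcq (indicated by a set kcq_map bit).
 */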
static bool kyber_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct kyber_hctx_data *khd = hctx->sched_data;
	int i;

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		if (!list_empty_careful(&khd->rqs[i]) ||
		    sbitmap_any_bit_set(&khd->kcq_map[i]))
			return true;
	}

	return false;
}

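/*
 * sysfs interface for the per-domain latency targets. The macro below
 * generates the show/store pair for the read and write domains; values
 * are in nanoseconds. For example (device name illustrative), a 1 ms
 * read latency target could be set with:
 *
 *	echo 1000000 > /sys/block/sda/queue/iosched/read_lat_nsec
 */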
#define KYBER_LAT_SHOW_STORE(domain, name)				\
static ssize_t kyber_##name##_lat_show(struct elevator_queue *e,	\
				       char *page)			\
{									\
	struct kyber_queue_data *kqd = e->elevator_data;		\
									\
	return sprintf(page, "%llu\n", kqd->latency_targets[domain]);	\
}									\
									\
static ssize_t kyber_##name##_lat_store(struct elevator_queue *e,	\
					const char *page, size_t count)	\
{									\
	struct kyber_queue_data *kqd = e->elevator_data;		\
	unsigned long long nsec;					\
	int ret;							\
									\
	ret = kstrtoull(page, 10, &nsec);				\
	if (ret)							\
		return ret;						\
									\
	kqd->latency_targets[domain] = nsec;				\
									\
	return count;							\
}
KYBER_LAT_SHOW_STORE(KYBER_READ, read);
KYBER_LAT_SHOW_STORE(KYBER_WRITE, write);
#undef KYBER_LAT_SHOW_STORE

#define KYBER_LAT_ATTR(op) __ATTR(op##_lat_nsec, 0644, kyber_##op##_lat_show, kyber_##op##_lat_store)
static struct elv_fs_entry kyber_sched_attrs[] = {
	KYBER_LAT_ATTR(read),
	KYBER_LAT_ATTR(write),
	__ATTR_NULL
};
#undef KYBER_LAT_ATTR

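/*
 * debugfs attributes exposing scheduler state: per-domain token sbitmaps
 * and async_depth at the queue level, plus per-hctx pending requests,
 * wait status, current domain, and batch count. These appear under the
 * blk-mq debugfs hierarchy (paths illustrative), e.g.
 * /sys/kernel/debug/block/<dev>/sched/ and
 * /sys/kernel/debug/block/<dev>/hctx<N>/sched/.
 */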
#ifdef CONFIG_BLK_DEBUG_FS
#define KYBER_DEBUGFS_DOMAIN_ATTRS(domain, name)			\
static int kyber_##name##_tokens_show(void *data, struct seq_file *m)	\
{									\
	struct request_queue *q = data;					\
	struct kyber_queue_data *kqd = q->elevator->elevator_data;	\
									\
	sbitmap_queue_show(&kqd->domain_tokens[domain], m);		\
	return 0;							\
}									\
									\
static void *kyber_##name##_rqs_start(struct seq_file *m, loff_t *pos)	\
	__acquires(&khd->lock)						\
{									\
	struct blk_mq_hw_ctx *hctx = m->private;			\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
									\
	spin_lock(&khd->lock);						\
	return seq_list_start(&khd->rqs[domain], *pos);			\
}									\
									\
static void *kyber_##name##_rqs_next(struct seq_file *m, void *v,	\
				     loff_t *pos)			\
{									\
	struct blk_mq_hw_ctx *hctx = m->private;			\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
									\
	return seq_list_next(v, &khd->rqs[domain], pos);		\
}									\
									\
static void kyber_##name##_rqs_stop(struct seq_file *m, void *v)	\
	__releases(&khd->lock)						\
{									\
	struct blk_mq_hw_ctx *hctx = m->private;			\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
									\
	spin_unlock(&khd->lock);					\
}									\
									\
static const struct seq_operations kyber_##name##_rqs_seq_ops = {	\
	.start	= kyber_##name##_rqs_start,				\
	.next	= kyber_##name##_rqs_next,				\
	.stop	= kyber_##name##_rqs_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
};									\
									\
static int kyber_##name##_waiting_show(void *data, struct seq_file *m)	\
{									\
	struct blk_mq_hw_ctx *hctx = data;				\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
	wait_queue_entry_t *wait = &khd->domain_wait[domain].wait;	\
									\
	seq_printf(m, "%d\n", !list_empty_careful(&wait->entry));	\
	return 0;							\
}
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_READ, read)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_WRITE, write)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_DISCARD, discard)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_OTHER, other)
#undef KYBER_DEBUGFS_DOMAIN_ATTRS

static int kyber_async_depth_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct kyber_queue_data *kqd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", kqd->async_depth);
	return 0;
}

static int kyber_cur_domain_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct kyber_hctx_data *khd = hctx->sched_data;

	seq_printf(m, "%s\n", kyber_domain_names[khd->cur_domain]);
	return 0;
}

static int kyber_batching_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct kyber_hctx_data *khd = hctx->sched_data;

	seq_printf(m, "%u\n", khd->batching);
	return 0;
}

#define KYBER_QUEUE_DOMAIN_ATTRS(name)	\
	{#name "_tokens", 0400, kyber_##name##_tokens_show}
static const struct blk_mq_debugfs_attr kyber_queue_debugfs_attrs[] = {
	KYBER_QUEUE_DOMAIN_ATTRS(read),
	KYBER_QUEUE_DOMAIN_ATTRS(write),
	KYBER_QUEUE_DOMAIN_ATTRS(discard),
	KYBER_QUEUE_DOMAIN_ATTRS(other),
	{"async_depth", 0400, kyber_async_depth_show},
	{},
};
#undef KYBER_QUEUE_DOMAIN_ATTRS

#define KYBER_HCTX_DOMAIN_ATTRS(name)					\
	{#name "_rqs", 0400, .seq_ops = &kyber_##name##_rqs_seq_ops},	\
	{#name "_waiting", 0400, kyber_##name##_waiting_show}
static const struct blk_mq_debugfs_attr kyber_hctx_debugfs_attrs[] = {
	KYBER_HCTX_DOMAIN_ATTRS(read),
	KYBER_HCTX_DOMAIN_ATTRS(write),
	KYBER_HCTX_DOMAIN_ATTRS(discard),
	KYBER_HCTX_DOMAIN_ATTRS(other),
	{"cur_domain", 0400, kyber_cur_domain_show},
	{"batching", 0400, kyber_batching_show},
	{},
};
#undef KYBER_HCTX_DOMAIN_ATTRS
#endif

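/*
 * Glue Kyber into the blk-mq scheduler framework. Note that requeues are
 * handled by kyber_finish_request() so that a requeued request gives its
 * domain token back instead of holding it while it waits.
 */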
static struct elevator_type kyber_sched = {
	.ops = {
		.init_sched = kyber_init_sched,
		.exit_sched = kyber_exit_sched,
		.init_hctx = kyber_init_hctx,
		.exit_hctx = kyber_exit_hctx,
		.limit_depth = kyber_limit_depth,
		.bio_merge = kyber_bio_merge,
		.prepare_request = kyber_prepare_request,
		.insert_requests = kyber_insert_requests,
		.finish_request = kyber_finish_request,
		.requeue_request = kyber_finish_request,
		.completed_request = kyber_completed_request,
		.dispatch_request = kyber_dispatch_request,
		.has_work = kyber_has_work,
		.depth_updated = kyber_depth_updated,
	},
#ifdef CONFIG_BLK_DEBUG_FS
	.queue_debugfs_attrs = kyber_queue_debugfs_attrs,
	.hctx_debugfs_attrs = kyber_hctx_debugfs_attrs,
#endif
	.elevator_attrs = kyber_sched_attrs,
	.elevator_name = "kyber",
	.elevator_owner = THIS_MODULE,
};

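/* Register/unregister the scheduler with the elevator core at module load/unload. */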
static int __init kyber_init(void)
{
	return elv_register(&kyber_sched);
}

static void __exit kyber_exit(void)
{
	elv_unregister(&kyber_sched);
}

module_init(kyber_init);
module_exit(kyber_exit);

MODULE_AUTHOR("Omar Sandoval");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Kyber I/O scheduler");