// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

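/*
 * Release handler for the queue's "mq" kobject: frees the per-CPU
 * software queue contexts and the containing blk_mq_ctxs structure.
 */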
static void blk_mq_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctxs *ctxs = container_of(kobj, struct blk_mq_ctxs, kobj);

	free_percpu(ctxs->queue_ctx);
	kfree(ctxs);
}

static void blk_mq_ctx_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj);

	/* ctx->ctxs won't be released until all ctx are freed */
	kobject_put(&ctx->ctxs->kobj);
}

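/*
 * Release handler for a hardware queue kobject: frees the flush queue,
 * the ctx_map bitmap, the CPU mask and finally the hctx itself.
 */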
static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
						  kobj);

	blk_free_flush_queue(hctx->fq);
	sbitmap_free(&hctx->ctx_map);
	free_cpumask_var(hctx->cpumask);
	kfree(hctx->ctxs);
	kfree(hctx);
}

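/*
 * Pairs a sysfs attribute with show/store callbacks that operate on a
 * typed hardware queue context rather than a raw kobject.
 */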
struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

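/*
 * Generic show/store dispatchers: resolve the hardware queue and the
 * attribute entry from the kobject, then call the typed handler under
 * the queue's sysfs_lock.
 */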
static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->show)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	res = entry->show(hctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
				     struct attribute *attr, const char *page,
				     size_t length)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->store)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	res = entry->store(hctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_tags);
}

static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
						     char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
}

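/*
 * Print the CPUs mapped to this hardware queue as a comma separated
 * list, truncated to fit into a single page.
 */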
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	const size_t size = PAGE_SIZE - 1;
	unsigned int i, first = 1;
	int ret = 0, pos = 0;

	for_each_cpu(i, hctx->cpumask) {
		if (first)
			ret = snprintf(pos + page, size - pos, "%u", i);
		else
			ret = snprintf(pos + page, size - pos, ", %u", i);

		if (ret >= size - pos)
			break;

		first = 0;
		pos += ret;
	}

	ret = snprintf(pos + page, size + 1 - pos, "\n");
	return pos + ret;
}

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
	.attr = {.name = "nr_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
	.attr = {.name = "nr_reserved_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = 0444 },
	.show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_nr_tags.attr,
	&blk_mq_hw_sysfs_nr_reserved_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	NULL,
};
ATTRIBUTE_GROUPS(default_hw_ctx);

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show = blk_mq_hw_sysfs_show,
	.store = blk_mq_hw_sysfs_store,
};

static struct kobj_type blk_mq_ktype = {
	.release = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
	.release = blk_mq_ctx_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops = &blk_mq_hw_sysfs_ops,
	.default_groups = default_hw_ctx_groups,
	.release = blk_mq_hw_sysfs_release,
};

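/*
 * Remove the sysfs directories of a hardware queue and of every
 * software queue context mapped to it.
 */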
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	int i;

	if (!hctx->nr_ctx)
		return;

	hctx_for_each_ctx(hctx, ctx, i)
		kobject_del(&ctx->kobj);

	kobject_del(&hctx->kobj);
}

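/*
 * Add a sysfs directory for a hardware queue plus one "cpuN" entry per
 * mapped software queue context.  On failure, any ctx kobjects added so
 * far and the hctx kobject itself are removed again.
 */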
static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, j, ret;

	if (!hctx->nr_ctx)
		return 0;

	ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num);
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			goto out;
	}

	return 0;
out:
	hctx_for_each_ctx(hctx, ctx, j) {
		if (j < i)
			kobject_del(&ctx->kobj);
	}
	kobject_del(&hctx->kobj);
	return ret;
}

void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

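/*
 * Drop the references taken in blk_mq_sysfs_init() on the per-CPU ctx
 * kobjects and on the queue's mq kobject.
 */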
void blk_mq_sysfs_deinit(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_put(&ctx->kobj);
	}
	kobject_put(q->mq_kobj);
}

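/*
 * Initialize the queue's mq kobject and one kobject per possible CPU.
 * Each ctx kobject takes an extra reference on the mq kobject so that
 * the latter stays around until every ctx kobject has been released
 * (see blk_mq_ctx_sysfs_release()).
 */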
void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	kobject_init(q->mq_kobj, &blk_mq_ktype);

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);

		kobject_get(q->mq_kobj);
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
	}
}

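/*
 * Create the "mq" directory below the disk's sysfs directory and
 * register every hardware queue.  The caller must hold
 * q->sysfs_dir_lock; on failure, everything registered so far is torn
 * down again.
 */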
int blk_mq_sysfs_register(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	unsigned long i, j;
	int ret;

	lockdep_assert_held(&q->sysfs_dir_lock);

	ret = kobject_add(q->mq_kobj, &disk_to_dev(disk)->kobj, "mq");
	if (ret < 0)
		goto out;

	kobject_uevent(q->mq_kobj, KOBJ_ADD);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			goto unreg;
	}

	q->mq_sysfs_init_done = true;

out:
	return ret;

unreg:
	queue_for_each_hw_ctx(q, hctx, j) {
		if (j < i)
			blk_mq_unregister_hctx(hctx);
	}

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
	return ret;
}

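/*
 * Counterpart of blk_mq_sysfs_register(): unregister every hardware
 * queue and remove the "mq" directory.  The caller must hold
 * q->sysfs_dir_lock.
 */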
void blk_mq_sysfs_unregister(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	lockdep_assert_held(&q->sysfs_dir_lock);

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);

	q->mq_sysfs_init_done = false;
}

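/*
 * Unregister the per-hardware-queue directories while leaving the "mq"
 * directory itself in place.  A no-op if the mq hierarchy has not been
 * registered yet.
 */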
void blk_mq_sysfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	mutex_lock(&q->sysfs_dir_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

unlock:
	mutex_unlock(&q->sysfs_dir_lock);
}

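/*
 * Re-register every hardware queue directory, the counterpart of
 * blk_mq_sysfs_unregister_hctxs().  A no-op if the mq hierarchy has not
 * been registered yet.
 */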
int blk_mq_sysfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;
	int ret = 0;

	mutex_lock(&q->sysfs_dir_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

unlock:
	mutex_unlock(&q->sysfs_dir_lock);

	return ret;
}