1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Userspace block device - block device whose IO is handled from userspace
4 *
5 * Make full use of io_uring passthrough commands for communicating with the
6 * ublk userspace daemon (ublksrvd) for handling basic IO requests.
7 *
8 * Copyright 2022 Ming Lei <ming.lei@redhat.com>
9 *
10 * (part of code stolen from loop.c)
11 */
12#include <linux/module.h>
13#include <linux/moduleparam.h>
14#include <linux/sched.h>
15#include <linux/fs.h>
16#include <linux/pagemap.h>
17#include <linux/file.h>
18#include <linux/stat.h>
19#include <linux/errno.h>
20#include <linux/major.h>
21#include <linux/wait.h>
22#include <linux/blkdev.h>
23#include <linux/init.h>
24#include <linux/swap.h>
25#include <linux/slab.h>
26#include <linux/compat.h>
27#include <linux/mutex.h>
28#include <linux/writeback.h>
29#include <linux/completion.h>
30#include <linux/highmem.h>
31#include <linux/sysfs.h>
32#include <linux/miscdevice.h>
33#include <linux/falloc.h>
34#include <linux/uio.h>
35#include <linux/ioprio.h>
36#include <linux/sched/mm.h>
37#include <linux/uaccess.h>
38#include <linux/cdev.h>
39#include <linux/io_uring/cmd.h>
40#include <linux/blk-mq.h>
41#include <linux/delay.h>
42#include <linux/mm.h>
43#include <asm/page.h>
44#include <linux/task_work.h>
45#include <linux/namei.h>
46#include <linux/kref.h>
47#include <uapi/linux/ublk_cmd.h>
48
49#define UBLK_MINORS (1U << MINORBITS)
50
51/* All UBLK_F_* have to be included into UBLK_F_ALL */
52#define UBLK_F_ALL (UBLK_F_SUPPORT_ZERO_COPY \
53 | UBLK_F_URING_CMD_COMP_IN_TASK \
54 | UBLK_F_NEED_GET_DATA \
55 | UBLK_F_USER_RECOVERY \
56 | UBLK_F_USER_RECOVERY_REISSUE \
57 | UBLK_F_UNPRIVILEGED_DEV \
58 | UBLK_F_CMD_IOCTL_ENCODE \
59 | UBLK_F_USER_COPY \
60 | UBLK_F_ZONED)
61
62/* All UBLK_PARAM_TYPE_* should be included here */
63#define UBLK_PARAM_TYPE_ALL \
64 (UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD | \
65 UBLK_PARAM_TYPE_DEVT | UBLK_PARAM_TYPE_ZONED)
66
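/*
 * Per-request driver data, carved out of the blk-mq request pdu
 * (see tag_set.cmd_size below):
 *
 * @node:	links the request on ubq->io_cmds for batched dispatch to the
 *		daemon's task work
 * @ref:	keeps the request alive while the server copies data via
 *		read()/write() on the char device (UBLK_F_USER_COPY)
 * @sector/@operation/@nr_zones: parameters of REQ_OP_DRV_IN passthrough
 *		requests used to implement UBLK_IO_OP_REPORT_ZONES
 */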
67struct ublk_rq_data {
68 struct llist_node node;
69
70 struct kref ref;
71 __u64 sector;
72 __u32 operation;
73 __u32 nr_zones;
74};
75
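/*
 * Stashed in the io_uring_cmd's pdu area (see ublk_get_uring_cmd_pdu()) so
 * that the task-work and cancel callbacks can recover which queue and tag
 * the command belongs to.
 */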
76struct ublk_uring_cmd_pdu {
77 struct ublk_queue *ubq;
78 u16 tag;
79};
80
81/*
82 * io command is active: sqe cmd is received, and its cqe isn't done
83 *
84 * If the flag is set, the io command is owned by the ublk driver and is
85 * waiting for an incoming blk-mq request from the ublk block device.
86 *
87 * If the flag is cleared, the io command has been completed and is owned by
88 * the ublk server.
89 */
90#define UBLK_IO_FLAG_ACTIVE 0x01
91
92/*
93 * IO command is completed via cqe, and it is being handled by ublksrv, and
94 * not committed yet
95 *
96 * Basically mutually exclusive with UBLK_IO_FLAG_ACTIVE, so it can be used for
97 * cross verification
98 */
99#define UBLK_IO_FLAG_OWNED_BY_SRV 0x02
100
101/*
102 * IO command is aborted, so this flag is set in case of
103 * !UBLK_IO_FLAG_ACTIVE.
104 *
105 * After this flag is observed, any pending or new incoming request
106 * associated with this io command will be failed immediately
107 */
108#define UBLK_IO_FLAG_ABORTED 0x04
109
110/*
111 * UBLK_IO_FLAG_NEED_GET_DATA is set when the IO command requires
112 * getting the data buffer address from ublksrv.
113 *
114 * Then, bio data could be copied into this data buffer for a WRITE request
115 * after the IO command is issued again and UBLK_IO_FLAG_NEED_GET_DATA is unset.
116 */
117#define UBLK_IO_FLAG_NEED_GET_DATA 0x08
118
119/* atomic RW with ubq->cancel_lock */
120#define UBLK_IO_FLAG_CANCELED 0x80000000
121
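/*
 * Rough life cycle of an io command slot:
 *
 * UBLK_IO_FETCH_REQ marks the slot ACTIVE (ublk_fill_io_cmd()); once a blk-mq
 * request is dispatched to the daemon, the uring_cmd is completed, ACTIVE is
 * cleared and OWNED_BY_SRV is set (ubq_complete_io_cmd());
 * UBLK_IO_COMMIT_AND_FETCH_REQ then clears OWNED_BY_SRV, commits the result
 * and re-arms the slot as ACTIVE again.
 */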
122struct ublk_io {
123 /* userspace buffer address from io cmd */
124 __u64 addr;
125 unsigned int flags;
126 int res;
127
128 struct io_uring_cmd *cmd;
129};
130
131struct ublk_queue {
132 int q_id;
133 int q_depth;
134
135 unsigned long flags;
136 struct task_struct *ubq_daemon;
137 char *io_cmd_buf;
138
139 struct llist_head io_cmds;
140
141 unsigned long io_addr; /* mapped vm address */
142 unsigned int max_io_sz;
143 bool force_abort;
144 bool timeout;
145 bool canceling;
146 unsigned short nr_io_ready; /* how many ios setup */
147 spinlock_t cancel_lock;
148 struct ublk_device *dev;
149 struct ublk_io ios[];
150};
151
152struct ublk_device {
153 struct gendisk *ub_disk;
154
155 char *__queues;
156
157 unsigned int queue_size;
158 struct ublksrv_ctrl_dev_info dev_info;
159
160 struct blk_mq_tag_set tag_set;
161
162 struct cdev cdev;
163 struct device cdev_dev;
164
165#define UB_STATE_OPEN 0
166#define UB_STATE_USED 1
167#define UB_STATE_DELETED 2
168 unsigned long state;
169 int ub_number;
170
171 struct mutex mutex;
172
173 spinlock_t lock;
174 struct mm_struct *mm;
175
176 struct ublk_params params;
177
178 struct completion completion;
179 unsigned int nr_queues_ready;
180 unsigned int nr_privileged_daemon;
181
182 struct work_struct quiesce_work;
183 struct work_struct stop_work;
184};
185
186/* header of ublk_params */
187struct ublk_params_header {
188 __u32 len;
189 __u32 types;
190};
191
192static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq);
193
194static inline unsigned int ublk_req_build_flags(struct request *req);
195static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
196 int tag);
197static inline bool ublk_dev_is_user_copy(const struct ublk_device *ub)
198{
199 return ub->dev_info.flags & UBLK_F_USER_COPY;
200}
201
202static inline bool ublk_dev_is_zoned(const struct ublk_device *ub)
203{
204 return ub->dev_info.flags & UBLK_F_ZONED;
205}
206
207static inline bool ublk_queue_is_zoned(struct ublk_queue *ubq)
208{
209 return ubq->flags & UBLK_F_ZONED;
210}
211
212#ifdef CONFIG_BLK_DEV_ZONED
213
214static int ublk_get_nr_zones(const struct ublk_device *ub)
215{
216 const struct ublk_param_basic *p = &ub->params.basic;
217
218 /* Zone size is a power of 2 */
219 return p->dev_sectors >> ilog2(p->chunk_sectors);
220}
221
222static int ublk_revalidate_disk_zones(struct ublk_device *ub)
223{
224 return blk_revalidate_disk_zones(ub->ub_disk, NULL);
225}
226
227static int ublk_dev_param_zoned_validate(const struct ublk_device *ub)
228{
229 const struct ublk_param_zoned *p = &ub->params.zoned;
230 int nr_zones;
231
232 if (!ublk_dev_is_zoned(ub))
233 return -EINVAL;
234
235 if (!p->max_zone_append_sectors)
236 return -EINVAL;
237
238 nr_zones = ublk_get_nr_zones(ub);
239
240 if (p->max_active_zones > nr_zones)
241 return -EINVAL;
242
243 if (p->max_open_zones > nr_zones)
244 return -EINVAL;
245
246 return 0;
247}
248
249static void ublk_dev_param_zoned_apply(struct ublk_device *ub)
250{
251 blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ub->ub_disk->queue);
252 blk_queue_required_elevator_features(ub->ub_disk->queue,
253 ELEVATOR_F_ZBD_SEQ_WRITE);
254 ub->ub_disk->nr_zones = ublk_get_nr_zones(ub);
255}
256
257/* Based on virtblk_alloc_report_buffer */
258static void *ublk_alloc_report_buffer(struct ublk_device *ublk,
259 unsigned int nr_zones, size_t *buflen)
260{
261 struct request_queue *q = ublk->ub_disk->queue;
262 size_t bufsize;
263 void *buf;
264
265 nr_zones = min_t(unsigned int, nr_zones,
266 ublk->ub_disk->nr_zones);
267
268 bufsize = nr_zones * sizeof(struct blk_zone);
269 bufsize =
270 min_t(size_t, bufsize, queue_max_hw_sectors(q) << SECTOR_SHIFT);
271
272 while (bufsize >= sizeof(struct blk_zone)) {
273 buf = kvmalloc(bufsize, GFP_KERNEL | __GFP_NORETRY);
274 if (buf) {
275 *buflen = bufsize;
276 return buf;
277 }
278 bufsize >>= 1;
279 }
280
281 *buflen = 0;
282 return NULL;
283}
284
285static int ublk_report_zones(struct gendisk *disk, sector_t sector,
286 unsigned int nr_zones, report_zones_cb cb, void *data)
287{
288 struct ublk_device *ub = disk->private_data;
289 unsigned int zone_size_sectors = disk->queue->limits.chunk_sectors;
290 unsigned int first_zone = sector >> ilog2(zone_size_sectors);
291 unsigned int done_zones = 0;
292 unsigned int max_zones_per_request;
293 int ret;
294 struct blk_zone *buffer;
295 size_t buffer_length;
296
297 nr_zones = min_t(unsigned int, ub->ub_disk->nr_zones - first_zone,
298 nr_zones);
299
300 buffer = ublk_alloc_report_buffer(ub, nr_zones, &buffer_length);
301 if (!buffer)
302 return -ENOMEM;
303
304 max_zones_per_request = buffer_length / sizeof(struct blk_zone);
305
306 while (done_zones < nr_zones) {
307 unsigned int remaining_zones = nr_zones - done_zones;
308 unsigned int zones_in_request =
309 min_t(unsigned int, remaining_zones, max_zones_per_request);
310 struct request *req;
311 struct ublk_rq_data *pdu;
312 blk_status_t status;
313
314 memset(buffer, 0, buffer_length);
315
316 req = blk_mq_alloc_request(disk->queue, REQ_OP_DRV_IN, 0);
317 if (IS_ERR(req)) {
318 ret = PTR_ERR(req);
319 goto out;
320 }
321
322 pdu = blk_mq_rq_to_pdu(req);
323 pdu->operation = UBLK_IO_OP_REPORT_ZONES;
324 pdu->sector = sector;
325 pdu->nr_zones = zones_in_request;
326
327 ret = blk_rq_map_kern(disk->queue, req, buffer, buffer_length,
328 GFP_KERNEL);
329 if (ret) {
330 blk_mq_free_request(req);
331 goto out;
332 }
333
334 status = blk_execute_rq(req, 0);
335 ret = blk_status_to_errno(status);
336 blk_mq_free_request(req);
337 if (ret)
338 goto out;
339
340 for (unsigned int i = 0; i < zones_in_request; i++) {
341 struct blk_zone *zone = buffer + i;
342
343 /* A zero length zone means no more zones in this response */
344 if (!zone->len)
345 break;
346
347 ret = cb(zone, i, data);
348 if (ret)
349 goto out;
350
351 done_zones++;
352 sector += zone_size_sectors;
353
354 }
355 }
356
357 ret = done_zones;
358
359out:
360 kvfree(buffer);
361 return ret;
362}
363
364static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
365 struct request *req)
366{
367 struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
368 struct ublk_io *io = &ubq->ios[req->tag];
369 struct ublk_rq_data *pdu = blk_mq_rq_to_pdu(req);
370 u32 ublk_op;
371
372 switch (req_op(req)) {
373 case REQ_OP_ZONE_OPEN:
374 ublk_op = UBLK_IO_OP_ZONE_OPEN;
375 break;
376 case REQ_OP_ZONE_CLOSE:
377 ublk_op = UBLK_IO_OP_ZONE_CLOSE;
378 break;
379 case REQ_OP_ZONE_FINISH:
380 ublk_op = UBLK_IO_OP_ZONE_FINISH;
381 break;
382 case REQ_OP_ZONE_RESET:
383 ublk_op = UBLK_IO_OP_ZONE_RESET;
384 break;
385 case REQ_OP_ZONE_APPEND:
386 ublk_op = UBLK_IO_OP_ZONE_APPEND;
387 break;
388 case REQ_OP_ZONE_RESET_ALL:
389 ublk_op = UBLK_IO_OP_ZONE_RESET_ALL;
390 break;
391 case REQ_OP_DRV_IN:
392 ublk_op = pdu->operation;
393 switch (ublk_op) {
394 case UBLK_IO_OP_REPORT_ZONES:
395 iod->op_flags = ublk_op | ublk_req_build_flags(req);
396 iod->nr_zones = pdu->nr_zones;
397 iod->start_sector = pdu->sector;
398 return BLK_STS_OK;
399 default:
400 return BLK_STS_IOERR;
401 }
402 case REQ_OP_DRV_OUT:
403 /* We do not support drv_out */
404 return BLK_STS_NOTSUPP;
405 default:
406 return BLK_STS_IOERR;
407 }
408
409 iod->op_flags = ublk_op | ublk_req_build_flags(req);
410 iod->nr_sectors = blk_rq_sectors(req);
411 iod->start_sector = blk_rq_pos(req);
412 iod->addr = io->addr;
413
414 return BLK_STS_OK;
415}
416
417#else
418
419#define ublk_report_zones (NULL)
420
421static int ublk_dev_param_zoned_validate(const struct ublk_device *ub)
422{
423 return -EOPNOTSUPP;
424}
425
426static void ublk_dev_param_zoned_apply(struct ublk_device *ub)
427{
428}
429
430static int ublk_revalidate_disk_zones(struct ublk_device *ub)
431{
432 return 0;
433}
434
435static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
436 struct request *req)
437{
438 return BLK_STS_NOTSUPP;
439}
440
441#endif
442
443static inline void __ublk_complete_rq(struct request *req);
444static void ublk_complete_rq(struct kref *ref);
445
446static dev_t ublk_chr_devt;
447static const struct class ublk_chr_class = {
448 .name = "ublk-char",
449};
450
451static DEFINE_IDR(ublk_index_idr);
452static DEFINE_SPINLOCK(ublk_idr_lock);
453static wait_queue_head_t ublk_idr_wq; /* wait until one idr is freed */
454
455static DEFINE_MUTEX(ublk_ctl_mutex);
456
457/*
458 * Max number of ublk devices allowed to be added
459 *
460 * It can be extended to a per-user limit in the future, or even be controlled
461 * by cgroup.
462 */
463#define UBLK_MAX_UBLKS UBLK_MINORS
464static unsigned int ublks_max = 64;
465static unsigned int ublks_added; /* protected by ublk_ctl_mutex */
466
467static struct miscdevice ublk_misc;
468
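/*
 * With UBLK_F_USER_COPY the server transfers IO data via pread()/pwrite() on
 * the char device, and the file position encodes which request buffer is
 * addressed. The decoding helpers below imply a layout roughly of:
 *
 *   pos = UBLKSRV_IO_BUF_OFFSET +
 *         (q_id << UBLK_QID_OFF) + (tag << UBLK_TAG_OFF) + buf_offset
 */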
469static inline unsigned ublk_pos_to_hwq(loff_t pos)
470{
471 return ((pos - UBLKSRV_IO_BUF_OFFSET) >> UBLK_QID_OFF) &
472 UBLK_QID_BITS_MASK;
473}
474
475static inline unsigned ublk_pos_to_buf_off(loff_t pos)
476{
477 return (pos - UBLKSRV_IO_BUF_OFFSET) & UBLK_IO_BUF_BITS_MASK;
478}
479
480static inline unsigned ublk_pos_to_tag(loff_t pos)
481{
482 return ((pos - UBLKSRV_IO_BUF_OFFSET) >> UBLK_TAG_OFF) &
483 UBLK_TAG_BITS_MASK;
484}
485
486static void ublk_dev_param_basic_apply(struct ublk_device *ub)
487{
488 struct request_queue *q = ub->ub_disk->queue;
489 const struct ublk_param_basic *p = &ub->params.basic;
490
491 blk_queue_write_cache(q, p->attrs & UBLK_ATTR_VOLATILE_CACHE,
492 p->attrs & UBLK_ATTR_FUA);
493 if (p->attrs & UBLK_ATTR_ROTATIONAL)
494 blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
495 else
496 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
497
498 if (p->attrs & UBLK_ATTR_READ_ONLY)
499 set_disk_ro(ub->ub_disk, true);
500
501 set_capacity(ub->ub_disk, p->dev_sectors);
502}
503
504static int ublk_validate_params(const struct ublk_device *ub)
505{
506 /* basic param is the only one which must be set */
507 if (ub->params.types & UBLK_PARAM_TYPE_BASIC) {
508 const struct ublk_param_basic *p = &ub->params.basic;
509
510 if (p->logical_bs_shift > PAGE_SHIFT || p->logical_bs_shift < 9)
511 return -EINVAL;
512
513 if (p->logical_bs_shift > p->physical_bs_shift)
514 return -EINVAL;
515
516 if (p->max_sectors > (ub->dev_info.max_io_buf_bytes >> 9))
517 return -EINVAL;
518
519 if (ublk_dev_is_zoned(ub) && !p->chunk_sectors)
520 return -EINVAL;
521 } else
522 return -EINVAL;
523
524 if (ub->params.types & UBLK_PARAM_TYPE_DISCARD) {
525 const struct ublk_param_discard *p = &ub->params.discard;
526
527 /* So far, only support single segment discard */
528 if (p->max_discard_sectors && p->max_discard_segments != 1)
529 return -EINVAL;
530
531 if (!p->discard_granularity)
532 return -EINVAL;
533 }
534
535 /* dev_t is read-only */
536 if (ub->params.types & UBLK_PARAM_TYPE_DEVT)
537 return -EINVAL;
538
539 if (ub->params.types & UBLK_PARAM_TYPE_ZONED)
540 return ublk_dev_param_zoned_validate(ub);
541 else if (ublk_dev_is_zoned(ub))
542 return -EINVAL;
543
544 return 0;
545}
546
547static void ublk_apply_params(struct ublk_device *ub)
548{
549 ublk_dev_param_basic_apply(ub);
550
551 if (ub->params.types & UBLK_PARAM_TYPE_ZONED)
552 ublk_dev_param_zoned_apply(ub);
553}
554
555static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
556{
557 return ubq->flags & UBLK_F_USER_COPY;
558}
559
560static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
561{
562 /*
563 * read()/write() is involved in user copy, so request reference
564 * has to be grabbed
565 */
566 return ublk_support_user_copy(ubq);
567}
568
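/*
 * Request reference scheme (only needed with user copy): the ref is
 * initialized at dispatch time, grabbed by __ublk_check_and_get_req() for each
 * read()/write() on the char device, and the request is completed when the
 * last reference is dropped (ublk_complete_rq()). Without user copy,
 * ublk_put_req_ref() completes the request directly.
 */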
569static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
570 struct request *req)
571{
572 if (ublk_need_req_ref(ubq)) {
573 struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
574
575 kref_init(&data->ref);
576 }
577}
578
579static inline bool ublk_get_req_ref(const struct ublk_queue *ubq,
580 struct request *req)
581{
582 if (ublk_need_req_ref(ubq)) {
583 struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
584
585 return kref_get_unless_zero(&data->ref);
586 }
587
588 return true;
589}
590
591static inline void ublk_put_req_ref(const struct ublk_queue *ubq,
592 struct request *req)
593{
594 if (ublk_need_req_ref(ubq)) {
595 struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
596
597 kref_put(&data->ref, ublk_complete_rq);
598 } else {
599 __ublk_complete_rq(req);
600 }
601}
602
603static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
604{
605 return ubq->flags & UBLK_F_NEED_GET_DATA;
606}
607
608/* Called in slow path only, keep it noinline for trace purpose */
609static noinline struct ublk_device *ublk_get_device(struct ublk_device *ub)
610{
611 if (kobject_get_unless_zero(&ub->cdev_dev.kobj))
612 return ub;
613 return NULL;
614}
615
616/* Called in slow path only, keep it noinline for trace purpose */
617static noinline void ublk_put_device(struct ublk_device *ub)
618{
619 put_device(&ub->cdev_dev);
620}
621
622static inline struct ublk_queue *ublk_get_queue(struct ublk_device *dev,
623 int qid)
624{
625 return (struct ublk_queue *)&(dev->__queues[qid * dev->queue_size]);
626}
627
628static inline bool ublk_rq_has_data(const struct request *rq)
629{
630 return bio_has_data(rq->bio);
631}
632
633static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
634 int tag)
635{
636 return (struct ublksrv_io_desc *)
637 &(ubq->io_cmd_buf[tag * sizeof(struct ublksrv_io_desc)]);
638}
639
640static inline char *ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
641{
642 return ublk_get_queue(ub, q_id)->io_cmd_buf;
643}
644
645static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)
646{
647 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
648
649 return round_up(ubq->q_depth * sizeof(struct ublksrv_io_desc),
650 PAGE_SIZE);
651}
652
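/*
 * With UBLK_F_USER_RECOVERY, requests are requeued instead of failed when the
 * daemon dies, so a new daemon can take over later; with
 * UBLK_F_USER_RECOVERY_REISSUE, even requests already dispatched to the dead
 * daemon are requeued and reissued (see __ublk_fail_req()).
 */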
653static inline bool ublk_queue_can_use_recovery_reissue(
654 struct ublk_queue *ubq)
655{
656 return (ubq->flags & UBLK_F_USER_RECOVERY) &&
657 (ubq->flags & UBLK_F_USER_RECOVERY_REISSUE);
658}
659
660static inline bool ublk_queue_can_use_recovery(
661 struct ublk_queue *ubq)
662{
663 return ubq->flags & UBLK_F_USER_RECOVERY;
664}
665
666static inline bool ublk_can_use_recovery(struct ublk_device *ub)
667{
668 return ub->dev_info.flags & UBLK_F_USER_RECOVERY;
669}
670
671static void ublk_free_disk(struct gendisk *disk)
672{
673 struct ublk_device *ub = disk->private_data;
674
675 clear_bit(UB_STATE_USED, &ub->state);
676 ublk_put_device(ub);
677}
678
679static void ublk_store_owner_uid_gid(unsigned int *owner_uid,
680 unsigned int *owner_gid)
681{
682 kuid_t uid;
683 kgid_t gid;
684
685 current_uid_gid(&uid, &gid);
686
687 *owner_uid = from_kuid(&init_user_ns, uid);
688 *owner_gid = from_kgid(&init_user_ns, gid);
689}
690
691static int ublk_open(struct gendisk *disk, blk_mode_t mode)
692{
693 struct ublk_device *ub = disk->private_data;
694
695 if (capable(CAP_SYS_ADMIN))
696 return 0;
697
698 /*
699 * If it is an unprivileged device, only the owner can open
700 * the disk. Otherwise it could be a trap set by an
701 * evil user who deliberately grants this disk's privileges
702 * to other users.
703 *
704 * This is reasonable given that anyone can create an
705 * unprivileged device without needing anyone else's grant.
706 */
707 if (ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV) {
708 unsigned int curr_uid, curr_gid;
709
710 ublk_store_owner_uid_gid(&curr_uid, &curr_gid);
711
712 if (curr_uid != ub->dev_info.owner_uid || curr_gid !=
713 ub->dev_info.owner_gid)
714 return -EPERM;
715 }
716
717 return 0;
718}
719
720static const struct block_device_operations ub_fops = {
721 .owner = THIS_MODULE,
722 .open = ublk_open,
723 .free_disk = ublk_free_disk,
724 .report_zones = ublk_report_zones,
725};
726
727#define UBLK_MAX_PIN_PAGES 32
728
729struct ublk_io_iter {
730 struct page *pages[UBLK_MAX_PIN_PAGES];
731 struct bio *bio;
732 struct bvec_iter iter;
733};
734
735/* copy 'total' bytes between the pinned pages and the request's bio data */
736static void ublk_copy_io_pages(struct ublk_io_iter *data,
737 size_t total, size_t pg_off, int dir)
738{
739 unsigned done = 0;
740 unsigned pg_idx = 0;
741
742 while (done < total) {
743 struct bio_vec bv = bio_iter_iovec(data->bio, data->iter);
744 unsigned int bytes = min3(bv.bv_len, (unsigned)total - done,
745 (unsigned)(PAGE_SIZE - pg_off));
746 void *bv_buf = bvec_kmap_local(&bv);
747 void *pg_buf = kmap_local_page(data->pages[pg_idx]);
748
749 if (dir == ITER_DEST)
750 memcpy(pg_buf + pg_off, bv_buf, bytes);
751 else
752 memcpy(bv_buf, pg_buf + pg_off, bytes);
753
754 kunmap_local(pg_buf);
755 kunmap_local(bv_buf);
756
757 /* advance page array */
758 pg_off += bytes;
759 if (pg_off == PAGE_SIZE) {
760 pg_idx += 1;
761 pg_off = 0;
762 }
763
764 done += bytes;
765
766 /* advance bio */
767 bio_advance_iter_single(data->bio, &data->iter, bytes);
768 if (!data->iter.bi_size) {
769 data->bio = data->bio->bi_next;
770 if (data->bio == NULL)
771 break;
772 data->iter = data->bio->bi_iter;
773 }
774 }
775}
776
777static bool ublk_advance_io_iter(const struct request *req,
778 struct ublk_io_iter *iter, unsigned int offset)
779{
780 struct bio *bio = req->bio;
781
782 for_each_bio(bio) {
783 if (bio->bi_iter.bi_size > offset) {
784 iter->bio = bio;
785 iter->iter = bio->bi_iter;
786 bio_advance_iter(iter->bio, &iter->iter, offset);
787 return true;
788 }
789 offset -= bio->bi_iter.bi_size;
790 }
791 return false;
792}
793
794/*
795 * Copy data between request pages and the iov_iter; 'offset' is the
796 * linear offset into the request where copying starts.
797 */
798static size_t ublk_copy_user_pages(const struct request *req,
799 unsigned offset, struct iov_iter *uiter, int dir)
800{
801 struct ublk_io_iter iter;
802 size_t done = 0;
803
804 if (!ublk_advance_io_iter(req, &iter, offset))
805 return 0;
806
807 while (iov_iter_count(uiter) && iter.bio) {
808 unsigned nr_pages;
809 ssize_t len;
810 size_t off;
811 int i;
812
813 len = iov_iter_get_pages2(uiter, iter.pages,
814 iov_iter_count(uiter),
815 UBLK_MAX_PIN_PAGES, &off);
816 if (len <= 0)
817 return done;
818
819 ublk_copy_io_pages(&iter, len, off, dir);
820 nr_pages = DIV_ROUND_UP(len + off, PAGE_SIZE);
821 for (i = 0; i < nr_pages; i++) {
822 if (dir == ITER_DEST)
823 set_page_dirty(iter.pages[i]);
824 put_page(iter.pages[i]);
825 }
826 done += len;
827 }
828
829 return done;
830}
831
832static inline bool ublk_need_map_req(const struct request *req)
833{
834 return ublk_rq_has_data(req) && req_op(req) == REQ_OP_WRITE;
835}
836
837static inline bool ublk_need_unmap_req(const struct request *req)
838{
839 return ublk_rq_has_data(req) &&
840 (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_DRV_IN);
841}
842
843static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
844 struct ublk_io *io)
845{
846 const unsigned int rq_bytes = blk_rq_bytes(req);
847
848 if (ublk_support_user_copy(ubq))
849 return rq_bytes;
850
851 /*
852 * no zero copy, so we delay copying WRITE request data into the ublksrv
853 * context; the big benefit is that pinning pages in the current
854 * context is pretty fast, see ublk_copy_user_pages()
855 */
856 if (ublk_need_map_req(req)) {
857 struct iov_iter iter;
858 const int dir = ITER_DEST;
859
860 import_ubuf(dir, u64_to_user_ptr(io->addr), rq_bytes, &iter);
861 return ublk_copy_user_pages(req, 0, &iter, dir);
862 }
863 return rq_bytes;
864}
865
866static int ublk_unmap_io(const struct ublk_queue *ubq,
867 const struct request *req,
868 struct ublk_io *io)
869{
870 const unsigned int rq_bytes = blk_rq_bytes(req);
871
872 if (ublk_support_user_copy(ubq))
873 return rq_bytes;
874
875 if (ublk_need_unmap_req(req)) {
876 struct iov_iter iter;
877 const int dir = ITER_SOURCE;
878
879 WARN_ON_ONCE(io->res > rq_bytes);
880
881 import_ubuf(dir, u64_to_user_ptr(io->addr), io->res, &iter);
882 return ublk_copy_user_pages(req, 0, &iter, dir);
883 }
884 return rq_bytes;
885}
886
887static inline unsigned int ublk_req_build_flags(struct request *req)
888{
889 unsigned flags = 0;
890
891 if (req->cmd_flags & REQ_FAILFAST_DEV)
892 flags |= UBLK_IO_F_FAILFAST_DEV;
893
894 if (req->cmd_flags & REQ_FAILFAST_TRANSPORT)
895 flags |= UBLK_IO_F_FAILFAST_TRANSPORT;
896
897 if (req->cmd_flags & REQ_FAILFAST_DRIVER)
898 flags |= UBLK_IO_F_FAILFAST_DRIVER;
899
900 if (req->cmd_flags & REQ_META)
901 flags |= UBLK_IO_F_META;
902
903 if (req->cmd_flags & REQ_FUA)
904 flags |= UBLK_IO_F_FUA;
905
906 if (req->cmd_flags & REQ_NOUNMAP)
907 flags |= UBLK_IO_F_NOUNMAP;
908
909 if (req->cmd_flags & REQ_SWAP)
910 flags |= UBLK_IO_F_SWAP;
911
912 return flags;
913}
914
915static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req)
916{
917 struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
918 struct ublk_io *io = &ubq->ios[req->tag];
919 enum req_op op = req_op(req);
920 u32 ublk_op;
921
922 if (!ublk_queue_is_zoned(ubq) &&
923 (op_is_zone_mgmt(op) || op == REQ_OP_ZONE_APPEND))
924 return BLK_STS_IOERR;
925
926 switch (req_op(req)) {
927 case REQ_OP_READ:
928 ublk_op = UBLK_IO_OP_READ;
929 break;
930 case REQ_OP_WRITE:
931 ublk_op = UBLK_IO_OP_WRITE;
932 break;
933 case REQ_OP_FLUSH:
934 ublk_op = UBLK_IO_OP_FLUSH;
935 break;
936 case REQ_OP_DISCARD:
937 ublk_op = UBLK_IO_OP_DISCARD;
938 break;
939 case REQ_OP_WRITE_ZEROES:
940 ublk_op = UBLK_IO_OP_WRITE_ZEROES;
941 break;
942 default:
943 if (ublk_queue_is_zoned(ubq))
944 return ublk_setup_iod_zoned(ubq, req);
945 return BLK_STS_IOERR;
946 }
947
948 /* need to translate since kernel REQ_OP_* values may change */
949 iod->op_flags = ublk_op | ublk_req_build_flags(req);
950 iod->nr_sectors = blk_rq_sectors(req);
951 iod->start_sector = blk_rq_pos(req);
952 iod->addr = io->addr;
953
954 return BLK_STS_OK;
955}
956
957static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
958 struct io_uring_cmd *ioucmd)
959{
960 return (struct ublk_uring_cmd_pdu *)&ioucmd->pdu;
961}
962
963static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
964{
965 return ubq->ubq_daemon->flags & PF_EXITING;
966}
967
968/* todo: handle partial completion */
969static inline void __ublk_complete_rq(struct request *req)
970{
971 struct ublk_queue *ubq = req->mq_hctx->driver_data;
972 struct ublk_io *io = &ubq->ios[req->tag];
973 unsigned int unmapped_bytes;
974 blk_status_t res = BLK_STS_OK;
975
976 /* called from ublk_abort_queue() code path */
977 if (io->flags & UBLK_IO_FLAG_ABORTED) {
978 res = BLK_STS_IOERR;
979 goto exit;
980 }
981
982 /* fail the read IO if nothing was read */
983 if (!io->res && req_op(req) == REQ_OP_READ)
984 io->res = -EIO;
985
986 if (io->res < 0) {
987 res = errno_to_blk_status(io->res);
988 goto exit;
989 }
990
991 /*
992 * FLUSH, DISCARD or WRITE_ZEROES usually won't return any data bytes, so end
993 * them directly.
994 *
995 * None of them needs unmapping.
996 */
997 if (req_op(req) != REQ_OP_READ && req_op(req) != REQ_OP_WRITE &&
998 req_op(req) != REQ_OP_DRV_IN)
999 goto exit;
1000
1001 /* for READ request, writing data in iod->addr to rq buffers */
1002 unmapped_bytes = ublk_unmap_io(ubq, req, io);
1003
1004 /*
1005 * Extremely unlikely since the data was just filled in above
1006 *
1007 * Re-read simply for this unlikely case.
1008 */
1009 if (unlikely(unmapped_bytes < io->res))
1010 io->res = unmapped_bytes;
1011
1012 if (blk_update_request(req, BLK_STS_OK, io->res))
1013 blk_mq_requeue_request(req, true);
1014 else
1015 __blk_mq_end_request(req, BLK_STS_OK);
1016
1017 return;
1018exit:
1019 blk_mq_end_request(req, res);
1020}
1021
1022static void ublk_complete_rq(struct kref *ref)
1023{
1024 struct ublk_rq_data *data = container_of(ref, struct ublk_rq_data,
1025 ref);
1026 struct request *req = blk_mq_rq_from_pdu(data);
1027
1028 __ublk_complete_rq(req);
1029}
1030
1031/*
1032 * Since __ublk_rq_task_work always fails requests immediately during
1033 * exiting, __ublk_fail_req() is only called from abort context during
1034 * exiting. So no lock is necessary.
1035 *
1036 * Also, aborting may not have started yet; keep in mind that a failed
1037 * request may be issued by the block layer again.
1038 */
1039static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
1040 struct request *req)
1041{
1042 WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
1043
1044 if (ublk_queue_can_use_recovery_reissue(ubq))
1045 blk_mq_requeue_request(req, false);
1046 else
1047 ublk_put_req_ref(ubq, req);
1048}
1049
1050static void ubq_complete_io_cmd(struct ublk_io *io, int res,
1051 unsigned issue_flags)
1052{
1053 /* mark this cmd owned by ublksrv */
1054 io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;
1055
1056 /*
1057 * clear ACTIVE since we are done with this sqe/cmd slot
1058 * We can only accept a new io cmd while it is not active.
1059 */
1060 io->flags &= ~UBLK_IO_FLAG_ACTIVE;
1061
1062 /* tell ublksrv one io request is coming */
1063 io_uring_cmd_done(io->cmd, res, 0, issue_flags);
1064}
1065
1066#define UBLK_REQUEUE_DELAY_MS 3
1067
1068static inline void __ublk_abort_rq(struct ublk_queue *ubq,
1069 struct request *rq)
1070{
1071 /* We cannot process this rq so just requeue it. */
1072 if (ublk_queue_can_use_recovery(ubq))
1073 blk_mq_requeue_request(rq, false);
1074 else
1075 blk_mq_end_request(rq, BLK_STS_IOERR);
1076}
1077
1078static inline void __ublk_rq_task_work(struct request *req,
1079 unsigned issue_flags)
1080{
1081 struct ublk_queue *ubq = req->mq_hctx->driver_data;
1082 int tag = req->tag;
1083 struct ublk_io *io = &ubq->ios[tag];
1084 unsigned int mapped_bytes;
1085
1086 pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n",
1087 __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
1088 ublk_get_iod(ubq, req->tag)->addr);
1089
1090 /*
1091 * Task is exiting if either:
1092 *
1093 * (1) current != ubq_daemon.
1094 * io_uring_cmd_complete_in_task() tries to run task_work
1095 * in a workqueue if ubq_daemon(cmd's task) is PF_EXITING.
1096 *
1097 * (2) current->flags & PF_EXITING.
1098 */
1099 if (unlikely(current != ubq->ubq_daemon || current->flags & PF_EXITING)) {
1100 __ublk_abort_rq(ubq, req);
1101 return;
1102 }
1103
1104 if (ublk_need_get_data(ubq) && ublk_need_map_req(req)) {
1105 /*
1106 * We have not handled UBLK_IO_NEED_GET_DATA command yet,
1107 * so immediately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
1108 * and notify it.
1109 */
1110 if (!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA)) {
1111 io->flags |= UBLK_IO_FLAG_NEED_GET_DATA;
1112 pr_devel("%s: need get data. op %d, qid %d tag %d io_flags %x\n",
1113 __func__, io->cmd->cmd_op, ubq->q_id,
1114 req->tag, io->flags);
1115 ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA, issue_flags);
1116 return;
1117 }
1118 /*
1119 * We have handled UBLK_IO_NEED_GET_DATA command,
1120 * so clear UBLK_IO_FLAG_NEED_GET_DATA now and just
1121 * do the copy work.
1122 */
1123 io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
1124 /* update iod->addr because ublksrv may have passed a new io buffer */
1125 ublk_get_iod(ubq, req->tag)->addr = io->addr;
1126 pr_devel("%s: update iod->addr: op %d, qid %d tag %d io_flags %x addr %llx\n",
1127 __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
1128 ublk_get_iod(ubq, req->tag)->addr);
1129 }
1130
1131 mapped_bytes = ublk_map_io(ubq, req, io);
1132
1133 /* partially mapped, update io descriptor */
1134 if (unlikely(mapped_bytes != blk_rq_bytes(req))) {
1135 /*
1136 * Nothing mapped, retry until we succeed.
1137 *
1138 * We may never succeed in mapping any bytes here because
1139 * of OOM. TODO: reserve one buffer with single page pinned
1140 * for providing forward progress guarantee.
1141 */
1142 if (unlikely(!mapped_bytes)) {
1143 blk_mq_requeue_request(req, false);
1144 blk_mq_delay_kick_requeue_list(req->q,
1145 UBLK_REQUEUE_DELAY_MS);
1146 return;
1147 }
1148
1149 ublk_get_iod(ubq, req->tag)->nr_sectors =
1150 mapped_bytes >> 9;
1151 }
1152
1153 ublk_init_req_ref(ubq, req);
1154 ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
1155}
1156
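/*
 * Dispatch batching: ublk_queue_cmd() pushes requests onto the lock-free
 * ubq->io_cmds llist and only the first addition schedules task work in the
 * daemon context; the callback below drains the list, restores submission
 * order via llist_reverse_order() and handles each request.
 */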
1157static inline void ublk_forward_io_cmds(struct ublk_queue *ubq,
1158 unsigned issue_flags)
1159{
1160 struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
1161 struct ublk_rq_data *data, *tmp;
1162
1163 io_cmds = llist_reverse_order(io_cmds);
1164 llist_for_each_entry_safe(data, tmp, io_cmds, node)
1165 __ublk_rq_task_work(blk_mq_rq_from_pdu(data), issue_flags);
1166}
1167
1168static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
1169{
1170 struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
1171 struct ublk_queue *ubq = pdu->ubq;
1172
1173 ublk_forward_io_cmds(ubq, issue_flags);
1174}
1175
1176static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
1177{
1178 struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
1179
1180 if (llist_add(&data->node, &ubq->io_cmds)) {
1181 struct ublk_io *io = &ubq->ios[rq->tag];
1182
1183 io_uring_cmd_complete_in_task(io->cmd, ublk_rq_task_work_cb);
1184 }
1185}
1186
1187static enum blk_eh_timer_return ublk_timeout(struct request *rq)
1188{
1189 struct ublk_queue *ubq = rq->mq_hctx->driver_data;
1190 unsigned int nr_inflight = 0;
1191 int i;
1192
1193 if (ubq->flags & UBLK_F_UNPRIVILEGED_DEV) {
1194 if (!ubq->timeout) {
1195 send_sig(SIGKILL, ubq->ubq_daemon, 0);
1196 ubq->timeout = true;
1197 }
1198
1199 return BLK_EH_DONE;
1200 }
1201
1202 if (!ubq_daemon_is_dying(ubq))
1203 return BLK_EH_RESET_TIMER;
1204
1205 for (i = 0; i < ubq->q_depth; i++) {
1206 struct ublk_io *io = &ubq->ios[i];
1207
1208 if (!(io->flags & UBLK_IO_FLAG_ACTIVE))
1209 nr_inflight++;
1210 }
1211
1212 /* cancelable uring_cmd can't help us if all commands are in-flight */
1213 if (nr_inflight == ubq->q_depth) {
1214 struct ublk_device *ub = ubq->dev;
1215
1216 if (ublk_abort_requests(ub, ubq)) {
1217 if (ublk_can_use_recovery(ub))
1218 schedule_work(&ub->quiesce_work);
1219 else
1220 schedule_work(&ub->stop_work);
1221 }
1222 return BLK_EH_DONE;
1223 }
1224
1225 return BLK_EH_RESET_TIMER;
1226}
1227
1228static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
1229 const struct blk_mq_queue_data *bd)
1230{
1231 struct ublk_queue *ubq = hctx->driver_data;
1232 struct request *rq = bd->rq;
1233 blk_status_t res;
1234
1235 /* fill iod to slot in io cmd buffer */
1236 res = ublk_setup_iod(ubq, rq);
1237 if (unlikely(res != BLK_STS_OK))
1238 return BLK_STS_IOERR;
1239
1240 /* With recovery feature enabled, force_abort is set in
1241 * ublk_stop_dev() before calling del_gendisk(). We have to
1242 * abort all requeued and new rqs here to let del_gendisk()
1243 * move on. Besides, we cannot call io_uring_cmd_complete_in_task()
1244 * to avoid UAF on io_uring ctx.
1245 *
1246 * Note: force_abort is guaranteed to be seen because it is set
1247 * before the request queue is unquiesced.
1248 */
1249 if (ublk_queue_can_use_recovery(ubq) && unlikely(ubq->force_abort))
1250 return BLK_STS_IOERR;
1251
1252 if (unlikely(ubq->canceling)) {
1253 __ublk_abort_rq(ubq, rq);
1254 return BLK_STS_OK;
1255 }
1256
1257 blk_mq_start_request(bd->rq);
1258 ublk_queue_cmd(ubq, rq);
1259
1260 return BLK_STS_OK;
1261}
1262
1263static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
1264 unsigned int hctx_idx)
1265{
1266 struct ublk_device *ub = driver_data;
1267 struct ublk_queue *ubq = ublk_get_queue(ub, hctx->queue_num);
1268
1269 hctx->driver_data = ubq;
1270 return 0;
1271}
1272
1273static const struct blk_mq_ops ublk_mq_ops = {
1274 .queue_rq = ublk_queue_rq,
1275 .init_hctx = ublk_init_hctx,
1276 .timeout = ublk_timeout,
1277};
1278
1279static int ublk_ch_open(struct inode *inode, struct file *filp)
1280{
1281 struct ublk_device *ub = container_of(inode->i_cdev,
1282 struct ublk_device, cdev);
1283
1284 if (test_and_set_bit(UB_STATE_OPEN, &ub->state))
1285 return -EBUSY;
1286 filp->private_data = ub;
1287 return 0;
1288}
1289
1290static int ublk_ch_release(struct inode *inode, struct file *filp)
1291{
1292 struct ublk_device *ub = filp->private_data;
1293
1294 clear_bit(UB_STATE_OPEN, &ub->state);
1295 return 0;
1296}
1297
1298/* map pre-allocated per-queue cmd buffer to ublksrv daemon */
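/*
 * A rough userspace sketch (names are illustrative, not from this file): the
 * server maps queue q_id's descriptor array read-only with something like:
 *
 *   iods = mmap(NULL,
 *               round_up(depth * sizeof(struct ublksrv_io_desc), PAGE_SIZE),
 *               PROT_READ, MAP_SHARED, ublkc_fd,
 *               UBLKSRV_CMD_BUF_OFFSET +
 *               q_id * UBLK_MAX_QUEUE_DEPTH * sizeof(struct ublksrv_io_desc));
 */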
1299static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma)
1300{
1301 struct ublk_device *ub = filp->private_data;
1302 size_t sz = vma->vm_end - vma->vm_start;
1303 unsigned max_sz = UBLK_MAX_QUEUE_DEPTH * sizeof(struct ublksrv_io_desc);
1304 unsigned long pfn, end, phys_off = vma->vm_pgoff << PAGE_SHIFT;
1305 int q_id, ret = 0;
1306
1307 spin_lock(&ub->lock);
1308 if (!ub->mm)
1309 ub->mm = current->mm;
1310 if (current->mm != ub->mm)
1311 ret = -EINVAL;
1312 spin_unlock(&ub->lock);
1313
1314 if (ret)
1315 return ret;
1316
1317 if (vma->vm_flags & VM_WRITE)
1318 return -EPERM;
1319
1320 end = UBLKSRV_CMD_BUF_OFFSET + ub->dev_info.nr_hw_queues * max_sz;
1321 if (phys_off < UBLKSRV_CMD_BUF_OFFSET || phys_off >= end)
1322 return -EINVAL;
1323
1324 q_id = (phys_off - UBLKSRV_CMD_BUF_OFFSET) / max_sz;
1325 pr_devel("%s: qid %d, pid %d, addr %lx pg_off %lx sz %lu\n",
1326 __func__, q_id, current->pid, vma->vm_start,
1327 phys_off, (unsigned long)sz);
1328
1329 if (sz != ublk_queue_cmd_buf_size(ub, q_id))
1330 return -EINVAL;
1331
1332 pfn = virt_to_phys(ublk_queue_cmd_buf(ub, q_id)) >> PAGE_SHIFT;
1333 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
1334}
1335
1336static void ublk_commit_completion(struct ublk_device *ub,
1337 const struct ublksrv_io_cmd *ub_cmd)
1338{
1339 u32 qid = ub_cmd->q_id, tag = ub_cmd->tag;
1340 struct ublk_queue *ubq = ublk_get_queue(ub, qid);
1341 struct ublk_io *io = &ubq->ios[tag];
1342 struct request *req;
1343
1344 /* now this cmd slot is owned by the ublk driver */
1345 io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV;
1346 io->res = ub_cmd->result;
1347
1348 /* find the io request and complete */
1349 req = blk_mq_tag_to_rq(ub->tag_set.tags[qid], tag);
1350 if (WARN_ON_ONCE(unlikely(!req)))
1351 return;
1352
1353 if (req_op(req) == REQ_OP_ZONE_APPEND)
1354 req->__sector = ub_cmd->zone_append_lba;
1355
1356 if (likely(!blk_should_fake_timeout(req->q)))
1357 ublk_put_req_ref(ubq, req);
1358}
1359
1360/*
1361 * Called from ubq_daemon context via the cancel fn, and meanwhile the ublk
1362 * blk-mq queue is quiesced, so we are called exclusively with respect to both
1363 * blk-mq and ubq_daemon context; everything is serialized.
1364 */
1365static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
1366{
1367 int i;
1368
1369 for (i = 0; i < ubq->q_depth; i++) {
1370 struct ublk_io *io = &ubq->ios[i];
1371
1372 if (!(io->flags & UBLK_IO_FLAG_ACTIVE)) {
1373 struct request *rq;
1374
1375 /*
1376 * Either we fail the request here or the scheduled task work
1377 * (__ublk_rq_task_work()) will do it
1378 */
1379 rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
1380 if (rq && blk_mq_request_started(rq)) {
1381 io->flags |= UBLK_IO_FLAG_ABORTED;
1382 __ublk_fail_req(ubq, io, rq);
1383 }
1384 }
1385 }
1386}
1387
1388static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq)
1389{
1390 struct gendisk *disk;
1391
1392 spin_lock(&ubq->cancel_lock);
1393 if (ubq->canceling) {
1394 spin_unlock(&ubq->cancel_lock);
1395 return false;
1396 }
1397 ubq->canceling = true;
1398 spin_unlock(&ubq->cancel_lock);
1399
1400 spin_lock(&ub->lock);
1401 disk = ub->ub_disk;
1402 if (disk)
1403 get_device(disk_to_dev(disk));
1404 spin_unlock(&ub->lock);
1405
1406 /* Our disk is already dead */
1407 if (!disk)
1408 return false;
1409
1410 /* Now we are serialized with ublk_queue_rq() */
1411 blk_mq_quiesce_queue(disk->queue);
1412 /* abort queue is for making forward progress */
1413 ublk_abort_queue(ub, ubq);
1414 blk_mq_unquiesce_queue(disk->queue);
1415 put_device(disk_to_dev(disk));
1416
1417 return true;
1418}
1419
1420static void ublk_cancel_cmd(struct ublk_queue *ubq, struct ublk_io *io,
1421 unsigned int issue_flags)
1422{
1423 bool done;
1424
1425 if (!(io->flags & UBLK_IO_FLAG_ACTIVE))
1426 return;
1427
1428 spin_lock(&ubq->cancel_lock);
1429 done = !!(io->flags & UBLK_IO_FLAG_CANCELED);
1430 if (!done)
1431 io->flags |= UBLK_IO_FLAG_CANCELED;
1432 spin_unlock(&ubq->cancel_lock);
1433
1434 if (!done)
1435 io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0, issue_flags);
1436}
1437
1438/*
1439 * The ublk char device won't be closed when calling cancel fn, so both
1440 * ublk device and queue are guaranteed to be live
1441 */
1442static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
1443 unsigned int issue_flags)
1444{
1445 struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
1446 struct ublk_queue *ubq = pdu->ubq;
1447 struct task_struct *task;
1448 struct ublk_device *ub;
1449 bool need_schedule;
1450 struct ublk_io *io;
1451
1452 if (WARN_ON_ONCE(!ubq))
1453 return;
1454
1455 if (WARN_ON_ONCE(pdu->tag >= ubq->q_depth))
1456 return;
1457
1458 task = io_uring_cmd_get_task(cmd);
1459 if (WARN_ON_ONCE(task && task != ubq->ubq_daemon))
1460 return;
1461
1462 ub = ubq->dev;
1463 need_schedule = ublk_abort_requests(ub, ubq);
1464
1465 io = &ubq->ios[pdu->tag];
1466 WARN_ON_ONCE(io->cmd != cmd);
1467 ublk_cancel_cmd(ubq, io, issue_flags);
1468
1469 if (need_schedule) {
1470 if (ublk_can_use_recovery(ub))
1471 schedule_work(&ub->quiesce_work);
1472 else
1473 schedule_work(&ub->stop_work);
1474 }
1475}
1476
1477static inline bool ublk_queue_ready(struct ublk_queue *ubq)
1478{
1479 return ubq->nr_io_ready == ubq->q_depth;
1480}
1481
1482static void ublk_cancel_queue(struct ublk_queue *ubq)
1483{
1484 int i;
1485
1486 for (i = 0; i < ubq->q_depth; i++)
1487 ublk_cancel_cmd(ubq, &ubq->ios[i], IO_URING_F_UNLOCKED);
1488}
1489
1490/* Cancel all pending commands, must be called after del_gendisk() returns */
1491static void ublk_cancel_dev(struct ublk_device *ub)
1492{
1493 int i;
1494
1495 for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
1496 ublk_cancel_queue(ublk_get_queue(ub, i));
1497}
1498
1499static bool ublk_check_inflight_rq(struct request *rq, void *data)
1500{
1501 bool *idle = data;
1502
1503 if (blk_mq_request_started(rq)) {
1504 *idle = false;
1505 return false;
1506 }
1507 return true;
1508}
1509
1510static void ublk_wait_tagset_rqs_idle(struct ublk_device *ub)
1511{
1512 bool idle;
1513
1514 WARN_ON_ONCE(!blk_queue_quiesced(ub->ub_disk->queue));
1515 while (true) {
1516 idle = true;
1517 blk_mq_tagset_busy_iter(&ub->tag_set,
1518 ublk_check_inflight_rq, &idle);
1519 if (idle)
1520 break;
1521 msleep(UBLK_REQUEUE_DELAY_MS);
1522 }
1523}
1524
1525static void __ublk_quiesce_dev(struct ublk_device *ub)
1526{
1527 pr_devel("%s: quiesce ub: dev_id %d state %s\n",
1528 __func__, ub->dev_info.dev_id,
1529 ub->dev_info.state == UBLK_S_DEV_LIVE ?
1530 "LIVE" : "QUIESCED");
1531 blk_mq_quiesce_queue(ub->ub_disk->queue);
1532 ublk_wait_tagset_rqs_idle(ub);
1533 ub->dev_info.state = UBLK_S_DEV_QUIESCED;
1534}
1535
1536static void ublk_quiesce_work_fn(struct work_struct *work)
1537{
1538 struct ublk_device *ub =
1539 container_of(work, struct ublk_device, quiesce_work);
1540
1541 mutex_lock(&ub->mutex);
1542 if (ub->dev_info.state != UBLK_S_DEV_LIVE)
1543 goto unlock;
1544 __ublk_quiesce_dev(ub);
1545 unlock:
1546 mutex_unlock(&ub->mutex);
1547 ublk_cancel_dev(ub);
1548}
1549
1550static void ublk_unquiesce_dev(struct ublk_device *ub)
1551{
1552 int i;
1553
1554 pr_devel("%s: unquiesce ub: dev_id %d state %s\n",
1555 __func__, ub->dev_info.dev_id,
1556 ub->dev_info.state == UBLK_S_DEV_LIVE ?
1557 "LIVE" : "QUIESCED");
1558 /* quiesce_work has run. We let requeued rqs be aborted
1559 * before running fallback_wq. "force_abort" must be seen
1560 * after the request queue is unquiesced. Then del_gendisk()
1561 * can move on.
1562 */
1563 for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
1564 ublk_get_queue(ub, i)->force_abort = true;
1565
1566 blk_mq_unquiesce_queue(ub->ub_disk->queue);
1567 /* We may have requeued some rqs in ublk_quiesce_queue() */
1568 blk_mq_kick_requeue_list(ub->ub_disk->queue);
1569}
1570
1571static void ublk_stop_dev(struct ublk_device *ub)
1572{
1573 struct gendisk *disk;
1574
1575 mutex_lock(&ub->mutex);
1576 if (ub->dev_info.state == UBLK_S_DEV_DEAD)
1577 goto unlock;
1578 if (ublk_can_use_recovery(ub)) {
1579 if (ub->dev_info.state == UBLK_S_DEV_LIVE)
1580 __ublk_quiesce_dev(ub);
1581 ublk_unquiesce_dev(ub);
1582 }
1583 del_gendisk(ub->ub_disk);
1584
1585 /* Sync with ublk_abort_queue() by holding the lock */
1586 spin_lock(&ub->lock);
1587 disk = ub->ub_disk;
1588 ub->dev_info.state = UBLK_S_DEV_DEAD;
1589 ub->dev_info.ublksrv_pid = -1;
1590 ub->ub_disk = NULL;
1591 spin_unlock(&ub->lock);
1592 put_disk(disk);
1593 unlock:
1594 mutex_unlock(&ub->mutex);
1595 ublk_cancel_dev(ub);
1596}
1597
1598/* device can only be started after all IOs are ready */
1599static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
1600{
1601 mutex_lock(&ub->mutex);
1602 ubq->nr_io_ready++;
1603 if (ublk_queue_ready(ubq)) {
1604 ubq->ubq_daemon = current;
1605 get_task_struct(ubq->ubq_daemon);
1606 ub->nr_queues_ready++;
1607
1608 if (capable(CAP_SYS_ADMIN))
1609 ub->nr_privileged_daemon++;
1610 }
1611 if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues)
1612 complete_all(&ub->completion);
1613 mutex_unlock(&ub->mutex);
1614}
1615
1616static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
1617 int tag)
1618{
1619 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
1620 struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
1621
1622 ublk_queue_cmd(ubq, req);
1623}
1624
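/*
 * Command opcodes are accepted in two encodings: the ioctl-style encoding
 * ('u' type, UBLK_F_CMD_IOCTL_ENCODE) always, and the legacy plain encoding
 * (type 0) only when CONFIG_BLKDEV_UBLK_LEGACY_OPCODES is enabled.
 */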
1625static inline int ublk_check_cmd_op(u32 cmd_op)
1626{
1627 u32 ioc_type = _IOC_TYPE(cmd_op);
1628
1629 if (!IS_ENABLED(CONFIG_BLKDEV_UBLK_LEGACY_OPCODES) && ioc_type != 'u')
1630 return -EOPNOTSUPP;
1631
1632 if (ioc_type != 'u' && ioc_type != 0)
1633 return -EOPNOTSUPP;
1634
1635 return 0;
1636}
1637
1638static inline void ublk_fill_io_cmd(struct ublk_io *io,
1639 struct io_uring_cmd *cmd, unsigned long buf_addr)
1640{
1641 io->cmd = cmd;
1642 io->flags |= UBLK_IO_FLAG_ACTIVE;
1643 io->addr = buf_addr;
1644}
1645
1646static inline void ublk_prep_cancel(struct io_uring_cmd *cmd,
1647 unsigned int issue_flags,
1648 struct ublk_queue *ubq, unsigned int tag)
1649{
1650 struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
1651
1652 /*
1653 * Safe to refer to @ubq since the ublk_queue won't be freed until its
1654 * commands are completed
1655 */
1656 pdu->ubq = ubq;
1657 pdu->tag = tag;
1658 io_uring_cmd_mark_cancelable(cmd, issue_flags);
1659}
1660
1661static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
1662 unsigned int issue_flags,
1663 const struct ublksrv_io_cmd *ub_cmd)
1664{
1665 struct ublk_device *ub = cmd->file->private_data;
1666 struct ublk_queue *ubq;
1667 struct ublk_io *io;
1668 u32 cmd_op = cmd->cmd_op;
1669 unsigned tag = ub_cmd->tag;
1670 int ret = -EINVAL;
1671 struct request *req;
1672
1673 pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n",
1674 __func__, cmd->cmd_op, ub_cmd->q_id, tag,
1675 ub_cmd->result);
1676
1677 if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues)
1678 goto out;
1679
1680 ubq = ublk_get_queue(ub, ub_cmd->q_id);
1681 if (!ubq || ub_cmd->q_id != ubq->q_id)
1682 goto out;
1683
1684 if (ubq->ubq_daemon && ubq->ubq_daemon != current)
1685 goto out;
1686
1687 if (tag >= ubq->q_depth)
1688 goto out;
1689
1690 io = &ubq->ios[tag];
1691
1692 /* there is pending io cmd, something must be wrong */
1693 if (io->flags & UBLK_IO_FLAG_ACTIVE) {
1694 ret = -EBUSY;
1695 goto out;
1696 }
1697
1698 /*
1699 * ensure that the user issues UBLK_IO_NEED_GET_DATA
1700 * iff the driver has set UBLK_IO_FLAG_NEED_GET_DATA.
1701 */
1702 if ((!!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA))
1703 ^ (_IOC_NR(cmd_op) == UBLK_IO_NEED_GET_DATA))
1704 goto out;
1705
1706 ret = ublk_check_cmd_op(cmd_op);
1707 if (ret)
1708 goto out;
1709
1710 ret = -EINVAL;
1711 switch (_IOC_NR(cmd_op)) {
1712 case UBLK_IO_FETCH_REQ:
1713 /* UBLK_IO_FETCH_REQ is only allowed before the queue is set up */
1714 if (ublk_queue_ready(ubq)) {
1715 ret = -EBUSY;
1716 goto out;
1717 }
1718 /*
1719 * The io is being handled by server, so COMMIT_RQ is expected
1720 * instead of FETCH_REQ
1721 */
1722 if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
1723 goto out;
1724
1725 if (!ublk_support_user_copy(ubq)) {
1726 /*
1727 * FETCH_RQ has to provide IO buffer if NEED GET
1728 * DATA is not enabled
1729 */
1730 if (!ub_cmd->addr && !ublk_need_get_data(ubq))
1731 goto out;
1732 } else if (ub_cmd->addr) {
1733 /* User copy requires addr to be unset */
1734 ret = -EINVAL;
1735 goto out;
1736 }
1737
1738 ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
1739 ublk_mark_io_ready(ub, ubq);
1740 break;
1741 case UBLK_IO_COMMIT_AND_FETCH_REQ:
1742 req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
1743
1744 if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
1745 goto out;
1746
1747 if (!ublk_support_user_copy(ubq)) {
1748 /*
1749 * COMMIT_AND_FETCH_REQ has to provide IO buffer if
1750 * NEED GET DATA is not enabled or it is Read IO.
1751 */
1752 if (!ub_cmd->addr && (!ublk_need_get_data(ubq) ||
1753 req_op(req) == REQ_OP_READ))
1754 goto out;
1755 } else if (req_op(req) != REQ_OP_ZONE_APPEND && ub_cmd->addr) {
1756 /*
1757 * User copy requires addr to be unset when command is
1758 * not zone append
1759 */
1760 ret = -EINVAL;
1761 goto out;
1762 }
1763
1764 ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
1765 ublk_commit_completion(ub, ub_cmd);
1766 break;
1767 case UBLK_IO_NEED_GET_DATA:
1768 if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
1769 goto out;
1770 ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
1771 ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag);
1772 break;
1773 default:
1774 goto out;
1775 }
1776 ublk_prep_cancel(cmd, issue_flags, ubq, tag);
1777 return -EIOCBQUEUED;
1778
1779 out:
1780 io_uring_cmd_done(cmd, ret, 0, issue_flags);
1781 pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
1782 __func__, cmd_op, tag, ret, io->flags);
1783 return -EIOCBQUEUED;
1784}
1785
1786static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
1787 struct ublk_queue *ubq, int tag, size_t offset)
1788{
1789 struct request *req;
1790
1791 if (!ublk_need_req_ref(ubq))
1792 return NULL;
1793
1794 req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
1795 if (!req)
1796 return NULL;
1797
1798 if (!ublk_get_req_ref(ubq, req))
1799 return NULL;
1800
1801 if (unlikely(!blk_mq_request_started(req) || req->tag != tag))
1802 goto fail_put;
1803
1804 if (!ublk_rq_has_data(req))
1805 goto fail_put;
1806
1807 if (offset > blk_rq_bytes(req))
1808 goto fail_put;
1809
1810 return req;
1811fail_put:
1812 ublk_put_req_ref(ubq, req);
1813 return NULL;
1814}
1815
1816static inline int ublk_ch_uring_cmd_local(struct io_uring_cmd *cmd,
1817 unsigned int issue_flags)
1818{
1819 /*
1820 * Not necessary for async retry, but let's keep it simple and always
1821 * copy the values to avoid any potential reuse.
1822 */
1823 const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe);
1824 const struct ublksrv_io_cmd ub_cmd = {
1825 .q_id = READ_ONCE(ub_src->q_id),
1826 .tag = READ_ONCE(ub_src->tag),
1827 .result = READ_ONCE(ub_src->result),
1828 .addr = READ_ONCE(ub_src->addr)
1829 };
1830
1831 WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED);
1832
1833 return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
1834}
1835
1836static void ublk_ch_uring_cmd_cb(struct io_uring_cmd *cmd,
1837 unsigned int issue_flags)
1838{
1839 ublk_ch_uring_cmd_local(cmd, issue_flags);
1840}
1841
1842static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
1843{
1844 if (unlikely(issue_flags & IO_URING_F_CANCEL)) {
1845 ublk_uring_cmd_cancel_fn(cmd, issue_flags);
1846 return 0;
1847 }
1848
1849 /* a well-implemented server won't run into the unlocked path */
1850 if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) {
1851 io_uring_cmd_complete_in_task(cmd, ublk_ch_uring_cmd_cb);
1852 return -EIOCBQUEUED;
1853 }
1854
1855 return ublk_ch_uring_cmd_local(cmd, issue_flags);
1856}
1857
1858static inline bool ublk_check_ubuf_dir(const struct request *req,
1859 int ubuf_dir)
1860{
1861 /* copy ubuf to request pages */
1862 if ((req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_DRV_IN) &&
1863 ubuf_dir == ITER_SOURCE)
1864 return true;
1865
1866 /* copy request pages to ubuf */
1867 if ((req_op(req) == REQ_OP_WRITE ||
1868 req_op(req) == REQ_OP_ZONE_APPEND) &&
1869 ubuf_dir == ITER_DEST)
1870 return true;
1871
1872 return false;
1873}
1874
1875static struct request *ublk_check_and_get_req(struct kiocb *iocb,
1876 struct iov_iter *iter, size_t *off, int dir)
1877{
1878 struct ublk_device *ub = iocb->ki_filp->private_data;
1879 struct ublk_queue *ubq;
1880 struct request *req;
1881 size_t buf_off;
1882 u16 tag, q_id;
1883
1884 if (!ub)
1885 return ERR_PTR(-EACCES);
1886
1887 if (!user_backed_iter(iter))
1888 return ERR_PTR(-EACCES);
1889
1890 if (ub->dev_info.state == UBLK_S_DEV_DEAD)
1891 return ERR_PTR(-EACCES);
1892
1893 tag = ublk_pos_to_tag(iocb->ki_pos);
1894 q_id = ublk_pos_to_hwq(iocb->ki_pos);
1895 buf_off = ublk_pos_to_buf_off(iocb->ki_pos);
1896
1897 if (q_id >= ub->dev_info.nr_hw_queues)
1898 return ERR_PTR(-EINVAL);
1899
1900 ubq = ublk_get_queue(ub, q_id);
1901 if (!ubq)
1902 return ERR_PTR(-EINVAL);
1903
1904 if (tag >= ubq->q_depth)
1905 return ERR_PTR(-EINVAL);
1906
1907 req = __ublk_check_and_get_req(ub, ubq, tag, buf_off);
1908 if (!req)
1909 return ERR_PTR(-EINVAL);
1910
1911 if (!req->mq_hctx || !req->mq_hctx->driver_data)
1912 goto fail;
1913
1914 if (!ublk_check_ubuf_dir(req, dir))
1915 goto fail;
1916
1917 *off = buf_off;
1918 return req;
1919fail:
1920 ublk_put_req_ref(ubq, req);
1921 return ERR_PTR(-EACCES);
1922}
1923
1924static ssize_t ublk_ch_read_iter(struct kiocb *iocb, struct iov_iter *to)
1925{
1926 struct ublk_queue *ubq;
1927 struct request *req;
1928 size_t buf_off;
1929 size_t ret;
1930
1931 req = ublk_check_and_get_req(iocb, to, &buf_off, ITER_DEST);
1932 if (IS_ERR(req))
1933 return PTR_ERR(req);
1934
1935 ret = ublk_copy_user_pages(req, buf_off, to, ITER_DEST);
1936 ubq = req->mq_hctx->driver_data;
1937 ublk_put_req_ref(ubq, req);
1938
1939 return ret;
1940}
1941
1942static ssize_t ublk_ch_write_iter(struct kiocb *iocb, struct iov_iter *from)
1943{
1944 struct ublk_queue *ubq;
1945 struct request *req;
1946 size_t buf_off;
1947 size_t ret;
1948
1949 req = ublk_check_and_get_req(iocb, from, &buf_off, ITER_SOURCE);
1950 if (IS_ERR(req))
1951 return PTR_ERR(req);
1952
1953 ret = ublk_copy_user_pages(req, buf_off, from, ITER_SOURCE);
1954 ubq = req->mq_hctx->driver_data;
1955 ublk_put_req_ref(ubq, req);
1956
1957 return ret;
1958}
1959
1960static const struct file_operations ublk_ch_fops = {
1961 .owner = THIS_MODULE,
1962 .open = ublk_ch_open,
1963 .release = ublk_ch_release,
1964 .llseek = no_llseek,
1965 .read_iter = ublk_ch_read_iter,
1966 .write_iter = ublk_ch_write_iter,
1967 .uring_cmd = ublk_ch_uring_cmd,
1968 .mmap = ublk_ch_mmap,
1969};
1970
1971static void ublk_deinit_queue(struct ublk_device *ub, int q_id)
1972{
1973 int size = ublk_queue_cmd_buf_size(ub, q_id);
1974 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
1975
1976 if (ubq->ubq_daemon)
1977 put_task_struct(ubq->ubq_daemon);
1978 if (ubq->io_cmd_buf)
1979 free_pages((unsigned long)ubq->io_cmd_buf, get_order(size));
1980}
1981
1982static int ublk_init_queue(struct ublk_device *ub, int q_id)
1983{
1984 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
1985 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
1986 void *ptr;
1987 int size;
1988
1989 spin_lock_init(&ubq->cancel_lock);
1990 ubq->flags = ub->dev_info.flags;
1991 ubq->q_id = q_id;
1992 ubq->q_depth = ub->dev_info.queue_depth;
1993 size = ublk_queue_cmd_buf_size(ub, q_id);
1994
1995 ptr = (void *) __get_free_pages(gfp_flags, get_order(size));
1996 if (!ptr)
1997 return -ENOMEM;
1998
1999 ubq->io_cmd_buf = ptr;
2000 ubq->dev = ub;
2001 return 0;
2002}
2003
2004static void ublk_deinit_queues(struct ublk_device *ub)
2005{
2006 int nr_queues = ub->dev_info.nr_hw_queues;
2007 int i;
2008
2009 if (!ub->__queues)
2010 return;
2011
2012 for (i = 0; i < nr_queues; i++)
2013 ublk_deinit_queue(ub, i);
2014 kfree(ub->__queues);
2015}
2016
2017static int ublk_init_queues(struct ublk_device *ub)
2018{
2019 int nr_queues = ub->dev_info.nr_hw_queues;
2020 int depth = ub->dev_info.queue_depth;
2021 int ubq_size = sizeof(struct ublk_queue) + depth * sizeof(struct ublk_io);
2022 int i, ret = -ENOMEM;
2023
2024 ub->queue_size = ubq_size;
2025 ub->__queues = kcalloc(nr_queues, ubq_size, GFP_KERNEL);
2026 if (!ub->__queues)
2027 return ret;
2028
2029 for (i = 0; i < nr_queues; i++) {
2030 if (ublk_init_queue(ub, i))
2031 goto fail;
2032 }
2033
2034 init_completion(&ub->completion);
2035 return 0;
2036
2037 fail:
2038 ublk_deinit_queues(ub);
2039 return ret;
2040}
2041
2042static int ublk_alloc_dev_number(struct ublk_device *ub, int idx)
2043{
2044 int i = idx;
2045 int err;
2046
2047 spin_lock(&ublk_idr_lock);
2048 /* allocate id; if @idx >= 0, we're requesting that specific id */
2049 if (i >= 0) {
2050 err = idr_alloc(&ublk_index_idr, ub, i, i + 1, GFP_NOWAIT);
2051 if (err == -ENOSPC)
2052 err = -EEXIST;
2053 } else {
2054 err = idr_alloc(&ublk_index_idr, ub, 0, UBLK_MAX_UBLKS,
2055 GFP_NOWAIT);
2056 }
2057 spin_unlock(&ublk_idr_lock);
2058
2059 if (err >= 0)
2060 ub->ub_number = err;
2061
2062 return err;
2063}
2064
2065static void ublk_free_dev_number(struct ublk_device *ub)
2066{
2067 spin_lock(&ublk_idr_lock);
2068 idr_remove(&ublk_index_idr, ub->ub_number);
2069 wake_up_all(&ublk_idr_wq);
2070 spin_unlock(&ublk_idr_lock);
2071}

static void ublk_cdev_rel(struct device *dev)
{
	struct ublk_device *ub = container_of(dev, struct ublk_device, cdev_dev);

	blk_mq_free_tag_set(&ub->tag_set);
	ublk_deinit_queues(ub);
	ublk_free_dev_number(ub);
	mutex_destroy(&ub->mutex);
	kfree(ub);
}

static int ublk_add_chdev(struct ublk_device *ub)
{
	struct device *dev = &ub->cdev_dev;
	int minor = ub->ub_number;
	int ret;

	dev->parent = ublk_misc.this_device;
	dev->devt = MKDEV(MAJOR(ublk_chr_devt), minor);
	dev->class = &ublk_chr_class;
	dev->release = ublk_cdev_rel;
	device_initialize(dev);

	ret = dev_set_name(dev, "ublkc%d", minor);
	if (ret)
		goto fail;

	cdev_init(&ub->cdev, &ublk_ch_fops);
	ret = cdev_device_add(&ub->cdev, dev);
	if (ret)
		goto fail;

	ublks_added++;
	return 0;
 fail:
	put_device(dev);
	return ret;
}

static void ublk_stop_work_fn(struct work_struct *work)
{
	struct ublk_device *ub =
		container_of(work, struct ublk_device, stop_work);

	ublk_stop_dev(ub);
}

/* align max io buffer size with PAGE_SIZE */
static void ublk_align_max_io_size(struct ublk_device *ub)
{
	unsigned int max_io_bytes = ub->dev_info.max_io_buf_bytes;

	ub->dev_info.max_io_buf_bytes =
		round_down(max_io_bytes, PAGE_SIZE);
}

static int ublk_add_tag_set(struct ublk_device *ub)
{
	ub->tag_set.ops = &ublk_mq_ops;
	ub->tag_set.nr_hw_queues = ub->dev_info.nr_hw_queues;
	ub->tag_set.queue_depth = ub->dev_info.queue_depth;
	ub->tag_set.numa_node = NUMA_NO_NODE;
	ub->tag_set.cmd_size = sizeof(struct ublk_rq_data);
	ub->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ub->tag_set.driver_data = ub;
	return blk_mq_alloc_tag_set(&ub->tag_set);
}

static void ublk_remove(struct ublk_device *ub)
{
	ublk_stop_dev(ub);
	cancel_work_sync(&ub->stop_work);
	cancel_work_sync(&ub->quiesce_work);
	cdev_device_del(&ub->cdev, &ub->cdev_dev);
	ublk_put_device(ub);
	ublks_added--;
}

static struct ublk_device *ublk_get_device_from_id(int idx)
{
	struct ublk_device *ub = NULL;

	if (idx < 0)
		return NULL;

	spin_lock(&ublk_idr_lock);
	ub = idr_find(&ublk_index_idr, idx);
	if (ub)
		ub = ublk_get_device(ub);
	spin_unlock(&ublk_idr_lock);

	return ub;
}

static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
{
	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
	const struct ublk_param_basic *p = &ub->params.basic;
	int ublksrv_pid = (int)header->data[0];
	struct queue_limits lim = {
		.logical_block_size	= 1 << p->logical_bs_shift,
		.physical_block_size	= 1 << p->physical_bs_shift,
		.io_min			= 1 << p->io_min_shift,
		.io_opt			= 1 << p->io_opt_shift,
		.max_hw_sectors		= p->max_sectors,
		.chunk_sectors		= p->chunk_sectors,
		.virt_boundary_mask	= p->virt_boundary_mask,
		.max_segments		= USHRT_MAX,
		.max_segment_size	= UINT_MAX,
	};
	struct gendisk *disk;
	int ret = -EINVAL;

	if (ublksrv_pid <= 0)
		return -EINVAL;
	if (!(ub->params.types & UBLK_PARAM_TYPE_BASIC))
		return -EINVAL;

	if (ub->params.types & UBLK_PARAM_TYPE_DISCARD) {
		const struct ublk_param_discard *pd = &ub->params.discard;

		lim.discard_alignment = pd->discard_alignment;
		lim.discard_granularity = pd->discard_granularity;
		lim.max_hw_discard_sectors = pd->max_discard_sectors;
		lim.max_write_zeroes_sectors = pd->max_write_zeroes_sectors;
		lim.max_discard_segments = pd->max_discard_segments;
	}

	if (ub->params.types & UBLK_PARAM_TYPE_ZONED) {
		const struct ublk_param_zoned *p = &ub->params.zoned;

		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED))
			return -EOPNOTSUPP;

		lim.zoned = true;
		lim.max_active_zones = p->max_active_zones;
		lim.max_open_zones = p->max_open_zones;
		lim.max_zone_append_sectors = p->max_zone_append_sectors;
	}

	if (wait_for_completion_interruptible(&ub->completion) != 0)
		return -EINTR;

	mutex_lock(&ub->mutex);
	if (ub->dev_info.state == UBLK_S_DEV_LIVE ||
	    test_bit(UB_STATE_USED, &ub->state)) {
		ret = -EEXIST;
		goto out_unlock;
	}

	disk = blk_mq_alloc_disk(&ub->tag_set, &lim, NULL);
	if (IS_ERR(disk)) {
		ret = PTR_ERR(disk);
		goto out_unlock;
	}
	sprintf(disk->disk_name, "ublkb%d", ub->ub_number);
	disk->fops = &ub_fops;
	disk->private_data = ub;

	ub->dev_info.ublksrv_pid = ublksrv_pid;
	ub->ub_disk = disk;

	ublk_apply_params(ub);

	/* don't probe partitions if any ubq daemon is untrusted */
	if (ub->nr_privileged_daemon != ub->nr_queues_ready)
		set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);

	ublk_get_device(ub);
	ub->dev_info.state = UBLK_S_DEV_LIVE;

	if (ublk_dev_is_zoned(ub)) {
		ret = ublk_revalidate_disk_zones(ub);
		if (ret)
			goto out_put_cdev;
	}

	ret = add_disk(disk);
	if (ret)
		goto out_put_cdev;

	set_bit(UB_STATE_USED, &ub->state);

out_put_cdev:
	if (ret) {
		ub->dev_info.state = UBLK_S_DEV_DEAD;
		ublk_put_device(ub);
	}
	if (ret)
		put_disk(disk);
out_unlock:
	mutex_unlock(&ub->mutex);
	return ret;
}
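
/*
 * Illustrative sketch (not part of the driver): how a ublk server might issue
 * START_DEV once every queue has posted its FETCH_REQ commands.  Control
 * commands travel as IORING_OP_URING_CMD on /dev/ublk-control and need a
 * 128-byte SQE (see the IO_URING_F_SQE128 check below); the ublksrv_ctrl_cmd
 * payload lives in the SQE's cmd area.  The liburing-style setup is a minimal
 * sketch with error handling omitted.
 *
 *	struct ublksrv_ctrl_cmd c = {
 *		.dev_id = dev_id,
 *		.data   = { getpid() },		// ublksrv_pid validated above
 *	};
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	sqe->opcode = IORING_OP_URING_CMD;
 *	sqe->fd = ctrl_fd;			// open("/dev/ublk-control")
 *	sqe->cmd_op = UBLK_U_CMD_START_DEV;
 *	memcpy(sqe->cmd, &c, sizeof(c));	// ring set up with IORING_SETUP_SQE128
 */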

static int ublk_ctrl_get_queue_affinity(struct ublk_device *ub,
		struct io_uring_cmd *cmd)
{
	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
	void __user *argp = (void __user *)(unsigned long)header->addr;
	cpumask_var_t cpumask;
	unsigned long queue;
	unsigned int retlen;
	unsigned int i;
	int ret;

	if (header->len * BITS_PER_BYTE < nr_cpu_ids)
		return -EINVAL;
	if (header->len & (sizeof(unsigned long)-1))
		return -EINVAL;
	if (!header->addr)
		return -EINVAL;

	queue = header->data[0];
	if (queue >= ub->dev_info.nr_hw_queues)
		return -EINVAL;

	if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
		return -ENOMEM;

	for_each_possible_cpu(i) {
		if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[i] == queue)
			cpumask_set_cpu(i, cpumask);
	}

	ret = -EFAULT;
	retlen = min_t(unsigned short, header->len, cpumask_size());
	if (copy_to_user(argp, cpumask, retlen))
		goto out_free_cpumask;
	if (retlen != header->len &&
	    clear_user(argp + retlen, header->len - retlen))
		goto out_free_cpumask;

	ret = 0;
out_free_cpumask:
	free_cpumask_var(cpumask);
	return ret;
}
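
/*
 * Illustrative sketch (not part of the driver): querying which CPUs map to a
 * given queue so the per-queue daemon can pin itself there.  The result is a
 * kernel cpumask bitmap copied into the user buffer; the buffer must cover at
 * least nr_cpu_ids bits and its length must be a multiple of sizeof(long).
 *
 *	unsigned long cpus[128 / sizeof(unsigned long)] = {};	// assumes <= 1024 CPUs
 *	struct ublksrv_ctrl_cmd c = {
 *		.dev_id = dev_id,
 *		.addr   = (__u64)(uintptr_t)cpus,
 *		.len    = sizeof(cpus),
 *		.data   = { 0 },		// queue index
 *	};
 *	// submit as UBLK_U_CMD_GET_QUEUE_AFFINITY, then e.g. pin the queue
 *	// daemon with sched_setaffinity() to one of the set bits in cpus[]
 */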

static inline void ublk_dump_dev_info(struct ublksrv_ctrl_dev_info *info)
{
	pr_devel("%s: dev id %d flags %llx\n", __func__,
			info->dev_id, info->flags);
	pr_devel("\t nr_hw_queues %d queue_depth %d\n",
			info->nr_hw_queues, info->queue_depth);
}

static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
{
	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
	void __user *argp = (void __user *)(unsigned long)header->addr;
	struct ublksrv_ctrl_dev_info info;
	struct ublk_device *ub;
	int ret = -EINVAL;

	if (header->len < sizeof(info) || !header->addr)
		return -EINVAL;
	if (header->queue_id != (u16)-1) {
		pr_warn("%s: queue_id is wrong %x\n",
			__func__, header->queue_id);
		return -EINVAL;
	}

	if (copy_from_user(&info, argp, sizeof(info)))
		return -EFAULT;

	if (capable(CAP_SYS_ADMIN))
		info.flags &= ~UBLK_F_UNPRIVILEGED_DEV;
	else if (!(info.flags & UBLK_F_UNPRIVILEGED_DEV))
		return -EPERM;

	/*
	 * An unprivileged device can't be trusted, but RECOVERY and
	 * RECOVERY_REISSUE may still hang error handling, so recovery
	 * features can't be supported for unprivileged ublk yet.
	 *
	 * TODO: provide forward progress for the RECOVERY handler, so that
	 * unprivileged devices can benefit from it.
	 */
	if (info.flags & UBLK_F_UNPRIVILEGED_DEV)
		info.flags &= ~(UBLK_F_USER_RECOVERY_REISSUE |
				UBLK_F_USER_RECOVERY);

	/* the created device is always owned by the current user */
	ublk_store_owner_uid_gid(&info.owner_uid, &info.owner_gid);

	if (header->dev_id != info.dev_id) {
		pr_warn("%s: dev id not match %u %u\n",
			__func__, header->dev_id, info.dev_id);
		return -EINVAL;
	}

	if (header->dev_id != U32_MAX && header->dev_id >= UBLK_MAX_UBLKS) {
		pr_warn("%s: dev id is too large. Max supported is %d\n",
			__func__, UBLK_MAX_UBLKS - 1);
		return -EINVAL;
	}

	ublk_dump_dev_info(&info);

	ret = mutex_lock_killable(&ublk_ctl_mutex);
	if (ret)
		return ret;

	ret = -EACCES;
	if (ublks_added >= ublks_max)
		goto out_unlock;

	ret = -ENOMEM;
	ub = kzalloc(sizeof(*ub), GFP_KERNEL);
	if (!ub)
		goto out_unlock;
	mutex_init(&ub->mutex);
	spin_lock_init(&ub->lock);
	INIT_WORK(&ub->quiesce_work, ublk_quiesce_work_fn);
	INIT_WORK(&ub->stop_work, ublk_stop_work_fn);

	ret = ublk_alloc_dev_number(ub, header->dev_id);
	if (ret < 0)
		goto out_free_ub;

	memcpy(&ub->dev_info, &info, sizeof(info));

	/* update device id */
	ub->dev_info.dev_id = ub->ub_number;

	/*
	 * The 64bit flags will be copied back to userspace as the feature
	 * negotiation result, so clear flags which the driver doesn't
	 * support yet; then userspace can get the correct flags (features)
	 * to handle.
	 */
	ub->dev_info.flags &= UBLK_F_ALL;

	ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE |
		UBLK_F_URING_CMD_COMP_IN_TASK;

	/* GET_DATA isn't needed any more with USER_COPY */
	if (ublk_dev_is_user_copy(ub))
		ub->dev_info.flags &= ~UBLK_F_NEED_GET_DATA;

	/* Zoned storage support requires the user copy feature */
	if (ublk_dev_is_zoned(ub) &&
	    (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) || !ublk_dev_is_user_copy(ub))) {
		ret = -EINVAL;
		goto out_free_dev_number;
	}

	/* We are not ready to support zero copy */
	ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY;

	ub->dev_info.nr_hw_queues = min_t(unsigned int,
			ub->dev_info.nr_hw_queues, nr_cpu_ids);
	ublk_align_max_io_size(ub);

	ret = ublk_init_queues(ub);
	if (ret)
		goto out_free_dev_number;

	ret = ublk_add_tag_set(ub);
	if (ret)
		goto out_deinit_queues;

	ret = -EFAULT;
	if (copy_to_user(argp, &ub->dev_info, sizeof(info)))
		goto out_free_tag_set;

	/*
	 * Add the char dev so that the ublksrv daemon can be set up.
	 * ublk_add_chdev() will clean up everything if it fails.
	 */
	ret = ublk_add_chdev(ub);
	goto out_unlock;

out_free_tag_set:
	blk_mq_free_tag_set(&ub->tag_set);
out_deinit_queues:
	ublk_deinit_queues(ub);
out_free_dev_number:
	ublk_free_dev_number(ub);
out_free_ub:
	mutex_destroy(&ub->mutex);
	kfree(ub);
out_unlock:
	mutex_unlock(&ublk_ctl_mutex);
	return ret;
}
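
/*
 * Illustrative sketch (not part of the driver): a minimal ADD_DEV request.
 * header->addr points at a ublksrv_ctrl_dev_info that is both input and
 * output: the driver masks the flags it supports and writes back the
 * allocated dev_id, so the server must re-read the struct after completion.
 * Values below are arbitrary example choices.
 *
 *	struct ublksrv_ctrl_dev_info info = {
 *		.nr_hw_queues     = 1,
 *		.queue_depth      = 128,
 *		.max_io_buf_bytes = 512 << 10,
 *		.dev_id           = (__u32)-1,	// let the driver pick an index
 *		.flags            = UBLK_F_USER_COPY,
 *	};
 *	struct ublksrv_ctrl_cmd c = {
 *		.dev_id   = (__u32)-1,		// must match info.dev_id
 *		.queue_id = (__u16)-1,		// required by ublk_ctrl_add_dev()
 *		.addr     = (__u64)(uintptr_t)&info,
 *		.len      = sizeof(info),
 *	};
 *	// submit as UBLK_U_CMD_ADD_DEV, then read info.dev_id back
 */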

static inline bool ublk_idr_freed(int id)
{
	void *ptr;

	spin_lock(&ublk_idr_lock);
	ptr = idr_find(&ublk_index_idr, id);
	spin_unlock(&ublk_idr_lock);

	return ptr == NULL;
}

static int ublk_ctrl_del_dev(struct ublk_device **p_ub, bool wait)
{
	struct ublk_device *ub = *p_ub;
	int idx = ub->ub_number;
	int ret;

	ret = mutex_lock_killable(&ublk_ctl_mutex);
	if (ret)
		return ret;

	if (!test_bit(UB_STATE_DELETED, &ub->state)) {
		ublk_remove(ub);
		set_bit(UB_STATE_DELETED, &ub->state);
	}

	/* Mark the reference as consumed */
	*p_ub = NULL;
	ublk_put_device(ub);
	mutex_unlock(&ublk_ctl_mutex);

	/*
	 * Wait until the idr entry is removed, so the index can be reused
	 * after the DEL_DEV command returns.
	 *
	 * If we return because of a user interrupt, a future delete command
	 * may come:
	 *
	 * - the device number isn't freed: this device won't and needn't
	 *   be deleted again, since UB_STATE_DELETED is set, and the device
	 *   will be released after the last reference is dropped
	 *
	 * - the device number is freed already: we will not find this
	 *   device via ublk_get_device_from_id()
	 */
	if (wait && wait_event_interruptible(ublk_idr_wq, ublk_idr_freed(idx)))
		return -EINTR;
	return 0;
}

static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd)
{
	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);

	pr_devel("%s: cmd_op %x, dev id %d qid %d data %llx buf %llx len %u\n",
			__func__, cmd->cmd_op, header->dev_id, header->queue_id,
			header->data[0], header->addr, header->len);
}

static int ublk_ctrl_stop_dev(struct ublk_device *ub)
{
	ublk_stop_dev(ub);
	cancel_work_sync(&ub->stop_work);
	cancel_work_sync(&ub->quiesce_work);

	return 0;
}

static int ublk_ctrl_get_dev_info(struct ublk_device *ub,
		struct io_uring_cmd *cmd)
{
	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
	void __user *argp = (void __user *)(unsigned long)header->addr;

	if (header->len < sizeof(struct ublksrv_ctrl_dev_info) || !header->addr)
		return -EINVAL;

	if (copy_to_user(argp, &ub->dev_info, sizeof(ub->dev_info)))
		return -EFAULT;

	return 0;
}

/* TYPE_DEVT is readonly, so fill it up before returning to userspace */
static void ublk_ctrl_fill_params_devt(struct ublk_device *ub)
{
	ub->params.devt.char_major = MAJOR(ub->cdev_dev.devt);
	ub->params.devt.char_minor = MINOR(ub->cdev_dev.devt);

	if (ub->ub_disk) {
		ub->params.devt.disk_major = MAJOR(disk_devt(ub->ub_disk));
		ub->params.devt.disk_minor = MINOR(disk_devt(ub->ub_disk));
	} else {
		ub->params.devt.disk_major = 0;
		ub->params.devt.disk_minor = 0;
	}
	ub->params.types |= UBLK_PARAM_TYPE_DEVT;
}

static int ublk_ctrl_get_params(struct ublk_device *ub,
		struct io_uring_cmd *cmd)
{
	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
	void __user *argp = (void __user *)(unsigned long)header->addr;
	struct ublk_params_header ph;
	int ret;

	if (header->len <= sizeof(ph) || !header->addr)
		return -EINVAL;

	if (copy_from_user(&ph, argp, sizeof(ph)))
		return -EFAULT;

	if (ph.len > header->len || !ph.len)
		return -EINVAL;

	if (ph.len > sizeof(struct ublk_params))
		ph.len = sizeof(struct ublk_params);

	mutex_lock(&ub->mutex);
	ublk_ctrl_fill_params_devt(ub);
	if (copy_to_user(argp, &ub->params, ph.len))
		ret = -EFAULT;
	else
		ret = 0;
	mutex_unlock(&ub->mutex);

	return ret;
}

static int ublk_ctrl_set_params(struct ublk_device *ub,
		struct io_uring_cmd *cmd)
{
	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
	void __user *argp = (void __user *)(unsigned long)header->addr;
	struct ublk_params_header ph;
	int ret = -EFAULT;

	if (header->len <= sizeof(ph) || !header->addr)
		return -EINVAL;

	if (copy_from_user(&ph, argp, sizeof(ph)))
		return -EFAULT;

	if (ph.len > header->len || !ph.len || !ph.types)
		return -EINVAL;

	if (ph.len > sizeof(struct ublk_params))
		ph.len = sizeof(struct ublk_params);

	/* parameters can only be changed when device isn't live */
	mutex_lock(&ub->mutex);
	if (ub->dev_info.state == UBLK_S_DEV_LIVE) {
		ret = -EACCES;
	} else if (copy_from_user(&ub->params, argp, ph.len)) {
		ret = -EFAULT;
	} else {
		/* clear all we don't support yet */
		ub->params.types &= UBLK_PARAM_TYPE_ALL;
		ret = ublk_validate_params(ub);
		if (ret)
			ub->params.types = 0;
	}
	mutex_unlock(&ub->mutex);

	return ret;
}
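
/*
 * Illustrative sketch (not part of the driver): SET_PARAMS must be issued
 * before START_DEV, while the device isn't live.  The ublk_params_header is
 * simply the first two fields of struct ublk_params, so p.len/p.types double
 * as the header validated above.  The dev_sectors field name is assumed here
 * for the device size; the other basic fields match the limits applied in
 * ublk_ctrl_start_dev().
 *
 *	struct ublk_params p = {
 *		.len   = sizeof(p),
 *		.types = UBLK_PARAM_TYPE_BASIC,
 *		.basic = {
 *			.logical_bs_shift  = 9,
 *			.physical_bs_shift = 12,
 *			.io_min_shift      = 9,
 *			.io_opt_shift      = 12,
 *			.max_sectors       = 1024,
 *			.dev_sectors       = dev_size >> 9,	// assumed field name
 *		},
 *	};
 *	struct ublksrv_ctrl_cmd c = {
 *		.dev_id = dev_id,
 *		.addr   = (__u64)(uintptr_t)&p,
 *		.len    = sizeof(p),
 *	};
 *	// submit as UBLK_U_CMD_SET_PARAMS
 */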

static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
{
	int i;

	WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq)));

	/* All old ioucmds have to be completed */
	ubq->nr_io_ready = 0;
	/* old daemon is PF_EXITING, put it now */
	put_task_struct(ubq->ubq_daemon);
	/* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
	ubq->ubq_daemon = NULL;
	ubq->timeout = false;
	ubq->canceling = false;

	for (i = 0; i < ubq->q_depth; i++) {
		struct ublk_io *io = &ubq->ios[i];

		/* forget everything now and be ready for new FETCH_REQ */
		io->flags = 0;
		io->cmd = NULL;
		io->addr = 0;
	}
}

static int ublk_ctrl_start_recovery(struct ublk_device *ub,
		struct io_uring_cmd *cmd)
{
	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
	int ret = -EINVAL;
	int i;

	mutex_lock(&ub->mutex);
	if (!ublk_can_use_recovery(ub))
		goto out_unlock;
	/*
	 * START_RECOVERY is only allowed after:
	 *
	 * (1) UB_STATE_OPEN is not set, which means the dying process has
	 *     exited and the related io_uring ctx is freed, so the file
	 *     struct of /dev/ublkcX is released.
	 *
	 * (2) UBLK_S_DEV_QUIESCED is set, which means the quiesce_work:
	 *     (a) has quiesced the request queue
	 *     (b) has requeued every inflight rq whose io_flags is ACTIVE
	 *     (c) has requeued/aborted every inflight rq whose io_flags is NOT ACTIVE
	 *     (d) has completed/canceled all ioucmds owned by the dying process
	 */
	if (test_bit(UB_STATE_OPEN, &ub->state) ||
	    ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
		ret = -EBUSY;
		goto out_unlock;
	}
	pr_devel("%s: start recovery for dev id %d.\n", __func__, header->dev_id);
	for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
		ublk_queue_reinit(ub, ublk_get_queue(ub, i));
	/* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
	ub->mm = NULL;
	ub->nr_queues_ready = 0;
	ub->nr_privileged_daemon = 0;
	init_completion(&ub->completion);
	ret = 0;
 out_unlock:
	mutex_unlock(&ub->mutex);
	return ret;
}

static int ublk_ctrl_end_recovery(struct ublk_device *ub,
		struct io_uring_cmd *cmd)
{
	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
	int ublksrv_pid = (int)header->data[0];
	int ret = -EINVAL;

	pr_devel("%s: Waiting for new ubq_daemons(nr: %d) to be ready, dev id %d...\n",
			__func__, ub->dev_info.nr_hw_queues, header->dev_id);
	/* wait until the new ubq_daemons have sent all FETCH_REQ commands */
	if (wait_for_completion_interruptible(&ub->completion))
		return -EINTR;

	pr_devel("%s: All new ubq_daemons(nr: %d) are ready, dev id %d\n",
			__func__, ub->dev_info.nr_hw_queues, header->dev_id);

	mutex_lock(&ub->mutex);
	if (!ublk_can_use_recovery(ub))
		goto out_unlock;

	if (ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
		ret = -EBUSY;
		goto out_unlock;
	}
	ub->dev_info.ublksrv_pid = ublksrv_pid;
	pr_devel("%s: new ublksrv_pid %d, dev id %d\n",
			__func__, ublksrv_pid, header->dev_id);
	blk_mq_unquiesce_queue(ub->ub_disk->queue);
	pr_devel("%s: queue unquiesced, dev id %d.\n",
			__func__, header->dev_id);
	blk_mq_kick_requeue_list(ub->ub_disk->queue);
	ub->dev_info.state = UBLK_S_DEV_LIVE;
	ret = 0;
 out_unlock:
	mutex_unlock(&ub->mutex);
	return ret;
}
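
/*
 * Illustrative sketch (not part of the driver): the recovery handshake as seen
 * from a new ublk server process, assuming the device was created with
 * UBLK_F_USER_RECOVERY and the old daemon has died:
 *
 *	1) issue UBLK_CMD_START_USER_RECOVERY (the device must be quiesced and
 *	   every old /dev/ublkcX file must be closed, see the checks above)
 *	2) open /dev/ublkcX, mmap the io_cmd_buf again, and queue a FETCH_REQ
 *	   for every tag of every queue
 *	3) issue UBLK_CMD_END_USER_RECOVERY with data[0] = new daemon pid,
 *	   which unquiesces the queue and marks the device live again
 */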

static int ublk_ctrl_get_features(struct io_uring_cmd *cmd)
{
	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
	void __user *argp = (void __user *)(unsigned long)header->addr;
	u64 features = UBLK_F_ALL & ~UBLK_F_SUPPORT_ZERO_COPY;

	if (header->len != UBLK_FEATURES_LEN || !header->addr)
		return -EINVAL;

	if (copy_to_user(argp, &features, UBLK_FEATURES_LEN))
		return -EFAULT;

	return 0;
}
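
/*
 * Illustrative sketch (not part of the driver): feature negotiation.
 * GET_FEATURES needs no target device, so a server can probe driver
 * capabilities before ADD_DEV and only request flags that are advertised.
 *
 *	__u64 features = 0;
 *	struct ublksrv_ctrl_cmd c = {
 *		.addr = (__u64)(uintptr_t)&features,
 *		.len  = UBLK_FEATURES_LEN,
 *	};
 *	// submit as UBLK_U_CMD_GET_FEATURES, then e.g.:
 *	if (features & UBLK_F_USER_COPY)
 *		info.flags |= UBLK_F_USER_COPY;
 */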

/*
 * All control commands are sent via /dev/ublk-control, so we have to check
 * the destination device's permission
 */
static int ublk_char_dev_permission(struct ublk_device *ub,
		const char *dev_path, int mask)
{
	int err;
	struct path path;
	struct kstat stat;

	err = kern_path(dev_path, LOOKUP_FOLLOW, &path);
	if (err)
		return err;

	err = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
	if (err)
		goto exit;

	err = -EPERM;
	if (stat.rdev != ub->cdev_dev.devt || !S_ISCHR(stat.mode))
		goto exit;

	err = inode_permission(&nop_mnt_idmap,
			d_backing_inode(path.dentry), mask);
exit:
	path_put(&path);
	return err;
}

static int ublk_ctrl_uring_cmd_permission(struct ublk_device *ub,
		struct io_uring_cmd *cmd)
{
	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)io_uring_sqe_cmd(cmd->sqe);
	bool unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV;
	void __user *argp = (void __user *)(unsigned long)header->addr;
	char *dev_path = NULL;
	int ret = 0;
	int mask;

	if (!unprivileged) {
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		/*
		 * The newly added UBLK_CMD_GET_DEV_INFO2 command includes
		 * char_dev_path in its payload too, since userspace may not
		 * know whether the specified device was created in
		 * unprivileged mode.
		 */
		if (_IOC_NR(cmd->cmd_op) != UBLK_CMD_GET_DEV_INFO2)
			return 0;
	}

	/*
	 * The user has to provide the char device path for unprivileged ublk.
	 *
	 * header->addr always points to the dev path buffer, and
	 * header->dev_path_len records the length of the dev path buffer.
	 */
	if (!header->dev_path_len || header->dev_path_len > PATH_MAX)
		return -EINVAL;

	if (header->len < header->dev_path_len)
		return -EINVAL;

	dev_path = memdup_user_nul(argp, header->dev_path_len);
	if (IS_ERR(dev_path))
		return PTR_ERR(dev_path);

	ret = -EINVAL;
	switch (_IOC_NR(cmd->cmd_op)) {
	case UBLK_CMD_GET_DEV_INFO:
	case UBLK_CMD_GET_DEV_INFO2:
	case UBLK_CMD_GET_QUEUE_AFFINITY:
	case UBLK_CMD_GET_PARAMS:
	case (_IOC_NR(UBLK_U_CMD_GET_FEATURES)):
		mask = MAY_READ;
		break;
	case UBLK_CMD_START_DEV:
	case UBLK_CMD_STOP_DEV:
	case UBLK_CMD_ADD_DEV:
	case UBLK_CMD_DEL_DEV:
	case UBLK_CMD_SET_PARAMS:
	case UBLK_CMD_START_USER_RECOVERY:
	case UBLK_CMD_END_USER_RECOVERY:
		mask = MAY_READ | MAY_WRITE;
		break;
	default:
		goto exit;
	}

	ret = ublk_char_dev_permission(ub, dev_path, mask);
	if (!ret) {
		header->len -= header->dev_path_len;
		header->addr += header->dev_path_len;
	}
	pr_devel("%s: dev id %d cmd_op %x uid %d gid %d path %s ret %d\n",
			__func__, ub->ub_number, cmd->cmd_op,
			ub->dev_info.owner_uid, ub->dev_info.owner_gid,
			dev_path, ret);
exit:
	kfree(dev_path);
	return ret;
}
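
/*
 * Illustrative sketch (not part of the driver): buffer layout for an
 * unprivileged GET_DEV_INFO2.  The char device path is prepended to the
 * normal payload; once the permission check passes, the driver strips it
 * above by advancing header->addr and shrinking header->len, so the dev info
 * is written right after the path in the same user buffer.
 *
 *	char buf[sizeof("/dev/ublkc0") + sizeof(struct ublksrv_ctrl_dev_info)];
 *	size_t path_len = strlen("/dev/ublkc0") + 1;	// include the NUL
 *
 *	memcpy(buf, "/dev/ublkc0", path_len);
 *	struct ublksrv_ctrl_cmd c = {
 *		.dev_id       = 0,
 *		.addr         = (__u64)(uintptr_t)buf,
 *		.len          = sizeof(buf),
 *		.dev_path_len = path_len,
 *	};
 *	// submit as UBLK_U_CMD_GET_DEV_INFO2; on success the dev info
 *	// lands at buf + path_len
 */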

static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
	struct ublk_device *ub = NULL;
	u32 cmd_op = cmd->cmd_op;
	int ret = -EINVAL;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	ublk_ctrl_cmd_dump(cmd);

	if (!(issue_flags & IO_URING_F_SQE128))
		goto out;

	ret = ublk_check_cmd_op(cmd_op);
	if (ret)
		goto out;

	if (cmd_op == UBLK_U_CMD_GET_FEATURES) {
		ret = ublk_ctrl_get_features(cmd);
		goto out;
	}

	if (_IOC_NR(cmd_op) != UBLK_CMD_ADD_DEV) {
		ret = -ENODEV;
		ub = ublk_get_device_from_id(header->dev_id);
		if (!ub)
			goto out;

		ret = ublk_ctrl_uring_cmd_permission(ub, cmd);
		if (ret)
			goto put_dev;
	}

	switch (_IOC_NR(cmd_op)) {
	case UBLK_CMD_START_DEV:
		ret = ublk_ctrl_start_dev(ub, cmd);
		break;
	case UBLK_CMD_STOP_DEV:
		ret = ublk_ctrl_stop_dev(ub);
		break;
	case UBLK_CMD_GET_DEV_INFO:
	case UBLK_CMD_GET_DEV_INFO2:
		ret = ublk_ctrl_get_dev_info(ub, cmd);
		break;
	case UBLK_CMD_ADD_DEV:
		ret = ublk_ctrl_add_dev(cmd);
		break;
	case UBLK_CMD_DEL_DEV:
		ret = ublk_ctrl_del_dev(&ub, true);
		break;
	case _IOC_NR(UBLK_U_CMD_DEL_DEV_ASYNC):
		ret = ublk_ctrl_del_dev(&ub, false);
		break;
	case UBLK_CMD_GET_QUEUE_AFFINITY:
		ret = ublk_ctrl_get_queue_affinity(ub, cmd);
		break;
	case UBLK_CMD_GET_PARAMS:
		ret = ublk_ctrl_get_params(ub, cmd);
		break;
	case UBLK_CMD_SET_PARAMS:
		ret = ublk_ctrl_set_params(ub, cmd);
		break;
	case UBLK_CMD_START_USER_RECOVERY:
		ret = ublk_ctrl_start_recovery(ub, cmd);
		break;
	case UBLK_CMD_END_USER_RECOVERY:
		ret = ublk_ctrl_end_recovery(ub, cmd);
		break;
	default:
		ret = -ENOTSUPP;
		break;
	}

 put_dev:
	if (ub)
		ublk_put_device(ub);
 out:
	io_uring_cmd_done(cmd, ret, 0, issue_flags);
	pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
			__func__, ret, cmd->cmd_op, header->dev_id, header->queue_id);
	return -EIOCBQUEUED;
}

static const struct file_operations ublk_ctl_fops = {
	.open = nonseekable_open,
	.uring_cmd = ublk_ctrl_uring_cmd,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

static struct miscdevice ublk_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ublk-control",
	.fops = &ublk_ctl_fops,
};

static int __init ublk_init(void)
{
	int ret;

	BUILD_BUG_ON((u64)UBLKSRV_IO_BUF_OFFSET +
			UBLKSRV_IO_BUF_TOTAL_SIZE < UBLKSRV_IO_BUF_OFFSET);

	init_waitqueue_head(&ublk_idr_wq);

	ret = misc_register(&ublk_misc);
	if (ret)
		return ret;

	ret = alloc_chrdev_region(&ublk_chr_devt, 0, UBLK_MINORS, "ublk-char");
	if (ret)
		goto unregister_mis;

	ret = class_register(&ublk_chr_class);
	if (ret)
		goto free_chrdev_region;

	return 0;

free_chrdev_region:
	unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
unregister_mis:
	misc_deregister(&ublk_misc);
	return ret;
}

static void __exit ublk_exit(void)
{
	struct ublk_device *ub;
	int id;

	idr_for_each_entry(&ublk_index_idr, ub, id)
		ublk_remove(ub);

	class_unregister(&ublk_chr_class);
	misc_deregister(&ublk_misc);

	idr_destroy(&ublk_index_idr);
	unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
}

module_init(ublk_init);
module_exit(ublk_exit);

static int ublk_set_max_ublks(const char *buf, const struct kernel_param *kp)
{
	return param_set_uint_minmax(buf, kp, 0, UBLK_MAX_UBLKS);
}

static int ublk_get_max_ublks(char *buf, const struct kernel_param *kp)
{
	return sysfs_emit(buf, "%u\n", ublks_max);
}

static const struct kernel_param_ops ublk_max_ublks_ops = {
	.set = ublk_set_max_ublks,
	.get = ublk_get_max_ublks,
};

module_param_cb(ublks_max, &ublk_max_ublks_ops, &ublks_max, 0644);
MODULE_PARM_DESC(ublks_max, "max number of ublk devices allowed to add (default: 64)");
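
/*
 * Illustrative usage note (not part of the driver), assuming the module is
 * built as ublk_drv: since the parameter is registered with mode 0644, the
 * limit can be set at load time (ublks_max=128 on the modprobe command line)
 * or adjusted later by writing to
 * /sys/module/ublk_drv/parameters/ublks_max.
 */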

MODULE_AUTHOR("Ming Lei <ming.lei@redhat.com>");
MODULE_LICENSE("GPL");