/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/dm-ioctl.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct dm_report_zones_args;
struct mapped_device;
struct bio_vec;
enum dax_access_mode;

/*
 * Type of table, mapped_device's mempool and request_queue
 */
enum dm_queue_mode {
	DM_TYPE_NONE		 = 0,
	DM_TYPE_BIO_BASED	 = 1,
	DM_TYPE_REQUEST_BASED	 = 2,
	DM_TYPE_DAX_BIO_BASED	 = 3,
};

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE, STATUSTYPE_IMA } status_type_t;

union map_info {
	void *ptr;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
			  unsigned int argc, char **argv);

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
					    struct request *rq,
					    union map_info *map_context,
					    struct request **clone);
typedef void (*dm_release_clone_request_fn) (struct request *clone,
					     union map_info *map_context);
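
/*
 * Example (an illustrative sketch, not part of this interface): a minimal
 * bio-based target in the style of dm-linear.  The names example_c,
 * example_ctr, example_dtr and example_map are hypothetical.
 *
 *	struct example_c {
 *		struct dm_dev *dev;
 *		sector_t start;
 *	};
 *
 *	static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 *	{
 *		struct example_c *ec;
 *		unsigned long long start;
 *
 *		if (argc != 2) {
 *			ti->error = "Invalid argument count";
 *			return -EINVAL;
 *		}
 *		ec = kzalloc(sizeof(*ec), GFP_KERNEL);
 *		if (!ec) {
 *			ti->error = "Cannot allocate context";
 *			return -ENOMEM;
 *		}
 *		if (kstrtoull(argv[1], 10, &start) ||
 *		    dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *				  &ec->dev)) {
 *			ti->error = "Invalid device or offset";
 *			kfree(ec);
 *			return -EINVAL;
 *		}
 *		ec->start = (sector_t)start;
 *		ti->private = ec;
 *		return 0;
 *	}
 *
 *	static void example_dtr(struct dm_target *ti)
 *	{
 *		struct example_c *ec = ti->private;
 *
 *		dm_put_device(ti, ec->dev);
 *		kfree(ec);
 *	}
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct example_c *ec = ti->private;
 *
 *		bio_set_dev(bio, ec->dev->bdev);
 *		bio->bi_iter.bi_sector = ec->start +
 *			dm_target_offset(ti, bio->bi_iter.bi_sector);
 *		return DM_MAPIO_REMAPPED;
 *	}
 *
 * dm_target_offset() and DM_MAPIO_REMAPPED are defined later in this file.
 */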

/*
 * Returns:
 * < 0 : error (currently ignored)
 * 0   : ended successfully
 * 1   : for some reason the io has still not completed (e.g., a
 *       multipath target might want to requeue a failed io).
 * 2   : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
			    struct bio *bio, blk_status_t *error);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
				    struct request *clone, blk_status_t error,
				    union map_info *map_context);

typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
			      unsigned int status_flags, char *result, unsigned int maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned int argc, char **argv,
			      char *result, unsigned int maxlen);

typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);

#ifdef CONFIG_BLK_DEV_ZONED
typedef int (*dm_report_zones_fn) (struct dm_target *ti,
				   struct dm_report_zones_args *args,
				   unsigned int nr_zones);
#else
/*
 * Define dm_report_zones_fn so that targets can assign it to NULL if
 * CONFIG_BLK_DEV_ZONED is disabled.  Otherwise each target would need
 * awkward #ifdefs in its target_type, etc.
 */
typedef int (*dm_report_zones_fn) (struct dm_target *dummy);
#endif

/*
 * These iteration functions are typically used to check (and combine)
 * properties of underlying devices.
 * E.g. Does at least one underlying device support flush?
 *      Does any underlying device not support WRITE_SAME?
 *
 * The callout function is called once for each contiguous section of
 * an underlying device. State can be maintained in *data.
 * Return non-zero to stop iterating through any further devices.
 */
typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
					   struct dm_dev *dev,
					   sector_t start, sector_t len,
					   void *data);

/*
 * This function must iterate through each section of device used by the
 * target until it encounters a non-zero return code, which it then returns.
 * Returns zero if no callout returned non-zero.
 */
typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data);
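
/*
 * Example (an illustrative sketch, not part of this interface): a callout
 * that reports whether a device lacks discard support, in the style of the
 * helpers in drivers/md/dm-table.c.  The name device_not_discard_capable is
 * hypothetical; bdev_max_discard_sectors() is the block-layer query used by
 * recent kernels.
 *
 *	static int device_not_discard_capable(struct dm_target *ti,
 *					      struct dm_dev *dev,
 *					      sector_t start, sector_t len,
 *					      void *data)
 *	{
 *		return !bdev_max_discard_sectors(dev->bdev);
 *	}
 *
 * A caller would then combine the result across all of a target's devices
 * with something like:
 *
 *	if (ti->type->iterate_devices &&
 *	    ti->type->iterate_devices(ti, device_not_discard_capable, NULL))
 *		// at least one underlying device cannot discard
 */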

typedef void (*dm_io_hints_fn) (struct dm_target *ti,
				struct queue_limits *limits);

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

/*
 * Returns:
 *  < 0 : error
 * >= 0 : the number of bytes accessible at the address
 */
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode mode, void **kaddr,
		pfn_t *pfn);
typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
		size_t nr_pages);

/*
 * Returns:
 * != 0 : number of bytes transferred
 * 0    : recovery write failed
 */
typedef size_t (*dm_dax_recovery_write_fn)(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i);

void dm_error(const char *message);

struct dm_dev {
	struct block_device *bdev;
	struct file *bdev_file;
	struct dax_device *dax_dev;
	blk_mode_t mode;
	char name[16];
};

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode,
		  struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);

/*
 * Information about a target type
 */

struct target_type {
	uint64_t features;
	const char *name;
	struct module *module;
	unsigned int version[3];
	dm_ctr_fn ctr;
	dm_dtr_fn dtr;
	dm_map_fn map;
	dm_clone_and_map_request_fn clone_and_map_rq;
	dm_release_clone_request_fn release_clone_rq;
	dm_endio_fn end_io;
	dm_request_endio_fn rq_end_io;
	dm_presuspend_fn presuspend;
	dm_presuspend_undo_fn presuspend_undo;
	dm_postsuspend_fn postsuspend;
	dm_preresume_fn preresume;
	dm_resume_fn resume;
	dm_status_fn status;
	dm_message_fn message;
	dm_prepare_ioctl_fn prepare_ioctl;
	dm_report_zones_fn report_zones;
	dm_busy_fn busy;
	dm_iterate_devices_fn iterate_devices;
	dm_io_hints_fn io_hints;
	dm_dax_direct_access_fn direct_access;
	dm_dax_zero_page_range_fn dax_zero_page_range;
	dm_dax_recovery_write_fn dax_recovery_write;

	/* For internal device-mapper use. */
	struct list_head list;
};

/*
 * Target features
 */

/*
 * Any table that contains an instance of this target must have only one.
 */
#define DM_TARGET_SINGLETON		0x00000001
#define dm_target_needs_singleton(type)	((type)->features & DM_TARGET_SINGLETON)

/*
 * Indicates that a target does not support read-only devices.
 */
#define DM_TARGET_ALWAYS_WRITEABLE	0x00000002
#define dm_target_always_writeable(type) \
		((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

/*
 * Any device that contains a table with an instance of this target may never
 * have tables containing any different target type.
 */
#define DM_TARGET_IMMUTABLE		0x00000004
#define dm_target_is_immutable(type)	((type)->features & DM_TARGET_IMMUTABLE)

/*
 * Indicates that a target may replace any target; even immutable targets.
 * .map, .clone_and_map_rq and .release_clone_rq are all defined.
 */
#define DM_TARGET_WILDCARD		0x00000008
#define dm_target_is_wildcard(type)	((type)->features & DM_TARGET_WILDCARD)

/*
 * A target implements its own bio data integrity.
 */
#define DM_TARGET_INTEGRITY		0x00000010
#define dm_target_has_integrity(type)	((type)->features & DM_TARGET_INTEGRITY)

/*
 * A target passes integrity data to the lower device.
 */
#define DM_TARGET_PASSES_INTEGRITY	0x00000020
#define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)

/*
 * Indicates support for zoned block devices:
 * - DM_TARGET_ZONED_HM: the target also supports host-managed zoned
 *   block devices but does not support combining different zoned models.
 * - DM_TARGET_MIXED_ZONED_MODEL: the target supports combining multiple
 *   devices with different zoned models.
 */
#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_ZONED_HM		0x00000040
#define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM)
#else
#define DM_TARGET_ZONED_HM		0x00000000
#define dm_target_supports_zoned_hm(type) (false)
#endif

/*
 * A target handles REQ_NOWAIT
 */
#define DM_TARGET_NOWAIT		0x00000080
#define dm_target_supports_nowait(type) ((type)->features & DM_TARGET_NOWAIT)

/*
 * A target supports passing through inline crypto support.
 */
#define DM_TARGET_PASSES_CRYPTO		0x00000100
#define dm_target_passes_crypto(type) ((type)->features & DM_TARGET_PASSES_CRYPTO)

#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_MIXED_ZONED_MODEL	0x00000200
#define dm_target_supports_mixed_zoned_model(type) \
	((type)->features & DM_TARGET_MIXED_ZONED_MODEL)
#else
#define DM_TARGET_MIXED_ZONED_MODEL	0x00000000
#define dm_target_supports_mixed_zoned_model(type) (false)
#endif

struct dm_target {
	struct dm_table *table;
	struct target_type *type;

	/* target limits */
	sector_t begin;
	sector_t len;

	/* If non-zero, maximum size of I/O submitted to a target. */
	uint32_t max_io_len;

	/*
	 * A number of zero-length barrier bios that will be submitted
	 * to the target for the purpose of flushing cache.
	 *
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 * It is the responsibility of the target driver to remap these bios
	 * to the real underlying devices.
	 */
	unsigned int num_flush_bios;

	/*
	 * The number of discard bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned int num_discard_bios;

	/*
	 * The number of secure erase bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned int num_secure_erase_bios;

	/*
	 * The number of WRITE ZEROES bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned int num_write_zeroes_bios;

	/*
	 * The minimum number of extra bytes allocated in each io for the
	 * target to use.
	 */
	unsigned int per_io_data_size;

	/* target specific data */
	void *private;

	/* Used to provide an error string from the ctr */
	char *error;

	/*
	 * Set if this target needs to receive flushes regardless of
	 * whether or not its underlying devices have support.
	 */
	bool flush_supported:1;

	/*
	 * Set if this target needs to receive discards regardless of
	 * whether or not its underlying devices have support.
	 */
	bool discards_supported:1;

	/*
	 * Set if this target requires that discards be split on
	 * 'max_discard_sectors' boundaries.
	 */
	bool max_discard_granularity:1;

	/*
	 * Set if this target requires that secure_erases be split on
	 * 'max_secure_erase_sectors' boundaries.
	 */
	bool max_secure_erase_granularity:1;

	/*
	 * Set if this target requires that write_zeroes be split on
	 * 'max_write_zeroes_sectors' boundaries.
	 */
	bool max_write_zeroes_granularity:1;

	/*
	 * Set if we need to limit the number of in-flight bios when swapping.
	 */
	bool limit_swap_bios:1;

	/*
	 * Set if this target implements a zoned device and needs emulation of
	 * zone append operations using regular writes.
	 */
	bool emulate_zone_append:1;

	/*
	 * Set if the target will submit IO using dm_submit_bio_remap()
	 * after returning DM_MAPIO_SUBMITTED from its map function.
	 */
	bool accounts_remapped_io:1;

	/*
	 * Set if the target will submit the DM bio without first calling
	 * bio_set_dev(). NOTE: ideally a target should _not_ need this.
	 */
	bool needs_bio_set_dev:1;
};

void *dm_per_bio_data(struct bio *bio, size_t data_size);
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
unsigned int dm_bio_get_target_bio_nr(const struct bio *bio);

u64 dm_start_time_ns_from_clone(struct bio *bio);
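
/*
 * Example (an illustrative sketch, not part of this interface): reserving
 * and using per-bio data.  The struct name example_io and its field are
 * hypothetical.  The ctr reserves the space; map/end_io then retrieve it.
 *
 *	struct example_io {
 *		sector_t orig_sector;
 *	};
 *
 * In the ctr:
 *
 *	ti->per_io_data_size = sizeof(struct example_io);
 *
 * In the map function:
 *
 *	struct example_io *io =
 *		dm_per_bio_data(bio, sizeof(struct example_io));
 *	io->orig_sector = bio->bi_iter.bi_sector;
 */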

int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);

/*
 * Target argument parsing.
 */
struct dm_arg_set {
	unsigned int argc;
	char **argv;
};

/*
 * The minimum and maximum value of a numeric argument, together with
 * the error message to use if the number is found to be outside that range.
 */
struct dm_arg {
	unsigned int min;
	unsigned int max;
	char *error;
};

/*
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned int *value, char **error);

/*
 * Process the next argument as the start of a group containing between
 * arg->min and arg->max further arguments. Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned int *num_args, char **error);

/*
 * Return the current argument and shift to the next.
 */
const char *dm_shift_arg(struct dm_arg_set *as);

/*
 * Move through num_args arguments.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned int num_args);
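
/*
 * Example (an illustrative sketch, not part of this interface): parsing
 * "<dev> <count>" style ctr arguments with the helpers above.  The bounds
 * and error strings are hypothetical.
 *
 *	static const struct dm_arg count_arg = {
 *		.min = 1, .max = 16, .error = "Invalid count",
 *	};
 *	struct dm_arg_set as = { .argc = argc, .argv = argv };
 *	const char *dev_path;
 *	unsigned int count;
 *
 *	dev_path = dm_shift_arg(&as);
 *	if (!dev_path) {
 *		ti->error = "Missing device path";
 *		return -EINVAL;
 *	}
 *	if (dm_read_arg(&count_arg, &as, &count, &ti->error))
 *		return -EINVAL;
 */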

/*
 *----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *----------------------------------------------------------------
 */

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
int dm_hold(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned int suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_post_suspending(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors);
void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone);
union map_info *dm_get_rq_mapinfo(struct request *rq);

#ifdef CONFIG_BLK_DEV_ZONED
struct dm_report_zones_args {
	struct dm_target *tgt;
	sector_t next_sector;

	void *orig_data;
	report_zones_cb orig_cb;
	unsigned int zone_idx;

	/* must be filled by ->report_zones before calling dm_report_zones */
	sector_t start;
};
int dm_report_zones(struct block_device *bdev, sector_t start, sector_t sector,
		    struct dm_report_zones_args *args, unsigned int nr_zones);
#endif /* CONFIG_BLK_DEV_ZONED */
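
/*
 * Example (an illustrative sketch, not part of this interface): a target's
 * report_zones method that forwards to the underlying device, in the style
 * of dm-linear.  example_c and example_map_sector are hypothetical.
 *
 *	static int example_report_zones(struct dm_target *ti,
 *			struct dm_report_zones_args *args, unsigned int nr_zones)
 *	{
 *		struct example_c *ec = ti->private;
 *
 *		return dm_report_zones(ec->dev->bdev, ec->start,
 *				       example_map_sector(ti, args->next_sector),
 *				       args, nr_zones);
 *	}
 */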

/*
 * Device mapper functions to parse and create devices specified by the
 * parameter "dm-mod.create="
 */
int __init dm_early_create(struct dm_ioctl *dmi,
			   struct dm_target_spec **spec_array,
			   char **target_params_array);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);

/*
 *---------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------
 */

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, blk_mode_t mode,
		    unsigned int num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params);

/*
 * Target can use this to set the table's type.
 * Can only ever be called from a target's ctr.
 * Useful for a "hybrid" target (one that supports both bio-based
 * and request-based operation).
 */
void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);

/*
 * Destroy the table when finished.
 */
void dm_table_destroy(struct dm_table *t);
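
/*
 * Example (an illustrative sketch, not part of this interface): the
 * create/add/complete/destroy sequence.  The device, table size and
 * parameter string are hypothetical; BLK_OPEN_* are the blk_mode_t flags
 * used by recent kernels.
 *
 *	struct dm_table *t;
 *	int r;
 *
 *	r = dm_table_create(&t, BLK_OPEN_READ | BLK_OPEN_WRITE, 1, md);
 *	if (r)
 *		return r;
 *	r = dm_table_add_target(t, "linear", 0, 8192, "/dev/sda 0");
 *	if (!r)
 *		r = dm_table_complete(t);
 *	if (r) {
 *		dm_table_destroy(t);
 *		return r;
 *	}
 */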

/*
 * Target may require that it is never sent I/O larger than len.
 */
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
void dm_put_live_table(struct mapped_device *md, int srcu_idx);
void dm_sync_table(struct mapped_device *md);

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
blk_mode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);
const char *dm_table_device_name(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * Run the queue for request-based targets.
 */
void dm_table_run_md_queue_async(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
			       struct dm_table *t);

/*
 * Table blk_crypto_profile functions
 */
void dm_destroy_crypto_profile(struct blk_crypto_profile *profile);

/*
 *---------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------
 */
#define DM_NAME "device-mapper"

#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"

#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)

#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
#define DMERR_LIMIT(fmt, ...) pr_err_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN_LIMIT(fmt, ...) pr_warn_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)

#define DMDEBUG(fmt, ...) pr_debug(DM_FMT(fmt), ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
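
/*
 * Note: DM_FMT() references DM_MSG_PREFIX, which each user must #define
 * before using these macros.  For example, with a hypothetical prefix:
 *
 *	#define DM_MSG_PREFIX "example"
 *
 *	DMERR("failed to open %s", path);
 *
 * logs "device-mapper: example: failed to open <path>".
 */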

#define DMEMIT(x...) (sz += ((sz >= maxlen) ? 0 : scnprintf(result + sz, maxlen - sz, x)))

#define DMEMIT_TARGET_NAME_VERSION(y) \
	DMEMIT("target_name=%s,target_version=%u.%u.%u", \
	       (y)->name, (y)->version[0], (y)->version[1], (y)->version[2])
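
/*
 * Example (an illustrative sketch, not part of this interface): a status
 * method emitting into the caller-supplied buffer.  DMEMIT expects local
 * variables named sz, result and maxlen, matching the dm_status_fn
 * signature; example_c and example_status are hypothetical.
 *
 *	static void example_status(struct dm_target *ti, status_type_t type,
 *			unsigned int status_flags, char *result, unsigned int maxlen)
 *	{
 *		struct example_c *ec = ti->private;
 *		size_t sz = 0;
 *
 *		switch (type) {
 *		case STATUSTYPE_INFO:
 *		case STATUSTYPE_IMA:
 *			result[0] = '\0';
 *			break;
 *		case STATUSTYPE_TABLE:
 *			DMEMIT("%s %llu", ec->dev->name,
 *			       (unsigned long long)ec->start);
 *			break;
 *		}
 *	}
 */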

/**
 * module_dm() - Helper macro for DM targets that don't do anything
 * special in their module_init and module_exit.
 * Each module may only use this macro once, and calling it replaces
 * module_init() and module_exit().
 *
 * @name: DM target's name
 */
#define module_dm(name) \
static int __init dm_##name##_init(void) \
{ \
	return dm_register_target(&(name##_target)); \
} \
module_init(dm_##name##_init) \
static void __exit dm_##name##_exit(void) \
{ \
	dm_unregister_target(&(name##_target)); \
} \
module_exit(dm_##name##_exit)
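
/*
 * Example (an illustrative sketch, not part of this interface): registering
 * the hypothetical target from the sketches above.  module_dm(example)
 * expands to init/exit functions that register and unregister a variable
 * named example_target.
 *
 *	static struct target_type example_target = {
 *		.name    = "example",
 *		.version = {1, 0, 0},
 *		.module  = THIS_MODULE,
 *		.ctr     = example_ctr,
 *		.dtr     = example_dtr,
 *		.map     = example_map,
 *	};
 *	module_dm(example);
 */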

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_DONE		0
#define DM_ENDIO_INCOMPLETE	1
#define DM_ENDIO_REQUEUE	2
#define DM_ENDIO_DELAY_REQUEUE	3

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED	0
#define DM_MAPIO_REMAPPED	1
#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE
#define DM_MAPIO_DELAY_REQUEUE	DM_ENDIO_DELAY_REQUEUE
#define DM_MAPIO_KILL		4

#define dm_sector_div64(x, y)( \
{ \
	u64 _res; \
	(x) = div64_u64_rem(x, y, &_res); \
	_res; \
} \
)

/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
	sector_t _r = ((n) + (sz) - 1); \
	sector_div(_r, (sz)); \
	_r; \
} \
)

/*
 * ceiling(n / size) * size
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
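
/*
 * For instance, dm_div_up(1000, 512) == 2 and dm_round_up(1000, 512) == 1024,
 * i.e. dm_round_up() pads n up to the next multiple of sz.
 */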

/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

static inline sector_t to_sector(unsigned long long n)
{
	return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
	return (n << SECTOR_SHIFT);
}

#endif	/* _LINUX_DEVICE_MAPPER_H */