include/linux/device-mapper.h (v5.14.15)

/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/dm-ioctl.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct dm_report_zones_args;
struct mapped_device;
struct bio_vec;

/*
 * Type of table, mapped_device's mempool and request_queue
 */
enum dm_queue_mode {
	DM_TYPE_NONE		 = 0,
	DM_TYPE_BIO_BASED	 = 1,
	DM_TYPE_REQUEST_BASED	 = 2,
	DM_TYPE_DAX_BIO_BASED	 = 3,
};

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;

union map_info {
	void *ptr;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
			  unsigned int argc, char **argv);

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
					    struct request *rq,
					    union map_info *map_context,
					    struct request **clone);
typedef void (*dm_release_clone_request_fn) (struct request *clone,
					     union map_info *map_context);
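
/*
 * Illustrative sketch (not part of this header): a minimal bio-based
 * map function that remaps each bio onto a single underlying device,
 * in the style of dm-linear.  The context structure and its fields
 * are hypothetical:
 *
 *	struct example_ctx {
 *		struct dm_dev *dev;
 *		sector_t start;
 *	};
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct example_ctx *ec = ti->private;
 *
 *		bio_set_dev(bio, ec->dev->bdev);
 *		bio->bi_iter.bi_sector = ec->start +
 *			dm_target_offset(ti, bio->bi_iter.bi_sector);
 *		return DM_MAPIO_REMAPPED;
 *	}
 *
 * DM_MAPIO_REMAPPED (defined later in this header) corresponds to the
 * "= 1: simple remap complete" case above.
 */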

/*
 * Returns:
 * < 0 : error (currently ignored)
 * 0   : ended successfully
 * 1   : for some reason the io has still not completed (eg,
 *       multipath target might want to requeue a failed io).
 * 2   : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
			    struct bio *bio, blk_status_t *error);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
				    struct request *clone, blk_status_t error,
				    union map_info *map_context);

typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
			      unsigned status_flags, char *result, unsigned maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv,
			      char *result, unsigned maxlen);

typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);

#ifdef CONFIG_BLK_DEV_ZONED
typedef int (*dm_report_zones_fn) (struct dm_target *ti,
				   struct dm_report_zones_args *args,
				   unsigned int nr_zones);
#else
/*
 * Define dm_report_zones_fn so that targets can assign to NULL if
 * CONFIG_BLK_DEV_ZONED is disabled. Otherwise each target needs to do
 * awkward #ifdefs in their target_type, etc.
 */
typedef int (*dm_report_zones_fn) (struct dm_target *dummy);
#endif

/*
 * These iteration functions are typically used to check (and combine)
 * properties of underlying devices.
 * E.g. Does at least one underlying device support flush?
 *      Does any underlying device not support WRITE_SAME?
 *
 * The callout function is called once for each contiguous section of
 * an underlying device.  State can be maintained in *data.
 * Return non-zero to stop iterating through any further devices.
 */
typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
					   struct dm_dev *dev,
					   sector_t start, sector_t len,
					   void *data);

/*
 * This function must iterate through each section of device used by the
 * target until it encounters a non-zero return code, which it then returns.
 * Returns zero if no callout returned non-zero.
 */
typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data);

typedef void (*dm_io_hints_fn) (struct dm_target *ti,
				struct queue_limits *limits);

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

/*
 * Returns:
 *  < 0 : error
 * >= 0 : the number of bytes accessible at the address
 */
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn);
typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i);
typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
		size_t nr_pages);
#define PAGE_SECTORS (PAGE_SIZE / 512)

void dm_error(const char *message);

struct dm_dev {
	struct block_device *bdev;
	struct dax_device *dax_dev;
	fmode_t mode;
	char name[16];
};

dev_t dm_get_dev_t(const char *path);

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
		  struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);
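
/*
 * Illustrative sketch (not part of this header): a constructor that
 * opens its single destination device with dm_get_device() and a
 * destructor that releases it.  example_ctx and the argument layout
 * are hypothetical:
 *
 *	static int example_ctr(struct dm_target *ti,
 *			       unsigned int argc, char **argv)
 *	{
 *		struct example_ctx *ec;
 *
 *		if (argc != 1) {
 *			ti->error = "Invalid argument count";
 *			return -EINVAL;
 *		}
 *		ec = kzalloc(sizeof(*ec), GFP_KERNEL);
 *		if (!ec)
 *			return -ENOMEM;
 *		if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *				  &ec->dev)) {
 *			ti->error = "Device lookup failed";
 *			kfree(ec);
 *			return -EINVAL;
 *		}
 *		ti->private = ec;
 *		return 0;
 *	}
 *
 *	static void example_dtr(struct dm_target *ti)
 *	{
 *		struct example_ctx *ec = ti->private;
 *
 *		dm_put_device(ti, ec->dev);
 *		kfree(ec);
 *	}
 */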

/*
 * Information about a target type
 */

struct target_type {
	uint64_t features;
	const char *name;
	struct module *module;
	unsigned version[3];
	dm_ctr_fn ctr;
	dm_dtr_fn dtr;
	dm_map_fn map;
	dm_clone_and_map_request_fn clone_and_map_rq;
	dm_release_clone_request_fn release_clone_rq;
	dm_endio_fn end_io;
	dm_request_endio_fn rq_end_io;
	dm_presuspend_fn presuspend;
	dm_presuspend_undo_fn presuspend_undo;
	dm_postsuspend_fn postsuspend;
	dm_preresume_fn preresume;
	dm_resume_fn resume;
	dm_status_fn status;
	dm_message_fn message;
	dm_prepare_ioctl_fn prepare_ioctl;
	dm_report_zones_fn report_zones;
	dm_busy_fn busy;
	dm_iterate_devices_fn iterate_devices;
	dm_io_hints_fn io_hints;
	dm_dax_direct_access_fn direct_access;
	dm_dax_copy_iter_fn dax_copy_from_iter;
	dm_dax_copy_iter_fn dax_copy_to_iter;
	dm_dax_zero_page_range_fn dax_zero_page_range;

	/* For internal device-mapper use. */
	struct list_head list;
};

/*
 * Target features
 */

/*
 * Any table that contains an instance of this target must have only one.
 */
#define DM_TARGET_SINGLETON		0x00000001
#define dm_target_needs_singleton(type)	((type)->features & DM_TARGET_SINGLETON)

/*
 * Indicates that a target does not support read-only devices.
 */
#define DM_TARGET_ALWAYS_WRITEABLE	0x00000002
#define dm_target_always_writeable(type) \
		((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

/*
 * Any device that contains a table with an instance of this target may never
 * have tables containing any different target type.
 */
#define DM_TARGET_IMMUTABLE		0x00000004
#define dm_target_is_immutable(type)	((type)->features & DM_TARGET_IMMUTABLE)

/*
 * Indicates that a target may replace any target; even immutable targets.
 * .map, .map_rq, .clone_and_map_rq and .release_clone_rq are all defined.
 */
#define DM_TARGET_WILDCARD		0x00000008
#define dm_target_is_wildcard(type)	((type)->features & DM_TARGET_WILDCARD)

/*
 * A target implements its own bio data integrity.
 */
#define DM_TARGET_INTEGRITY		0x00000010
#define dm_target_has_integrity(type)	((type)->features & DM_TARGET_INTEGRITY)

/*
 * A target passes integrity data to the lower device.
 */
#define DM_TARGET_PASSES_INTEGRITY	0x00000020
#define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)

/*
 * Indicates support for zoned block devices:
 * - DM_TARGET_ZONED_HM: the target also supports host-managed zoned
 *   block devices but does not support combining different zoned models.
 * - DM_TARGET_MIXED_ZONED_MODEL: the target supports combining multiple
 *   devices with different zoned models.
 */
#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_ZONED_HM		0x00000040
#define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM)
#else
#define DM_TARGET_ZONED_HM		0x00000000
#define dm_target_supports_zoned_hm(type) (false)
#endif

/*
 * A target handles REQ_NOWAIT
 */
#define DM_TARGET_NOWAIT		0x00000080
#define dm_target_supports_nowait(type) ((type)->features & DM_TARGET_NOWAIT)

/*
 * A target supports passing through inline crypto support.
 */
#define DM_TARGET_PASSES_CRYPTO		0x00000100
#define dm_target_passes_crypto(type) ((type)->features & DM_TARGET_PASSES_CRYPTO)

#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_MIXED_ZONED_MODEL	0x00000200
#define dm_target_supports_mixed_zoned_model(type) \
	((type)->features & DM_TARGET_MIXED_ZONED_MODEL)
#else
#define DM_TARGET_MIXED_ZONED_MODEL	0x00000000
#define dm_target_supports_mixed_zoned_model(type) (false)
#endif

struct dm_target {
	struct dm_table *table;
	struct target_type *type;

	/* target limits */
	sector_t begin;
	sector_t len;

	/* If non-zero, maximum size of I/O submitted to a target. */
	uint32_t max_io_len;

	/*
	 * A number of zero-length barrier bios that will be submitted
	 * to the target for the purpose of flushing cache.
	 *
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 * It is the responsibility of the target driver to remap these bios
	 * to the real underlying devices.
	 */
	unsigned num_flush_bios;

	/*
	 * The number of discard bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_discard_bios;

	/*
	 * The number of secure erase bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_secure_erase_bios;

	/*
	 * The number of WRITE SAME bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_write_same_bios;

	/*
	 * The number of WRITE ZEROES bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_write_zeroes_bios;

	/*
	 * The minimum number of extra bytes allocated in each io for the
	 * target to use.
	 */
	unsigned per_io_data_size;

	/* target specific data */
	void *private;

	/* Used to provide an error string from the ctr */
	char *error;

	/*
	 * Set if this target needs to receive flushes regardless of
	 * whether or not its underlying devices have support.
	 */
	bool flush_supported:1;

	/*
	 * Set if this target needs to receive discards regardless of
	 * whether or not its underlying devices have support.
	 */
	bool discards_supported:1;

	/*
	 * Set if we need to limit the number of in-flight bios when swapping.
	 */
	bool limit_swap_bios:1;

	/*
	 * Set if this target implements a zoned device and needs emulation of
	 * zone append operations using regular writes.
	 */
	bool emulate_zone_append:1;
};

void *dm_per_bio_data(struct bio *bio, size_t data_size);
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
unsigned dm_bio_get_target_bio_nr(const struct bio *bio);
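
/*
 * Illustrative sketch (not part of this header): per-bio data is
 * reserved by setting ti->per_io_data_size in the constructor and
 * looked up from the bio in map/end_io.  The payload type is
 * hypothetical:
 *
 *	struct example_per_bio_data {
 *		u64 start_ns;
 *	};
 *
 *	In the ctr:
 *		ti->per_io_data_size = sizeof(struct example_per_bio_data);
 *
 *	In the map function:
 *		struct example_per_bio_data *pb =
 *			dm_per_bio_data(bio, sizeof(struct example_per_bio_data));
 */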

u64 dm_start_time_ns_from_clone(struct bio *bio);

int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);
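
/*
 * Illustrative sketch (not part of this header): a target type is
 * normally registered from module init and unregistered on exit.
 * example_target and its callbacks are hypothetical:
 *
 *	static struct target_type example_target = {
 *		.name    = "example",
 *		.version = {1, 0, 0},
 *		.module  = THIS_MODULE,
 *		.ctr     = example_ctr,
 *		.dtr     = example_dtr,
 *		.map     = example_map,
 *	};
 *
 *	static int __init dm_example_init(void)
 *	{
 *		return dm_register_target(&example_target);
 *	}
 *
 *	static void __exit dm_example_exit(void)
 *	{
 *		dm_unregister_target(&example_target);
 *	}
 */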

/*
 * Target argument parsing.
 */
struct dm_arg_set {
	unsigned argc;
	char **argv;
};

/*
 * The minimum and maximum value of a numeric argument, together with
 * the error message to use if the number is found to be outside that range.
 */
struct dm_arg {
	unsigned min;
	unsigned max;
	char *error;
};

/*
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned *value, char **error);

/*
 * Process the next argument as the start of a group containing between
 * arg->min and arg->max further arguments. Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned *num_args, char **error);

/*
 * Return the current argument and shift to the next.
 */
const char *dm_shift_arg(struct dm_arg_set *as);

/*
 * Move through num_args arguments.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned num_args);
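
/*
 * Illustrative sketch (not part of this header): parsing one bounded
 * numeric argument followed by a device path with the helpers above.
 * The bounds and error strings are hypothetical:
 *
 *	static int example_parse_args(struct dm_target *ti,
 *				      struct dm_arg_set *as)
 *	{
 *		static const struct dm_arg chunk_arg = {
 *			1, 1024, "chunk size out of range"
 *		};
 *		unsigned chunk_size;
 *		const char *dev_path;
 *		int r;
 *
 *		r = dm_read_arg(&chunk_arg, as, &chunk_size, &ti->error);
 *		if (r)
 *			return r;
 *
 *		dev_path = dm_shift_arg(as);
 *		if (!dev_path) {
 *			ti->error = "Missing device path";
 *			return -EINVAL;
 *		}
 *		return 0;
 *	}
 */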

/*-----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *---------------------------------------------------------------*/

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
int dm_hold(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_post_suspending(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
union map_info *dm_get_rq_mapinfo(struct request *rq);

#ifdef CONFIG_BLK_DEV_ZONED
struct dm_report_zones_args {
	struct dm_target *tgt;
	sector_t next_sector;

	void *orig_data;
	report_zones_cb orig_cb;
	unsigned int zone_idx;

	/* must be filled by ->report_zones before calling dm_report_zones_cb */
	sector_t start;
};
int dm_report_zones(struct block_device *bdev, sector_t start, sector_t sector,
		    struct dm_report_zones_args *args, unsigned int nr_zones);
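
/*
 * Illustrative sketch (not part of this header), modelled on
 * dm-linear's pass-through implementation: a target's ->report_zones
 * forwards to its one underlying device, remapping the start sector.
 * example_ctx is hypothetical:
 *
 *	static int example_report_zones(struct dm_target *ti,
 *			struct dm_report_zones_args *args,
 *			unsigned int nr_zones)
 *	{
 *		struct example_ctx *ec = ti->private;
 *
 *		return dm_report_zones(ec->dev->bdev, ec->start,
 *			ec->start + dm_target_offset(ti, args->next_sector),
 *			args, nr_zones);
 *	}
 */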
#endif /* CONFIG_BLK_DEV_ZONED */

/*
 * Device mapper functions to parse and create devices specified by the
 * parameter "dm-mod.create="
 */
int __init dm_early_create(struct dm_ioctl *dmi,
			   struct dm_target_spec **spec_array,
			   char **target_params_array);

struct queue_limits *dm_get_queue_limits(struct mapped_device *md);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);

/*-----------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------*/

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params);

/*
 * Target can use this to set the table's type.
 * Can only ever be called from a target's ctr.
 * Useful for "hybrid" target (supports both bio-based
 * and request-based).
 */
void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);
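
/*
 * Illustrative sketch (not part of this header): the create /
 * add_target / complete sequence.  The mode, sizes and parameter
 * string are hypothetical:
 *
 *	struct dm_table *t;
 *	char params[] = "/dev/sdb 0";
 *	int r;
 *
 *	r = dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
 *	if (r)
 *		return r;
 *	r = dm_table_add_target(t, "linear", 0, 1024, params);
 *	if (!r)
 *		r = dm_table_complete(t);
 *	if (r)
 *		dm_table_destroy(t);
 */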

/*
 * Destroy the table when finished.
 */
void dm_table_destroy(struct dm_table *t);

/*
 * Target may require that it is never sent I/O larger than len.
 */
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
void dm_put_live_table(struct mapped_device *md, int srcu_idx);
void dm_sync_table(struct mapped_device *md);

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);
const char *dm_table_device_name(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * Run the queue for request-based targets.
 */
void dm_table_run_md_queue_async(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
			       struct dm_table *t);

/*
 * Table keyslot manager functions
 */
void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm);

/*-----------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------*/
#define DM_NAME "device-mapper"

#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"

#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)

#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
#define DMERR_LIMIT(fmt, ...) pr_err_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN_LIMIT(fmt, ...) pr_warn_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)

#define DMDEBUG(fmt, ...) pr_debug(DM_FMT(fmt), ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)

#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
			  0 : scnprintf(result + sz, maxlen - sz, x))
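
/*
 * Illustrative sketch (not part of this header): DMEMIT() expects the
 * enclosing function to declare 'result' and 'maxlen' plus a running
 * 'sz', as a target's status callback does:
 *
 *	static void example_status(struct dm_target *ti,
 *			status_type_t type, unsigned status_flags,
 *			char *result, unsigned maxlen)
 *	{
 *		unsigned sz = 0;
 *
 *		switch (type) {
 *		case STATUSTYPE_INFO:
 *			result[0] = '\0';
 *			break;
 *		case STATUSTYPE_TABLE:
 *			DMEMIT("%u %s", 42, "example");
 *			break;
 *		}
 *	}
 */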

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_DONE		0
#define DM_ENDIO_INCOMPLETE	1
#define DM_ENDIO_REQUEUE	2
#define DM_ENDIO_DELAY_REQUEUE	3

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED	0
#define DM_MAPIO_REMAPPED	1
#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE
#define DM_MAPIO_DELAY_REQUEUE	DM_ENDIO_DELAY_REQUEUE
#define DM_MAPIO_KILL		4

#define dm_sector_div64(x, y)( \
{ \
	u64 _res; \
	(x) = div64_u64_rem(x, y, &_res); \
	_res; \
} \
)

/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
	sector_t _r = ((n) + (sz) - 1); \
	sector_div(_r, (sz)); \
	_r; \
} \
)

/*
 * ceiling(n / size) * size
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
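
/*
 * For example, dm_div_up(1000, 512) == 2 and
 * dm_round_up(1000, 512) == 1024.
 */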

/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

static inline sector_t to_sector(unsigned long long n)
{
	return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
	return (n << SECTOR_SHIFT);
}

#endif	/* _LINUX_DEVICE_MAPPER_H */

include/linux/device-mapper.h (v6.13.7)

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/dm-ioctl.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct dm_report_zones_args;
struct mapped_device;
struct bio_vec;
enum dax_access_mode;

/*
 * Type of table, mapped_device's mempool and request_queue
 */
enum dm_queue_mode {
	DM_TYPE_NONE		 = 0,
	DM_TYPE_BIO_BASED	 = 1,
	DM_TYPE_REQUEST_BASED	 = 2,
	DM_TYPE_DAX_BIO_BASED	 = 3,
};

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE, STATUSTYPE_IMA } status_type_t;

union map_info {
	void *ptr;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
			  unsigned int argc, char **argv);

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
					    struct request *rq,
					    union map_info *map_context,
					    struct request **clone);
typedef void (*dm_release_clone_request_fn) (struct request *clone,
					     union map_info *map_context);

/*
 * Returns:
 * < 0 : error (currently ignored)
 * 0   : ended successfully
 * 1   : for some reason the io has still not completed (eg,
 *       multipath target might want to requeue a failed io).
 * 2   : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
			    struct bio *bio, blk_status_t *error);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
				    struct request *clone, blk_status_t error,
				    union map_info *map_context);

typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
			      unsigned int status_flags, char *result, unsigned int maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned int argc, char **argv,
			      char *result, unsigned int maxlen);

typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);

#ifdef CONFIG_BLK_DEV_ZONED
typedef int (*dm_report_zones_fn) (struct dm_target *ti,
				   struct dm_report_zones_args *args,
				   unsigned int nr_zones);
#else
/*
 * Define dm_report_zones_fn so that targets can assign to NULL if
 * CONFIG_BLK_DEV_ZONED is disabled. Otherwise each target needs to do
 * awkward #ifdefs in their target_type, etc.
 */
typedef int (*dm_report_zones_fn) (struct dm_target *dummy);
#endif

/*
 * These iteration functions are typically used to check (and combine)
 * properties of underlying devices.
 * E.g. Does at least one underlying device support flush?
 *      Does any underlying device not support WRITE_SAME?
 *
 * The callout function is called once for each contiguous section of
 * an underlying device.  State can be maintained in *data.
 * Return non-zero to stop iterating through any further devices.
 */
typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
					   struct dm_dev *dev,
					   sector_t start, sector_t len,
					   void *data);

/*
 * This function must iterate through each section of device used by the
 * target until it encounters a non-zero return code, which it then returns.
 * Returns zero if no callout returned non-zero.
 */
typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data);

typedef void (*dm_io_hints_fn) (struct dm_target *ti,
				struct queue_limits *limits);

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

/*
 * Returns:
 *  < 0 : error
 * >= 0 : the number of bytes accessible at the address
 */
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode node, void **kaddr,
		pfn_t *pfn);
typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
		size_t nr_pages);

/*
 * Returns:
 * != 0 : number of bytes transferred
 * 0    : recovery write failed
 */
typedef size_t (*dm_dax_recovery_write_fn)(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i);

void dm_error(const char *message);

struct dm_dev {
	struct block_device *bdev;
	struct file *bdev_file;
	struct dax_device *dax_dev;
	blk_mode_t mode;
	char name[16];
};

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode,
		  struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);

/*
 * Helper function for getting devices
 */
int dm_devt_from_path(const char *path, dev_t *dev_p);

/*
 * Information about a target type
 */

struct target_type {
	uint64_t features;
	const char *name;
	struct module *module;
	unsigned int version[3];
	dm_ctr_fn ctr;
	dm_dtr_fn dtr;
	dm_map_fn map;
	dm_clone_and_map_request_fn clone_and_map_rq;
	dm_release_clone_request_fn release_clone_rq;
	dm_endio_fn end_io;
	dm_request_endio_fn rq_end_io;
	dm_presuspend_fn presuspend;
	dm_presuspend_undo_fn presuspend_undo;
	dm_postsuspend_fn postsuspend;
	dm_preresume_fn preresume;
	dm_resume_fn resume;
	dm_status_fn status;
	dm_message_fn message;
	dm_prepare_ioctl_fn prepare_ioctl;
	dm_report_zones_fn report_zones;
	dm_busy_fn busy;
	dm_iterate_devices_fn iterate_devices;
	dm_io_hints_fn io_hints;
	dm_dax_direct_access_fn direct_access;
	dm_dax_zero_page_range_fn dax_zero_page_range;
	dm_dax_recovery_write_fn dax_recovery_write;

	/* For internal device-mapper use. */
	struct list_head list;
};

/*
 * Target features
 */

/*
 * Any table that contains an instance of this target must have only one.
 */
#define DM_TARGET_SINGLETON		0x00000001
#define dm_target_needs_singleton(type)	((type)->features & DM_TARGET_SINGLETON)

/*
 * Indicates that a target does not support read-only devices.
 */
#define DM_TARGET_ALWAYS_WRITEABLE	0x00000002
#define dm_target_always_writeable(type) \
		((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

/*
 * Any device that contains a table with an instance of this target may never
 * have tables containing any different target type.
 */
#define DM_TARGET_IMMUTABLE		0x00000004
#define dm_target_is_immutable(type)	((type)->features & DM_TARGET_IMMUTABLE)

/*
 * Indicates that a target may replace any target; even immutable targets.
 * .map, .map_rq, .clone_and_map_rq and .release_clone_rq are all defined.
 */
#define DM_TARGET_WILDCARD		0x00000008
#define dm_target_is_wildcard(type)	((type)->features & DM_TARGET_WILDCARD)

/*
 * A target implements its own bio data integrity.
 */
#define DM_TARGET_INTEGRITY		0x00000010
#define dm_target_has_integrity(type)	((type)->features & DM_TARGET_INTEGRITY)

/*
 * A target passes integrity data to the lower device.
 */
#define DM_TARGET_PASSES_INTEGRITY	0x00000020
#define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)

/*
 * Indicates support for zoned block devices:
 * - DM_TARGET_ZONED_HM: the target also supports host-managed zoned
 *   block devices but does not support combining different zoned models.
 * - DM_TARGET_MIXED_ZONED_MODEL: the target supports combining multiple
 *   devices with different zoned models.
 */
#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_ZONED_HM		0x00000040
#define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM)
#else
#define DM_TARGET_ZONED_HM		0x00000000
#define dm_target_supports_zoned_hm(type) (false)
#endif

/*
 * A target handles REQ_NOWAIT
 */
#define DM_TARGET_NOWAIT		0x00000080
#define dm_target_supports_nowait(type) ((type)->features & DM_TARGET_NOWAIT)

/*
 * A target supports passing through inline crypto support.
 */
#define DM_TARGET_PASSES_CRYPTO		0x00000100
#define dm_target_passes_crypto(type) ((type)->features & DM_TARGET_PASSES_CRYPTO)

#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_MIXED_ZONED_MODEL	0x00000200
#define dm_target_supports_mixed_zoned_model(type) \
	((type)->features & DM_TARGET_MIXED_ZONED_MODEL)
#else
#define DM_TARGET_MIXED_ZONED_MODEL	0x00000000
#define dm_target_supports_mixed_zoned_model(type) (false)
#endif

struct dm_target {
	struct dm_table *table;
	struct target_type *type;

	/* target limits */
	sector_t begin;
	sector_t len;

	/* If non-zero, maximum size of I/O submitted to a target. */
	uint32_t max_io_len;

	/*
	 * A number of zero-length barrier bios that will be submitted
	 * to the target for the purpose of flushing cache.
	 *
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 * It is the responsibility of the target driver to remap these bios
	 * to the real underlying devices.
	 */
	unsigned int num_flush_bios;

	/*
	 * The number of discard bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned int num_discard_bios;

	/*
	 * The number of secure erase bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned int num_secure_erase_bios;

	/*
	 * The number of WRITE ZEROES bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned int num_write_zeroes_bios;

	/*
	 * The minimum number of extra bytes allocated in each io for the
	 * target to use.
	 */
	unsigned int per_io_data_size;

	/* target specific data */
	void *private;

	/* Used to provide an error string from the ctr */
	char *error;

	/*
	 * Set if this target needs to receive flushes regardless of
	 * whether or not its underlying devices have support.
	 */
	bool flush_supported:1;

	/*
	 * Set if this target needs to receive discards regardless of
	 * whether or not its underlying devices have support.
	 */
	bool discards_supported:1;

	/*
	 * Automatically set by dm-core if this target supports
	 * REQ_OP_ZONE_RESET_ALL. Otherwise, this operation will be emulated
	 * using REQ_OP_ZONE_RESET. Target drivers must not set this manually.
	 */
	bool zone_reset_all_supported:1;

	/*
	 * Set if this target requires that discards be split on
	 * 'max_discard_sectors' boundaries.
	 */
	bool max_discard_granularity:1;

	/*
	 * Set if we need to limit the number of in-flight bios when swapping.
	 */
	bool limit_swap_bios:1;

	/*
	 * Set if this target implements a zoned device and needs emulation of
	 * zone append operations using regular writes.
	 */
	bool emulate_zone_append:1;

	/*
	 * Set if the target will submit IO using dm_submit_bio_remap()
	 * after returning DM_MAPIO_SUBMITTED from its map function.
	 */
	bool accounts_remapped_io:1;

	/*
	 * Set if the target will submit the DM bio without first calling
	 * bio_set_dev(). NOTE: ideally a target should _not_ need this.
	 */
	bool needs_bio_set_dev:1;

	/*
	 * Set if the target supports flush optimization. If all the targets in
	 * a table have flush_bypasses_map set, the dm core will not send
	 * flushes to the targets via a ->map method. It will iterate over
	 * dm_table->devices and send flushes to the devices directly. This
	 * optimization reduces the number of flushes being sent when multiple
	 * targets in a table use the same underlying device.
	 *
	 * This optimization may be enabled on targets that just pass the
	 * flushes to the underlying devices without performing any other
	 * actions on the flush request. Currently, dm-linear and dm-stripe
	 * support it.
	 */
	bool flush_bypasses_map:1;

	/*
	 * Set if the target calls bio_integrity_alloc on bios received
	 * in the map method.
	 */
	bool mempool_needs_integrity:1;
};

void *dm_per_bio_data(struct bio *bio, size_t data_size);
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
unsigned int dm_bio_get_target_bio_nr(const struct bio *bio);

u64 dm_start_time_ns_from_clone(struct bio *bio);

int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);

/*
 * Target argument parsing.
 */
struct dm_arg_set {
	unsigned int argc;
	char **argv;
};

/*
 * The minimum and maximum value of a numeric argument, together with
 * the error message to use if the number is found to be outside that range.
 */
struct dm_arg {
	unsigned int min;
	unsigned int max;
	char *error;
};

/*
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned int *value, char **error);

/*
 * Process the next argument as the start of a group containing between
 * arg->min and arg->max further arguments. Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned int *num_args, char **error);

/*
 * Return the current argument and shift to the next.
 */
const char *dm_shift_arg(struct dm_arg_set *as);

/*
 * Move through num_args arguments.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned int num_args);

/*
 *----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *----------------------------------------------------------------
 */

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
int dm_hold(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned int suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_post_suspending(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors);
void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone);
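
/*
 * Illustrative sketch (not part of this header): a target that sets
 * accounts_remapped_io submits the remapped bio itself via
 * dm_submit_bio_remap() and returns DM_MAPIO_SUBMITTED from its map
 * function.  A NULL tgt_clone submits the (already remapped) clone:
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		... remap bio to the underlying device ...
 *		dm_submit_bio_remap(bio, NULL);
 *		return DM_MAPIO_SUBMITTED;
 *	}
 */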

#ifdef CONFIG_BLK_DEV_ZONED
struct dm_report_zones_args {
	struct dm_target *tgt;
	sector_t next_sector;

	void *orig_data;
	report_zones_cb orig_cb;
	unsigned int zone_idx;

	/* must be filled by ->report_zones before calling dm_report_zones_cb */
	sector_t start;
};
int dm_report_zones(struct block_device *bdev, sector_t start, sector_t sector,
		    struct dm_report_zones_args *args, unsigned int nr_zones);
#endif /* CONFIG_BLK_DEV_ZONED */

/*
 * Device mapper functions to parse and create devices specified by the
 * parameter "dm-mod.create="
 */
int __init dm_early_create(struct dm_ioctl *dmi,
			   struct dm_target_spec **spec_array,
			   char **target_params_array);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);

/*
 *---------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------
 */

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, blk_mode_t mode,
		    unsigned int num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params);

/*
 * Target can use this to set the table's type.
 * Can only ever be called from a target's ctr.
 * Useful for "hybrid" target (supports both bio-based
 * and request-based).
 */
void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);

/*
 * Destroy the table when finished.
 */
void dm_table_destroy(struct dm_table *t);

/*
 * Target may require that it is never sent I/O larger than len.
 */
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
void dm_put_live_table(struct mapped_device *md, int srcu_idx);
void dm_sync_table(struct mapped_device *md);

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
blk_mode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);
const char *dm_table_device_name(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * Run the queue for request-based targets.
 */
void dm_table_run_md_queue_async(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
			       struct dm_table *t);

/*
 * Table blk_crypto_profile functions
 */
void dm_destroy_crypto_profile(struct blk_crypto_profile *profile);

/*
 *---------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------
 */
#define DM_NAME "device-mapper"

#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"

#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)

#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
#define DMERR_LIMIT(fmt, ...) pr_err_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN_LIMIT(fmt, ...) pr_warn_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)

#define DMDEBUG(fmt, ...) pr_debug(DM_FMT(fmt), ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)

#define DMEMIT(x...) (sz += ((sz >= maxlen) ? 0 : scnprintf(result + sz, maxlen - sz, x)))

#define DMEMIT_TARGET_NAME_VERSION(y) \
		DMEMIT("target_name=%s,target_version=%u.%u.%u", \
		       (y)->name, (y)->version[0], (y)->version[1], (y)->version[2])

/**
 * module_dm() - Helper macro for DM targets that don't do anything
 * special in their module_init and module_exit.
 * Each module may only use this macro once, and calling it replaces
 * module_init() and module_exit().
 *
 * @name: DM target's name
 */
#define module_dm(name) \
static int __init dm_##name##_init(void) \
{ \
	return dm_register_target(&(name##_target)); \
} \
module_init(dm_##name##_init) \
static void __exit dm_##name##_exit(void) \
{ \
	dm_unregister_target(&(name##_target)); \
} \
module_exit(dm_##name##_exit)
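
/*
 * Illustrative usage: given a target_type named example_target,
 *
 *	static struct target_type example_target = {
 *		.name   = "example",
 *		.module = THIS_MODULE,
 *		.ctr    = example_ctr,
 *		.dtr    = example_dtr,
 *		.map    = example_map,
 *	};
 *	module_dm(example);
 *
 * expands to module_init()/module_exit() functions that register and
 * unregister example_target.
 */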

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_DONE		0
#define DM_ENDIO_INCOMPLETE	1
#define DM_ENDIO_REQUEUE	2
#define DM_ENDIO_DELAY_REQUEUE	3

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED	0
#define DM_MAPIO_REMAPPED	1
#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE
#define DM_MAPIO_DELAY_REQUEUE	DM_ENDIO_DELAY_REQUEUE
#define DM_MAPIO_KILL		4

#define dm_sector_div64(x, y)( \
{ \
	u64 _res; \
	(x) = div64_u64_rem(x, y, &_res); \
	_res; \
} \
)

/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
	sector_t _r = ((n) + (sz) - 1); \
	sector_div(_r, (sz)); \
	_r; \
} \
)

/*
 * ceiling(n / size) * size
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))

/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

static inline sector_t to_sector(unsigned long long n)
{
	return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
	return (n << SECTOR_SHIFT);
}

#endif	/* _LINUX_DEVICE_MAPPER_H */