v6.8
/*
 * Internal header file for device mapper
 *
 * Copyright (C) 2001, 2002 Sistina Software
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_INTERNAL_H
#define DM_INTERNAL_H

#include <linux/fs.h>
#include <linux/device-mapper.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/hdreg.h>
#include <linux/completion.h>
#include <linux/kobject.h>
#include <linux/refcount.h>
#include <linux/log2.h>

#include "dm-stats.h"

/*
 * Suspend feature flags
 */
#define DM_SUSPEND_LOCKFS_FLAG		(1 << 0)
#define DM_SUSPEND_NOFLUSH_FLAG		(1 << 1)
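
A hedged sketch of how these flags combine: dm_suspend() (declared in include/linux/device-mapper.h) takes a bitwise OR of these bits; the wrapper below is illustrative, not an actual call site.

/* Illustrative sketch, not part of dm.h: suspend while locking the
 * filesystem and skipping the flush of queued I/O. */
static int example_suspend(struct mapped_device *md)
{
	return dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG | DM_SUSPEND_NOFLUSH_FLAG);
}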

/*
 * Status feature flags
 */
#define DM_STATUS_NOFLUSH_FLAG		(1 << 0)

/*
 * List of devices that a metadevice uses and should open/close.
 */
struct dm_dev_internal {
	struct list_head list;
	refcount_t count;
	struct dm_dev *dm_dev;
};

struct dm_table;
struct dm_md_mempools;
struct dm_target_io;
struct dm_io;

/*
 *---------------------------------------------------------------
 * Internal table functions.
 *---------------------------------------------------------------
 */
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context);
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
bool dm_table_has_no_data_devices(struct dm_table *table);
int dm_calculate_queue_limits(struct dm_table *table,
			      struct queue_limits *limits);
int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
			      struct queue_limits *limits);
struct list_head *dm_table_get_devices(struct dm_table *t);
void dm_table_presuspend_targets(struct dm_table *t);
void dm_table_presuspend_undo_targets(struct dm_table *t);
void dm_table_postsuspend_targets(struct dm_table *t);
int dm_table_resume_targets(struct dm_table *t);
enum dm_queue_mode dm_table_get_type(struct dm_table *t);
struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
struct dm_target *dm_table_get_wildcard_target(struct dm_table *t);
bool dm_table_bio_based(struct dm_table *t);
bool dm_table_request_based(struct dm_table *t);

void dm_lock_md_type(struct mapped_device *md);
void dm_unlock_md_type(struct mapped_device *md);
void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type);
enum dm_queue_mode dm_get_md_type(struct mapped_device *md);
struct target_type *dm_get_immutable_target_type(struct mapped_device *md);

int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);

/*
 * To check whether the target type is bio-based or not (request-based).
 */
#define dm_target_bio_based(t) ((t)->type->map != NULL)

/*
 * To check whether the target type is request-based or not (bio-based).
 */
#define dm_target_request_based(t) ((t)->type->clone_and_map_rq != NULL)

/*
 * To check whether the target type is a hybrid (capable of being
 * either request-based or bio-based).
 */
#define dm_target_hybrid(t) (dm_target_bio_based(t) && dm_target_request_based(t))
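
The three predicates above classify a target by which map hooks its target_type provides. A minimal sketch of branching on them (the helper is hypothetical):

/* Hypothetical helper, illustration only: classify a target by the
 * map hooks its type provides, using the macros above. */
static const char *example_target_kind(struct dm_target *t)
{
	if (dm_target_hybrid(t))
		return "hybrid";	/* both ->map and ->clone_and_map_rq */
	if (dm_target_request_based(t))
		return "request-based";
	if (dm_target_bio_based(t))
		return "bio-based";
	return "unknown";
}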

/*
 * Functions related to zoned targets.
 */
int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q);
void dm_zone_endio(struct dm_io *io, struct bio *clone);
#ifdef CONFIG_BLK_DEV_ZONED
void dm_cleanup_zoned_dev(struct mapped_device *md);
int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data);
bool dm_is_zone_write(struct mapped_device *md, struct bio *bio);
int dm_zone_map_bio(struct dm_target_io *io);
#else
static inline void dm_cleanup_zoned_dev(struct mapped_device *md) {}
#define dm_blk_report_zones	NULL
static inline bool dm_is_zone_write(struct mapped_device *md, struct bio *bio)
{
	return false;
}
static inline int dm_zone_map_bio(struct dm_target_io *tio)
{
	return DM_MAPIO_KILL;
}
#endif
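
The #else branch above is the usual kernel compile-out pattern: when CONFIG_BLK_DEV_ZONED is unset, callers still see the same names but get no-op stubs, a NULL ops pointer, or DM_MAPIO_KILL on paths that should be unreachable. The same pattern for a hypothetical option, as a sketch:

/* Hypothetical example of the compile-out pattern, not kernel code. */
#ifdef CONFIG_EXAMPLE_FEATURE
int example_feature_setup(struct mapped_device *md);
#else
static inline int example_feature_setup(struct mapped_device *md)
{
	return 0;	/* feature compiled out: succeed as a no-op */
}
#endif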

/*
 *---------------------------------------------------------------
 * A registry of target types.
 *---------------------------------------------------------------
 */
int dm_target_init(void);
void dm_target_exit(void);
struct target_type *dm_get_target_type(const char *name);
void dm_put_target_type(struct target_type *tt);
int dm_target_iterate(void (*iter_func)(struct target_type *tt,
					void *param), void *param);

int dm_split_args(int *argc, char ***argvp, char *input);

/*
 * Is this mapped_device being deleted?
 */
int dm_deleting_md(struct mapped_device *md);

/*
 * Is this mapped_device suspended?
 */
int dm_suspended_md(struct mapped_device *md);

/*
 * Internal suspend and resume methods.
 */
int dm_suspended_internally_md(struct mapped_device *md);
void dm_internal_suspend_fast(struct mapped_device *md);
void dm_internal_resume_fast(struct mapped_device *md);
void dm_internal_suspend_noflush(struct mapped_device *md);
void dm_internal_resume(struct mapped_device *md);

/*
 * Test if the device is scheduled for deferred remove.
 */
int dm_test_deferred_remove_flag(struct mapped_device *md);

/*
 * Try to remove devices marked for deferred removal.
 */
void dm_deferred_remove(void);

/*
 * The device-mapper can be driven through one of two interfaces:
 * ioctl or filesystem, depending on which patch you have applied.
 */
int dm_interface_init(void);
void dm_interface_exit(void);

/*
 * sysfs interface
 */
int dm_sysfs_init(struct mapped_device *md);
void dm_sysfs_exit(struct mapped_device *md);
struct kobject *dm_kobject(struct mapped_device *md);
struct mapped_device *dm_get_from_kobject(struct kobject *kobj);

/*
 * The kobject helper
 */
void dm_kobject_release(struct kobject *kobj);

/*
 * Targets for linear and striped mappings
 */
int linear_map(struct dm_target *ti, struct bio *bio);
int dm_linear_init(void);
void dm_linear_exit(void);

int stripe_map(struct dm_target *ti, struct bio *bio);
int dm_stripe_init(void);
void dm_stripe_exit(void);

/*
 * mapped_device operations
 */
void dm_destroy(struct mapped_device *md);
void dm_destroy_immediate(struct mapped_device *md);
int dm_open_count(struct mapped_device *md);
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred);
int dm_cancel_deferred_remove(struct mapped_device *md);
int dm_request_based(struct mapped_device *md);
int dm_get_table_device(struct mapped_device *md, dev_t dev, blk_mode_t mode,
			struct dm_dev **result);
void dm_put_table_device(struct mapped_device *md, struct dm_dev *d);

int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned int cookie, bool need_resize_uevent);

int dm_io_init(void);
void dm_io_exit(void);

int dm_kcopyd_init(void);
void dm_kcopyd_exit(void);

/*
 * Mempool operations
 */
void dm_free_md_mempools(struct dm_md_mempools *pools);

/*
 * Various helpers
 */
unsigned int dm_get_reserved_bio_based_ios(void);

#define DM_HASH_LOCKS_MAX 64

static inline unsigned int dm_num_hash_locks(void)
{
	unsigned int num_locks = roundup_pow_of_two(num_online_cpus()) << 1;

	return min_t(unsigned int, num_locks, DM_HASH_LOCKS_MAX);
}

#define DM_HASH_LOCKS_MULT  4294967291ULL
#define DM_HASH_LOCKS_SHIFT 6

static inline unsigned int dm_hash_locks_index(sector_t block,
					       unsigned int num_locks)
{
	sector_t h1 = (block * DM_HASH_LOCKS_MULT) >> DM_HASH_LOCKS_SHIFT;
	sector_t h2 = h1 >> DM_HASH_LOCKS_SHIFT;

	return (h1 ^ h2) & (num_locks - 1);
}
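
dm_num_hash_locks() sizes a lock set at twice the online CPU count, rounded up to a power of two and capped at DM_HASH_LOCKS_MAX; since every possible result is a power of two, dm_hash_locks_index() can reduce its multiplicative hash with a mask instead of a modulo. A hedged sketch of a caller, with a worked sizing example in the comment (the helper name is hypothetical):

/*
 * Illustrative sketch, not part of dm.h. With 6 online CPUs:
 * roundup_pow_of_two(6) = 8, 8 << 1 = 16, min(16, 64) = 16 locks,
 * so indices are masked into [0, 15].
 */
static inline unsigned int example_lock_index_for_block(sector_t block)
{
	return dm_hash_locks_index(block, dm_num_hash_locks());
}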

#endif
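
A hedged sketch of the acquire/release pairing implied by the v6.8 dm_get_table_device()/dm_put_table_device() declarations above; the BLK_OPEN_READ mode and the error handling are assumptions about a typical caller, not an actual call site:

/* Illustrative only: acquire a device for a table, use it, release it. */
static int example_use_table_device(struct mapped_device *md, dev_t dev)
{
	struct dm_dev *d;
	int r;

	r = dm_get_table_device(md, dev, BLK_OPEN_READ, &d);
	if (r)
		return r;

	/* ... use d->bdev here ... */

	dm_put_table_device(md, d);
	return 0;
}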
v4.6
/*
 * Internal header file for device mapper
 *
 * Copyright (C) 2001, 2002 Sistina Software
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_INTERNAL_H
#define DM_INTERNAL_H

#include <linux/fs.h>
#include <linux/device-mapper.h>
#include <linux/list.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/hdreg.h>
#include <linux/completion.h>
#include <linux/kobject.h>

#include "dm-stats.h"

/*
 * Suspend feature flags
 */
#define DM_SUSPEND_LOCKFS_FLAG		(1 << 0)
#define DM_SUSPEND_NOFLUSH_FLAG		(1 << 1)

/*
 * Status feature flags
 */
#define DM_STATUS_NOFLUSH_FLAG		(1 << 0)

/*
 * Type of table and mapped_device's mempool
 */
#define DM_TYPE_NONE			0
#define DM_TYPE_BIO_BASED		1
#define DM_TYPE_REQUEST_BASED		2
#define DM_TYPE_MQ_REQUEST_BASED	3
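
These constants are the v4.6 precursor of the later enum dm_queue_mode seen in v6.8 above. A minimal sketch of how a caller might test for a request-based table using dm_table_get_type() (declared further down); the helper itself is hypothetical:

/* Hypothetical helper: does this table drive a request-based queue? */
static inline bool example_table_is_rq_based(struct dm_table *t)
{
	unsigned type = dm_table_get_type(t);

	return type == DM_TYPE_REQUEST_BASED ||
	       type == DM_TYPE_MQ_REQUEST_BASED;
}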

/*
 * List of devices that a metadevice uses and should open/close.
 */
struct dm_dev_internal {
	struct list_head list;
	atomic_t count;
	struct dm_dev *dm_dev;
};

struct dm_table;
struct dm_md_mempools;

/*-----------------------------------------------------------------
 * Internal table functions.
 *---------------------------------------------------------------*/
void dm_table_destroy(struct dm_table *t);
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context);
struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
bool dm_table_has_no_data_devices(struct dm_table *table);
int dm_calculate_queue_limits(struct dm_table *table,
			      struct queue_limits *limits);
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
			       struct queue_limits *limits);
struct list_head *dm_table_get_devices(struct dm_table *t);
void dm_table_presuspend_targets(struct dm_table *t);
void dm_table_presuspend_undo_targets(struct dm_table *t);
void dm_table_postsuspend_targets(struct dm_table *t);
int dm_table_resume_targets(struct dm_table *t);
int dm_table_any_congested(struct dm_table *t, int bdi_bits);
unsigned dm_table_get_type(struct dm_table *t);
struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
struct dm_target *dm_table_get_wildcard_target(struct dm_table *t);
bool dm_table_request_based(struct dm_table *t);
bool dm_table_mq_request_based(struct dm_table *t);
void dm_table_free_md_mempools(struct dm_table *t);
struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);

void dm_lock_md_type(struct mapped_device *md);
void dm_unlock_md_type(struct mapped_device *md);
void dm_set_md_type(struct mapped_device *md, unsigned type);
unsigned dm_get_md_type(struct mapped_device *md);
struct target_type *dm_get_immutable_target_type(struct mapped_device *md);

int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);

/*
 * To check the return value from dm_table_find_target().
 */
#define dm_target_is_valid(t) ((t)->table)
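
In v4.6, dm_table_find_target() returns a sentinel entry (whose ->table is NULL) rather than a NULL pointer when no target covers the sector, so callers validate the result with dm_target_is_valid(). A hedged sketch of a wrapper (hypothetical name):

/* Hypothetical lookup wrapper: NULL when no target covers the sector. */
static struct dm_target *example_lookup_target(struct dm_table *t,
					       sector_t sector)
{
	struct dm_target *ti = dm_table_find_target(t, sector);

	if (!dm_target_is_valid(ti))
		return NULL;
	return ti;
}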

/*
 * To check whether the target type is bio-based or not (request-based).
 */
#define dm_target_bio_based(t) ((t)->type->map != NULL)

/*
 * To check whether the target type is request-based or not (bio-based).
 */
#define dm_target_request_based(t) (((t)->type->map_rq != NULL) || \
				    ((t)->type->clone_and_map_rq != NULL))

/*
 * To check whether the target type is a hybrid (capable of being
 * either request-based or bio-based).
 */
#define dm_target_hybrid(t) (dm_target_bio_based(t) && dm_target_request_based(t))

/*-----------------------------------------------------------------
 * A registry of target types.
 *---------------------------------------------------------------*/
int dm_target_init(void);
void dm_target_exit(void);
struct target_type *dm_get_target_type(const char *name);
void dm_put_target_type(struct target_type *tt);
int dm_target_iterate(void (*iter_func)(struct target_type *tt,
					void *param), void *param);

int dm_split_args(int *argc, char ***argvp, char *input);

/*
 * Is this mapped_device being deleted?
 */
int dm_deleting_md(struct mapped_device *md);

/*
 * Is this mapped_device suspended?
 */
int dm_suspended_md(struct mapped_device *md);

/*
 * Internal suspend and resume methods.
 */
int dm_suspended_internally_md(struct mapped_device *md);
void dm_internal_suspend_fast(struct mapped_device *md);
void dm_internal_resume_fast(struct mapped_device *md);
void dm_internal_suspend_noflush(struct mapped_device *md);
void dm_internal_resume(struct mapped_device *md);

/*
 * Test if the device is scheduled for deferred remove.
 */
int dm_test_deferred_remove_flag(struct mapped_device *md);

/*
 * Try to remove devices marked for deferred removal.
 */
void dm_deferred_remove(void);

/*
 * The device-mapper can be driven through one of two interfaces:
 * ioctl or filesystem, depending on which patch you have applied.
 */
int dm_interface_init(void);
void dm_interface_exit(void);

/*
 * sysfs interface
 */
struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};

static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}
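
dm_get_completion_from_kobject() is the standard container_of idiom: recover the enclosing dm_kobject_holder from a pointer to its embedded kobject. A hedged sketch of how a release hook could use it; this is illustrative, not the actual dm_kobject_release() body:

/* Illustrative only: a sysfs release hook could signal whoever is
 * waiting for the kobject to go away. */
static void example_kobj_release(struct kobject *kobj)
{
	complete(dm_get_completion_from_kobject(kobj));
}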

int dm_sysfs_init(struct mapped_device *md);
void dm_sysfs_exit(struct mapped_device *md);
struct kobject *dm_kobject(struct mapped_device *md);
struct mapped_device *dm_get_from_kobject(struct kobject *kobj);

/*
 * The kobject helper
 */
void dm_kobject_release(struct kobject *kobj);

/*
 * Targets for linear and striped mappings
 */
int dm_linear_init(void);
void dm_linear_exit(void);

int dm_stripe_init(void);
void dm_stripe_exit(void);

/*
 * mapped_device operations
 */
void dm_destroy(struct mapped_device *md);
void dm_destroy_immediate(struct mapped_device *md);
int dm_open_count(struct mapped_device *md);
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred);
int dm_cancel_deferred_remove(struct mapped_device *md);
int dm_request_based(struct mapped_device *md);
sector_t dm_get_size(struct mapped_device *md);
struct request_queue *dm_get_md_queue(struct mapped_device *md);
int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result);
void dm_put_table_device(struct mapped_device *md, struct dm_dev *d);
struct dm_stats *dm_get_stats(struct mapped_device *md);

int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned cookie);

void dm_internal_suspend(struct mapped_device *md);
void dm_internal_resume(struct mapped_device *md);

bool dm_use_blk_mq(struct mapped_device *md);

int dm_io_init(void);
void dm_io_exit(void);

int dm_kcopyd_init(void);
void dm_kcopyd_exit(void);

/*
 * Mempool operations
 */
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
					    unsigned integrity, unsigned per_bio_data_size);
void dm_free_md_mempools(struct dm_md_mempools *pools);

/*
 * Helpers that are used by DM core
 */
unsigned dm_get_reserved_bio_based_ios(void);
unsigned dm_get_reserved_rq_based_ios(void);

static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}
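
dm_message_test_buffer_overflow() reports that a result buffer is full: with maxlen bytes available, output is truncated once strlen(result) + 1 reaches maxlen. A hedged sketch of a status emitter that stops appending on overflow (the names and the scnprintf-based sequence are hypothetical, not an actual target's status hook):

/* Hypothetical status emitter: stop appending once the buffer is full. */
static void example_emit_status(char *result, unsigned maxlen)
{
	unsigned sz = 0;

	sz += scnprintf(result + sz, maxlen - sz, "state=active ");
	if (dm_message_test_buffer_overflow(result, maxlen))
		return;	/* output truncated; nothing more will fit */
	sz += scnprintf(result + sz, maxlen - sz, "errors=0");
}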

ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf);
ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
						     const char *buf, size_t count);

#endif