v6.2
 
/*
 * Copyright (C) 2001 Sistina Software (UK) Limited
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/bio.h>
#include <linux/dax.h>

#define DM_MSG_PREFIX "target"

static LIST_HEAD(_targets);
static DECLARE_RWSEM(_lock);

static inline struct target_type *__find_target_type(const char *name)
{
	struct target_type *tt;

	list_for_each_entry(tt, &_targets, list)
		if (!strcmp(name, tt->name))
			return tt;

	return NULL;
}

static struct target_type *get_target_type(const char *name)
{
	struct target_type *tt;

	down_read(&_lock);

	tt = __find_target_type(name);
	if (tt && !try_module_get(tt->module))
		tt = NULL;

	up_read(&_lock);
	return tt;
}

static void load_module(const char *name)
{
	request_module("dm-%s", name);
}

struct target_type *dm_get_target_type(const char *name)
{
	struct target_type *tt = get_target_type(name);

	if (!tt) {
		load_module(name);
		tt = get_target_type(name);
	}

	return tt;
}

void dm_put_target_type(struct target_type *tt)
{
	down_read(&_lock);
	module_put(tt->module);
	up_read(&_lock);
}

int dm_target_iterate(void (*iter_func)(struct target_type *tt,
					void *param), void *param)
{
	struct target_type *tt;

	down_read(&_lock);
	list_for_each_entry(tt, &_targets, list)
		iter_func(tt, param);
	up_read(&_lock);

	return 0;
}

int dm_register_target(struct target_type *tt)
{
	int rv = 0;

	down_write(&_lock);
	if (__find_target_type(tt->name))
		rv = -EEXIST;
	else
		list_add(&tt->list, &_targets);

	up_write(&_lock);
	return rv;
}

void dm_unregister_target(struct target_type *tt)
{
	down_write(&_lock);
	if (!__find_target_type(tt->name)) {
		DMCRIT("Unregistering unrecognised target: %s", tt->name);
		BUG();
	}

	list_del(&tt->list);

	up_write(&_lock);
}

/*
 * io-err: always fails an io, useful for bringing
 * up LVs that have holes in them.
 */
static int io_err_ctr(struct dm_target *tt, unsigned int argc, char **args)
{
	/*
	 * Return error for discards instead of -EOPNOTSUPP
	 */
	tt->num_discard_bios = 1;

	return 0;
}

static void io_err_dtr(struct dm_target *tt)
{
	/* empty */
}

static int io_err_map(struct dm_target *tt, struct bio *bio)
{
	return DM_MAPIO_KILL;
}

static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq,
				   union map_info *map_context,
				   struct request **clone)
{
	return DM_MAPIO_KILL;
}

static void io_err_release_clone_rq(struct request *clone,
				    union map_info *map_context)
{
}

static long io_err_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode mode, void **kaddr,
		pfn_t *pfn)
{
	return -EIO;
}

static struct target_type error_target = {
	.name = "error",
	.version = {1, 5, 0},
	.features = DM_TARGET_WILDCARD,
	.ctr  = io_err_ctr,
	.dtr  = io_err_dtr,
	.map  = io_err_map,
	.clone_and_map_rq = io_err_clone_and_map_rq,
	.release_clone_rq = io_err_release_clone_rq,
	.direct_access = io_err_dax_direct_access,
};

int __init dm_target_init(void)
{
	return dm_register_target(&error_target);
}

void dm_target_exit(void)
{
	dm_unregister_target(&error_target);
}

EXPORT_SYMBOL(dm_register_target);
EXPORT_SYMBOL(dm_unregister_target);
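
dm_register_target() and dm_unregister_target() are the whole registration interface a target driver needs. The sketch below shows how a hypothetical out-of-tree target module might use them; the "example" name, the stub callbacks, and the module boilerplate are illustrative assumptions rather than code from this file, and only the target_type fields and the two exported functions come from the interface above. Because dm_get_target_type() autoloads via request_module("dm-<name>"), such a module would normally be built as dm-example.ko for on-demand loading to work.

/*
 * Illustrative sketch only: a minimal loadable target that registers
 * itself through dm_register_target()/dm_unregister_target().  The
 * "example" target name and the stub callbacks are hypothetical.
 */
#include <linux/module.h>
#include <linux/device-mapper.h>

static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	return 0;		/* nothing to set up for this stub */
}

static void example_dtr(struct dm_target *ti)
{
}

static int example_map(struct dm_target *ti, struct bio *bio)
{
	return DM_MAPIO_KILL;	/* fail every bio, like the error target */
}

static struct target_type example_target = {
	.name    = "example",
	.version = {1, 0, 0},
	.module  = THIS_MODULE,
	.ctr     = example_ctr,
	.dtr     = example_dtr,
	.map     = example_map,
};

static int __init example_init(void)
{
	/* returns -EEXIST if a target of this name is already registered */
	return dm_register_target(&example_target);
}

static void __exit example_exit(void)
{
	dm_unregister_target(&example_target);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");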
v6.13.7
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2001 Sistina Software (UK) Limited
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/bio.h>
#include <linux/dax.h>

#define DM_MSG_PREFIX "target"

static LIST_HEAD(_targets);
static DECLARE_RWSEM(_lock);

static inline struct target_type *__find_target_type(const char *name)
{
	struct target_type *tt;

	list_for_each_entry(tt, &_targets, list)
		if (!strcmp(name, tt->name))
			return tt;

	return NULL;
}

static struct target_type *get_target_type(const char *name)
{
	struct target_type *tt;

	down_read(&_lock);

	tt = __find_target_type(name);
	if (tt && !try_module_get(tt->module))
		tt = NULL;

	up_read(&_lock);
	return tt;
}

static void load_module(const char *name)
{
	request_module("dm-%s", name);
}

struct target_type *dm_get_target_type(const char *name)
{
	struct target_type *tt = get_target_type(name);

	if (!tt) {
		load_module(name);
		tt = get_target_type(name);
	}

	return tt;
}

void dm_put_target_type(struct target_type *tt)
{
	down_read(&_lock);
	module_put(tt->module);
	up_read(&_lock);
}

int dm_target_iterate(void (*iter_func)(struct target_type *tt,
					void *param), void *param)
{
	struct target_type *tt;

	down_read(&_lock);
	list_for_each_entry(tt, &_targets, list)
		iter_func(tt, param);
	up_read(&_lock);

	return 0;
}

int dm_register_target(struct target_type *tt)
{
	int rv = 0;

	down_write(&_lock);
	if (__find_target_type(tt->name)) {
		DMERR("%s: '%s' target already registered",
		      __func__, tt->name);
		rv = -EEXIST;
	} else {
		list_add(&tt->list, &_targets);
	}
	up_write(&_lock);

	return rv;
}
EXPORT_SYMBOL(dm_register_target);

void dm_unregister_target(struct target_type *tt)
{
	down_write(&_lock);
	if (!__find_target_type(tt->name)) {
		DMCRIT("Unregistering unrecognised target: %s", tt->name);
		BUG();
	}

	list_del(&tt->list);

	up_write(&_lock);
}
EXPORT_SYMBOL(dm_unregister_target);

/*
 * io-err: always fails an io, useful for bringing
 * up LVs that have holes in them.
 */
struct io_err_c {
	struct dm_dev *dev;
	sector_t start;
};

static int io_err_get_args(struct dm_target *tt, unsigned int argc, char **args)
{
	unsigned long long start;
	struct io_err_c *ioec;
	char dummy;
	int ret;

	ioec = kmalloc(sizeof(*ioec), GFP_KERNEL);
	if (!ioec) {
		tt->error = "Cannot allocate io_err context";
		return -ENOMEM;
	}

	ret = -EINVAL;
	if (sscanf(args[1], "%llu%c", &start, &dummy) != 1 ||
	    start != (sector_t)start) {
		tt->error = "Invalid device sector";
		goto bad;
	}
	ioec->start = start;

	ret = dm_get_device(tt, args[0], dm_table_get_mode(tt->table), &ioec->dev);
	if (ret) {
		tt->error = "Device lookup failed";
		goto bad;
	}

	tt->private = ioec;

	return 0;

bad:
	kfree(ioec);

	return ret;
}

static int io_err_ctr(struct dm_target *tt, unsigned int argc, char **args)
{
	/*
	 * If we have arguments, assume it is the path to the backing
	 * block device and its mapping start sector (same as dm-linear).
	 * In this case, get the device so that we can get its limits.
	 */
	if (argc == 2) {
		int ret = io_err_get_args(tt, argc, args);

		if (ret)
			return ret;
	}

	/*
	 * Return error for discards instead of -EOPNOTSUPP
	 */
	tt->num_discard_bios = 1;
	tt->discards_supported = true;

	return 0;
}

static void io_err_dtr(struct dm_target *tt)
{
	struct io_err_c *ioec = tt->private;

	if (ioec) {
		dm_put_device(tt, ioec->dev);
		kfree(ioec);
	}
}

static int io_err_map(struct dm_target *tt, struct bio *bio)
{
	return DM_MAPIO_KILL;
}

static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq,
				   union map_info *map_context,
				   struct request **clone)
{
	return DM_MAPIO_KILL;
}

static void io_err_release_clone_rq(struct request *clone,
				    union map_info *map_context)
{
}

#ifdef CONFIG_BLK_DEV_ZONED
static sector_t io_err_map_sector(struct dm_target *ti, sector_t bi_sector)
{
	struct io_err_c *ioec = ti->private;

	return ioec->start + dm_target_offset(ti, bi_sector);
}

static int io_err_report_zones(struct dm_target *ti,
		struct dm_report_zones_args *args, unsigned int nr_zones)
{
	struct io_err_c *ioec = ti->private;

	/*
	 * This should never be called when we do not have a backing device
	 * as that means the target is not a zoned one.
	 */
	if (WARN_ON_ONCE(!ioec))
		return -EIO;

	return dm_report_zones(ioec->dev->bdev, ioec->start,
			       io_err_map_sector(ti, args->next_sector),
			       args, nr_zones);
}
#else
#define io_err_report_zones NULL
#endif

static int io_err_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct io_err_c *ioec = ti->private;

	if (!ioec)
		return 0;

	return fn(ti, ioec->dev, ioec->start, ti->len, data);
}

static void io_err_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	limits->max_hw_discard_sectors = UINT_MAX;
	limits->discard_granularity = 512;
}

static long io_err_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode mode, void **kaddr,
		pfn_t *pfn)
{
	return -EIO;
}

static struct target_type error_target = {
	.name = "error",
	.version = {1, 7, 0},
	.features = DM_TARGET_WILDCARD | DM_TARGET_ZONED_HM,
	.ctr  = io_err_ctr,
	.dtr  = io_err_dtr,
	.map  = io_err_map,
	.clone_and_map_rq = io_err_clone_and_map_rq,
	.release_clone_rq = io_err_release_clone_rq,
	.iterate_devices = io_err_iterate_devices,
	.io_hints = io_err_io_hints,
	.direct_access = io_err_dax_direct_access,
	.report_zones = io_err_report_zones,
};

int __init dm_target_init(void)
{
	return dm_register_target(&error_target);
}

void dm_target_exit(void)
{
	dm_unregister_target(&error_target);
}
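
On the lookup side, dm_get_target_type() and dm_put_target_type() bracket every use of a target_type by the device-mapper core (dm-table takes this path when a table line names a target). The fragment below is only a sketch of that protocol from a hypothetical caller; the helper name and the surrounding logic are illustrative, and only the two dm_* calls are real entry points defined above.

/*
 * Sketch of the caller-side protocol (hypothetical helper, illustrative
 * only).  dm_get_target_type() looks the name up under the read lock,
 * falls back to request_module("dm-<name>") once if it is missing, and
 * pins the providing module with try_module_get(); dm_put_target_type()
 * drops that module reference again.
 */
static int example_use_target(const char *type_name)
{
	struct target_type *tt;

	tt = dm_get_target_type(type_name);
	if (!tt)
		return -EINVAL;	/* not registered and not autoloadable */

	/* ... the target's callbacks stay loaded while the ref is held ... */

	dm_put_target_type(tt);
	return 0;
}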