drivers/md/dm-linear.c (v5.9)
/*
 * Copyright (C) 2001-2003 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "linear"

/*
 * Linear: maps a linear range of a device.
 */
struct linear_c {
	struct dm_dev *dev;
	sector_t start;
};

/*
 * Construct a linear mapping: <dev_path> <offset>
 */
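/*
 * Example (illustrative, not from this file): a one-target table line as
 * passed in from userspace, e.g. via "dmsetup create", might look like
 *
 *     0 2097152 linear /dev/sdb 384
 *
 * i.e. map 2097152 sectors (1 GiB) starting at sector 384 of /dev/sdb.
 * The dm table parser consumes the leading "<start> <length> linear"
 * part, so the constructor below sees argc == 2 with argv[0] = "/dev/sdb"
 * and argv[1] = "384".
 */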
static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct linear_c *lc;
	unsigned long long tmp;
	char dummy;
	int ret;

	if (argc != 2) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	lc = kmalloc(sizeof(*lc), GFP_KERNEL);
	if (lc == NULL) {
		ti->error = "Cannot allocate linear context";
		return -ENOMEM;
	}

	ret = -EINVAL;
	if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 || tmp != (sector_t)tmp) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	lc->start = tmp;

	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &lc->dev);
	if (ret) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_secure_erase_bios = 1;
	ti->num_write_same_bios = 1;
	ti->num_write_zeroes_bios = 1;
	ti->private = lc;
	return 0;

      bad:
	kfree(lc);
	return ret;
}

static void linear_dtr(struct dm_target *ti)
{
	struct linear_c *lc = (struct linear_c *) ti->private;

	dm_put_device(ti, lc->dev);
	kfree(lc);
}

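/*
 * Remap a sector from the target's address space onto the backing device:
 * dm_target_offset() subtracts the target's own start within the dm table
 * (ti->begin), and lc->start re-bases the result onto lc->dev.  For
 * example, with ti->begin == 0 and lc->start == 384, bio sector 8 maps to
 * sector 392 of the backing device.
 */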
static sector_t linear_map_sector(struct dm_target *ti, sector_t bi_sector)
{
	struct linear_c *lc = ti->private;

	return lc->start + dm_target_offset(ti, bi_sector);
}

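/*
 * Redirect the bio to the backing device.  Empty bios (e.g. pure flushes)
 * carry no meaningful sector and are left alone; bios with a payload, and
 * zone management operations (which are zero-length but address a
 * specific zone), get their starting sector translated.
 */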
static void linear_map_bio(struct dm_target *ti, struct bio *bio)
{
	struct linear_c *lc = ti->private;

	bio_set_dev(bio, lc->dev->bdev);
	if (bio_sectors(bio) || op_is_zone_mgmt(bio_op(bio)))
		bio->bi_iter.bi_sector =
			linear_map_sector(ti, bio->bi_iter.bi_sector);
}

static int linear_map(struct dm_target *ti, struct bio *bio)
{
	linear_map_bio(ti, bio);

	return DM_MAPIO_REMAPPED;
}

static void linear_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	struct linear_c *lc = (struct linear_c *) ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s %llu", lc->dev->name,
				(unsigned long long)lc->start);
		break;
	}
}

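/*
 * Returning 0 lets the dm core pass ioctls straight through to the
 * backing device; a positive return signals that this target only covers
 * part of the device, in which case the dm core restricts passthrough to
 * suitably privileged callers (CAP_SYS_RAWIO in this kernel).
 */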
static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
	struct linear_c *lc = (struct linear_c *) ti->private;
	struct dm_dev *dev = lc->dev;

	*bdev = dev->bdev;

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (lc->start ||
	    ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
		return 1;
	return 0;
}

#ifdef CONFIG_BLK_DEV_ZONED
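/*
 * v5.9-style zone reporting: the dm core drives the report through
 * struct dm_report_zones_args.  The target only translates the next
 * sector to query into the backing device's address space and records
 * its own offset in args->start, so that the dm core's
 * dm_report_zones_cb() callback can remap each reported zone back into
 * the target's view.
 */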
static int linear_report_zones(struct dm_target *ti,
		struct dm_report_zones_args *args, unsigned int nr_zones)
{
	struct linear_c *lc = ti->private;
	sector_t sector = linear_map_sector(ti, args->next_sector);

	args->start = lc->start;
	return blkdev_report_zones(lc->dev->bdev, sector, nr_zones,
				   dm_report_zones_cb, args);
}
#endif

static int linear_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct linear_c *lc = ti->private;

	return fn(ti, lc->dev, lc->start, ti->len, data);
}

#if IS_ENABLED(CONFIG_DAX_DRIVER)
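/*
 * The DAX helpers share one translation pattern: convert the incoming
 * page offset into a sector (PAGE_SECTORS sectors per page), remap it
 * with linear_map_sector(), then let bdev_dax_pgoff() turn the resulting
 * device sector back into a page offset (accounting for any partition
 * start) before calling the corresponding dax_device operation.
 */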
static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	long ret;
	struct linear_c *lc = ti->private;
	struct block_device *bdev = lc->dev->bdev;
	struct dax_device *dax_dev = lc->dev->dax_dev;
	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;

	dev_sector = linear_map_sector(ti, sector);
	ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages * PAGE_SIZE, &pgoff);
	if (ret)
		return ret;
	return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
}

static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	struct linear_c *lc = ti->private;
	struct block_device *bdev = lc->dev->bdev;
	struct dax_device *dax_dev = lc->dev->dax_dev;
	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;

	dev_sector = linear_map_sector(ti, sector);
	if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
		return 0;
	return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}

static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	struct linear_c *lc = ti->private;
	struct block_device *bdev = lc->dev->bdev;
	struct dax_device *dax_dev = lc->dev->dax_dev;
	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;

	dev_sector = linear_map_sector(ti, sector);
	if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
		return 0;
	return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i);
}

static int linear_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
				      size_t nr_pages)
{
	int ret;
	struct linear_c *lc = ti->private;
	struct block_device *bdev = lc->dev->bdev;
	struct dax_device *dax_dev = lc->dev->dax_dev;
	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;

	dev_sector = linear_map_sector(ti, sector);
	ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages << PAGE_SHIFT, &pgoff);
	if (ret)
		return ret;
	return dax_zero_page_range(dax_dev, pgoff, nr_pages);
}

#else
#define linear_dax_direct_access NULL
#define linear_dax_copy_from_iter NULL
#define linear_dax_copy_to_iter NULL
#define linear_dax_zero_page_range NULL
#endif

static struct target_type linear_target = {
	.name   = "linear",
	.version = {1, 4, 0},
#ifdef CONFIG_BLK_DEV_ZONED
	.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM,
	.report_zones = linear_report_zones,
#else
	.features = DM_TARGET_PASSES_INTEGRITY,
#endif
	.module = THIS_MODULE,
	.ctr    = linear_ctr,
	.dtr    = linear_dtr,
	.map    = linear_map,
	.status = linear_status,
	.prepare_ioctl = linear_prepare_ioctl,
	.iterate_devices = linear_iterate_devices,
	.direct_access = linear_dax_direct_access,
	.dax_copy_from_iter = linear_dax_copy_from_iter,
	.dax_copy_to_iter = linear_dax_copy_to_iter,
	.dax_zero_page_range = linear_dax_zero_page_range,
};

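/*
 * dm-linear is built into the device-mapper core rather than registered
 * as a standalone module: dm_linear_init()/dm_linear_exit() are declared
 * in dm.h and invoked from the dm core's init/exit sequence, which is why
 * there is no module_init()/module_exit() here.
 */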
int __init dm_linear_init(void)
{
	int r = dm_register_target(&linear_target);

	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

void dm_linear_exit(void)
{
	dm_unregister_target(&linear_target);
}
drivers/md/dm-linear.c (v5.4)
/*
 * Copyright (C) 2001-2003 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "linear"

/*
 * Linear: maps a linear range of a device.
 */
struct linear_c {
	struct dm_dev *dev;
	sector_t start;
};

/*
 * Construct a linear mapping: <dev_path> <offset>
 */
static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct linear_c *lc;
	unsigned long long tmp;
	char dummy;
	int ret;

	if (argc != 2) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	lc = kmalloc(sizeof(*lc), GFP_KERNEL);
	if (lc == NULL) {
		ti->error = "Cannot allocate linear context";
		return -ENOMEM;
	}

	ret = -EINVAL;
	if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 || tmp != (sector_t)tmp) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	lc->start = tmp;

	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &lc->dev);
	if (ret) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_secure_erase_bios = 1;
	ti->num_write_same_bios = 1;
	ti->num_write_zeroes_bios = 1;
	ti->private = lc;
	return 0;

      bad:
	kfree(lc);
	return ret;
}

static void linear_dtr(struct dm_target *ti)
{
	struct linear_c *lc = (struct linear_c *) ti->private;

	dm_put_device(ti, lc->dev);
	kfree(lc);
}

static sector_t linear_map_sector(struct dm_target *ti, sector_t bi_sector)
{
	struct linear_c *lc = ti->private;

	return lc->start + dm_target_offset(ti, bi_sector);
}

static void linear_map_bio(struct dm_target *ti, struct bio *bio)
{
	struct linear_c *lc = ti->private;

	bio_set_dev(bio, lc->dev->bdev);
	if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)
		bio->bi_iter.bi_sector =
			linear_map_sector(ti, bio->bi_iter.bi_sector);
}

static int linear_map(struct dm_target *ti, struct bio *bio)
{
	linear_map_bio(ti, bio);

	return DM_MAPIO_REMAPPED;
}

static void linear_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	struct linear_c *lc = (struct linear_c *) ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s %llu", lc->dev->name,
				(unsigned long long)lc->start);
		break;
	}
}

static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
	struct linear_c *lc = (struct linear_c *) ti->private;
	struct dm_dev *dev = lc->dev;

	*bdev = dev->bdev;

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (lc->start ||
	    ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
		return 1;
	return 0;
}

#ifdef CONFIG_BLK_DEV_ZONED
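/*
 * v5.4-style zone reporting: zones are queried from the backing device at
 * the remapped sector, then dm_remap_zone_report() rewrites the returned
 * zone descriptors (offsetting by lc->start) so they are expressed in the
 * target's address space.
 */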
static int linear_report_zones(struct dm_target *ti, sector_t sector,
			       struct blk_zone *zones, unsigned int *nr_zones)
{
	struct linear_c *lc = (struct linear_c *) ti->private;
	int ret;

	/* Do report and remap it */
	ret = blkdev_report_zones(lc->dev->bdev, linear_map_sector(ti, sector),
				  zones, nr_zones);
	if (ret != 0)
		return ret;

	if (*nr_zones)
		dm_remap_zone_report(ti, lc->start, zones, nr_zones);
	return 0;
}
#endif

static int linear_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct linear_c *lc = ti->private;

	return fn(ti, lc->dev, lc->start, ti->len, data);
}

#if IS_ENABLED(CONFIG_DAX_DRIVER)
static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	long ret;
	struct linear_c *lc = ti->private;
	struct block_device *bdev = lc->dev->bdev;
	struct dax_device *dax_dev = lc->dev->dax_dev;
	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;

	dev_sector = linear_map_sector(ti, sector);
	ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages * PAGE_SIZE, &pgoff);
	if (ret)
		return ret;
	return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
}

static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	struct linear_c *lc = ti->private;
	struct block_device *bdev = lc->dev->bdev;
	struct dax_device *dax_dev = lc->dev->dax_dev;
	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;

	dev_sector = linear_map_sector(ti, sector);
	if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
		return 0;
	return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}

static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	struct linear_c *lc = ti->private;
	struct block_device *bdev = lc->dev->bdev;
	struct dax_device *dax_dev = lc->dev->dax_dev;
	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;

	dev_sector = linear_map_sector(ti, sector);
	if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
		return 0;
	return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i);
}

#else
#define linear_dax_direct_access NULL
#define linear_dax_copy_from_iter NULL
#define linear_dax_copy_to_iter NULL
#endif

static struct target_type linear_target = {
	.name   = "linear",
	.version = {1, 4, 0},
#ifdef CONFIG_BLK_DEV_ZONED
	.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM,
	.report_zones = linear_report_zones,
#else
	.features = DM_TARGET_PASSES_INTEGRITY,
#endif
	.module = THIS_MODULE,
	.ctr    = linear_ctr,
	.dtr    = linear_dtr,
	.map    = linear_map,
	.status = linear_status,
	.prepare_ioctl = linear_prepare_ioctl,
	.iterate_devices = linear_iterate_devices,
	.direct_access = linear_dax_direct_access,
	.dax_copy_from_iter = linear_dax_copy_from_iter,
	.dax_copy_to_iter = linear_dax_copy_to_iter,
};

int __init dm_linear_init(void)
{
	int r = dm_register_target(&linear_target);

	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

void dm_linear_exit(void)
{
	dm_unregister_target(&linear_target);
}