/*
 * fs/logfs/dev_bdev.c	- Device access methods for block devices
 *
 * As should be obvious for Linux kernel code, license is GPLv2
 *
 * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
 */
#include "logfs.h"
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/prefetch.h>

#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))

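/*
 * Synchronous single-page I/O: sync_request() builds a one-page bio on
 * the stack, submits it and sleeps on a completion until the block
 * layer calls request_complete().
 */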
static void request_complete(struct bio *bio, int err)
{
	complete((struct completion *)bio->bi_private);
}

static int sync_request(struct page *page, struct block_device *bdev, int rw)
{
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;

	bio_init(&bio);
	bio.bi_io_vec = &bio_vec;
	bio_vec.bv_page = page;
	bio_vec.bv_len = PAGE_SIZE;
	bio_vec.bv_offset = 0;
	bio.bi_vcnt = 1;
	bio.bi_idx = 0;
	bio.bi_size = PAGE_SIZE;
	bio.bi_bdev = bdev;
	/* page->index counts pages; convert to 512-byte sectors */
	bio.bi_sector = page->index * (PAGE_SIZE >> 9);
	init_completion(&complete);
	bio.bi_private = &complete;
	bio.bi_end_io = request_complete;

	submit_bio(rw, &bio);
	wait_for_completion(&complete);
	return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO;
}

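/*
 * bdev_readpage() doubles as the ->readpage device operation and as the
 * filler for the read_cache_page() calls in the superblock lookups
 * below.
 */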
static int bdev_readpage(void *_sb, struct page *page)
{
	struct super_block *sb = _sb;
	struct block_device *bdev = logfs_super(sb)->s_bdev;
	int err;

	err = sync_request(page, bdev, READ);
	if (err) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		SetPageUptodate(page);
		ClearPageError(page);
	}
	unlock_page(page);
	return err;
}

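/*
 * Every asynchronous write or erase bumps s_pending_writes; the last
 * completion wakes this queue so bdev_sync() can return.
 */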
static DECLARE_WAIT_QUEUE_HEAD(wq);

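/*
 * Completion handler for segment writes: end writeback on every page in
 * the bio and drop the references taken by find_lock_page() in
 * __bdev_writeseg().
 */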
static void writeseg_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct super_block *sb = bio->bi_private;
	struct logfs_super *super = logfs_super(sb);
	struct page *page;

	BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
	BUG_ON(err);
	BUG_ON(bio->bi_vcnt == 0);
	/* Walk the bio_vec array backwards, prefetching the next entry. */
	do {
		page = bvec->bv_page;
		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		end_page_writeback(page);
		page_cache_release(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
	if (atomic_dec_and_test(&super->s_pending_writes))
		wake_up(&wq);
}

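/*
 * Write nr_pages pages of the mapping inode to the device at byte
 * offset ofs.  The block layer cannot split bios, so requests larger
 * than one bio are submitted in max_pages-sized chunks.
 */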
static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
		size_t nr_pages)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	struct bio *bio;
	struct page *page;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
	int i;

	if (max_pages > BIO_MAX_PAGES)
		max_pages = BIO_MAX_PAGES;
	bio = bio_alloc(GFP_NOFS, max_pages);
	BUG_ON(!bio);

	for (i = 0; i < nr_pages; i++) {
		if (i >= max_pages) {
			/* Block layer cannot split bios :( */
			bio->bi_vcnt = i;
			bio->bi_idx = 0;
			bio->bi_size = i * PAGE_SIZE;
			bio->bi_bdev = super->s_bdev;
			bio->bi_sector = ofs >> 9;
			bio->bi_private = sb;
			bio->bi_end_io = writeseg_end_io;
			atomic_inc(&super->s_pending_writes);
			submit_bio(WRITE, bio);

			/* Restart with a fresh bio for the remaining pages. */
			ofs += i * PAGE_SIZE;
			index += i;
			nr_pages -= i;
			i = 0;

			bio = bio_alloc(GFP_NOFS, max_pages);
			BUG_ON(!bio);
		}
		page = find_lock_page(mapping, index + i);
		BUG_ON(!page);
		bio->bi_io_vec[i].bv_page = page;
		bio->bi_io_vec[i].bv_len = PAGE_SIZE;
		bio->bi_io_vec[i].bv_offset = 0;

		BUG_ON(PageWriteback(page));
		set_page_writeback(page);
		unlock_page(page);
	}
	bio->bi_vcnt = nr_pages;
	bio->bi_idx = 0;
	bio->bi_size = nr_pages * PAGE_SIZE;
	bio->bi_bdev = super->s_bdev;
	bio->bi_sector = ofs >> 9;
	bio->bi_private = sb;
	bio->bi_end_io = writeseg_end_io;
	atomic_inc(&super->s_pending_writes);
	submit_bio(WRITE, bio);
	return 0;
}

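/*
 * Round the request out to whole pages: pull ofs back to a page
 * boundary and grow len to cover the tail.
 */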
static void bdev_writeseg(struct super_block *sb, u64 ofs, size_t len)
{
	struct logfs_super *super = logfs_super(sb);
	int head;

	BUG_ON(super->s_flags & LOGFS_SB_FLAG_RO);

	if (len == 0) {
		/* This can happen when the object fit perfectly into a
		 * segment, the segment gets written per sync and subsequently
		 * closed.
		 */
		return;
	}
	head = ofs & (PAGE_SIZE - 1);
	if (head) {
		ofs -= head;
		len += head;
	}
	len = PAGE_ALIGN(len);
	__bdev_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
}

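/*
 * The erase path mirrors the write path above: do_erase() overwrites
 * the range with copies of s_erase_page, and erase_end_io() accounts
 * each completion against s_pending_writes.
 */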
static void erase_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct super_block *sb = bio->bi_private;
	struct logfs_super *super = logfs_super(sb);

	BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
	BUG_ON(err);
	BUG_ON(bio->bi_vcnt == 0);
	bio_put(bio);
	if (atomic_dec_and_test(&super->s_pending_writes))
		wake_up(&wq);
}

static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
		size_t nr_pages)
{
	struct logfs_super *super = logfs_super(sb);
	struct bio *bio;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
	int i;

	if (max_pages > BIO_MAX_PAGES)
		max_pages = BIO_MAX_PAGES;
	bio = bio_alloc(GFP_NOFS, max_pages);
	BUG_ON(!bio);

	for (i = 0; i < nr_pages; i++) {
		if (i >= max_pages) {
			/* Block layer cannot split bios :( */
			bio->bi_vcnt = i;
			bio->bi_idx = 0;
			bio->bi_size = i * PAGE_SIZE;
			bio->bi_bdev = super->s_bdev;
			bio->bi_sector = ofs >> 9;
			bio->bi_private = sb;
			bio->bi_end_io = erase_end_io;
			atomic_inc(&super->s_pending_writes);
			submit_bio(WRITE, bio);

			ofs += i * PAGE_SIZE;
			index += i;
			nr_pages -= i;
			i = 0;

			bio = bio_alloc(GFP_NOFS, max_pages);
			BUG_ON(!bio);
		}
		bio->bi_io_vec[i].bv_page = super->s_erase_page;
		bio->bi_io_vec[i].bv_len = PAGE_SIZE;
		bio->bi_io_vec[i].bv_offset = 0;
	}
	bio->bi_vcnt = nr_pages;
	bio->bi_idx = 0;
	bio->bi_size = nr_pages * PAGE_SIZE;
	bio->bi_bdev = super->s_bdev;
	bio->bi_sector = ofs >> 9;
	bio->bi_private = sb;
	bio->bi_end_io = erase_end_io;
	atomic_inc(&super->s_pending_writes);
	submit_bio(WRITE, bio);
	return 0;
}

static int bdev_erase(struct super_block *sb, loff_t to, size_t len,
		int ensure_write)
{
	struct logfs_super *super = logfs_super(sb);

	BUG_ON(to & (PAGE_SIZE - 1));
	BUG_ON(len & (PAGE_SIZE - 1));

	if (super->s_flags & LOGFS_SB_FLAG_RO)
		return -EROFS;

	if (ensure_write) {
		/*
		 * Object store doesn't care whether erases happen or not.
		 * But for the journal they are required.  Otherwise a scan
		 * can find an old commit entry and assume it is the current
		 * one, travelling back in time.
		 */
		do_erase(sb, to, to >> PAGE_SHIFT, len >> PAGE_SHIFT);
	}

	return 0;
}

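/* Wait until every write and erase submitted so far has completed. */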
static void bdev_sync(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);

	wait_event(wq, atomic_read(&super->s_pending_writes) == 0);
}

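/*
 * Superblock locations: the first superblock sits in the first page of
 * the device, the last one in the final 0x1000-byte block (the
 * hard-coded mask assumes PAGE_SIZE == 4096).  Both are read through
 * the mapping inode's page cache.
 */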
static struct page *bdev_find_first_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = bdev_readpage;

	*ofs = 0;
	return read_cache_page(mapping, 0, filler, sb);
}

static struct page *bdev_find_last_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = bdev_readpage;
	u64 pos = (super->s_bdev->bd_inode->i_size & ~0xfffULL) - 0x1000;
	pgoff_t index = pos >> PAGE_SHIFT;

	*ofs = pos;
	return read_cache_page(mapping, index, filler, sb);
}

static int bdev_write_sb(struct super_block *sb, struct page *page)
{
	struct block_device *bdev = logfs_super(sb)->s_bdev;

	/* Nothing special to do for block devices. */
	return sync_request(page, bdev, WRITE);
}

static void bdev_put_device(struct logfs_super *s)
{
	blkdev_put(s->s_bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

static int bdev_can_write_buf(struct super_block *sb, u64 ofs)
{
	return 0;
}

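/* Device operations handed to the logfs core for block device mounts. */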
static const struct logfs_device_ops bd_devops = {
	.find_first_sb	= bdev_find_first_sb,
	.find_last_sb	= bdev_find_last_sb,
	.write_sb	= bdev_write_sb,
	.readpage	= bdev_readpage,
	.writeseg	= bdev_writeseg,
	.erase		= bdev_erase,
	.can_write_buf	= bdev_can_write_buf,
	.sync		= bdev_sync,
	.put_device	= bdev_put_device,
};

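/*
 * Open the named device exclusively.  mtdblock devices are handed back
 * to the native MTD backend instead of being treated as block devices.
 */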
int logfs_get_sb_bdev(struct logfs_super *p, struct file_system_type *type,
		const char *devname)
{
	struct block_device *bdev;

	bdev = blkdev_get_by_path(devname, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				  type);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
		int mtdnr = MINOR(bdev->bd_dev);
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
		return logfs_get_sb_mtd(p, mtdnr);
	}

	p->s_bdev = bdev;
	p->s_mtd = NULL;
	p->s_devops = &bd_devops;
	return 0;
}