v3.1
/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"

static struct kmem_cache *io_page_cachep, *io_end_cachep;

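/*
 * Create the slab caches for the ext4_io_page and ext4_io_end
 * structures used by the writeback path below.
 */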
int __init ext4_init_pageio(void)
{
	io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT);
	if (io_page_cachep == NULL)
		return -ENOMEM;
	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
	if (io_end_cachep == NULL) {
		kmem_cache_destroy(io_page_cachep);
		return -ENOMEM;
	}
	return 0;
}

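/* Destroy the slab caches created by ext4_init_pageio(). */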
void ext4_exit_pageio(void)
{
	kmem_cache_destroy(io_end_cachep);
	kmem_cache_destroy(io_page_cachep);
}

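/*
 * Wait until every outstanding io_end for @inode has been freed,
 * i.e. until i_ioend_count drops to zero.
 */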
void ext4_ioend_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0));
}

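/*
 * Drop one reference to an io_page; the final put ends writeback
 * on the page and releases both the page and the structure.
 */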
static void put_io_page(struct ext4_io_page *io_page)
{
	if (atomic_dec_and_test(&io_page->p_count)) {
		end_page_writeback(io_page->p_page);
		put_page(io_page->p_page);
		kmem_cache_free(io_page_cachep, io_page);
	}
}

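/*
 * Free an io_end, dropping its page references and waking up
 * ext4_ioend_wait() sleepers once the inode's ioend count is zero.
 */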
void ext4_free_io_end(ext4_io_end_t *io)
{
	int i;
	wait_queue_head_t *wq;

	BUG_ON(!io);
	if (io->page)
		put_page(io->page);
	for (i = 0; i < io->num_io_pages; i++)
		put_io_page(io->pages[i]);
	io->num_io_pages = 0;
	wq = ext4_ioend_wq(io->inode);
	if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count) &&
	    waitqueue_active(wq))
		wake_up_all(wq);
	kmem_cache_free(io_end_cachep, io);
}

/*
 * check a range of space and convert unwritten extents to written.
 */
int ext4_end_io_nolock(ext4_io_end_t *io)
{
	struct inode *inode = io->inode;
	loff_t offset = io->offset;
	ssize_t size = io->size;
	wait_queue_head_t *wq;
	int ret = 0;

	ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
		   "list->prev 0x%p\n",
		   io, inode->i_ino, io->list.next, io->list.prev);

	if (list_empty(&io->list))
		return ret;

	if (!(io->flag & EXT4_IO_END_UNWRITTEN))
		return ret;

	ret = ext4_convert_unwritten_extents(inode, offset, size);
	if (ret < 0) {
		printk(KERN_EMERG "%s: failed to convert unwritten "
			"extents to written extents, error is %d "
			"io is still on inode %lu aio dio list\n",
		       __func__, ret, inode->i_ino);
		return ret;
	}

	if (io->iocb)
		aio_complete(io->iocb, io->result, 0);
	/* clear the DIO AIO unwritten flag */
	if (io->flag & EXT4_IO_END_UNWRITTEN) {
		io->flag &= ~EXT4_IO_END_UNWRITTEN;
		/* Wake up anyone waiting on unwritten extent conversion */
		wq = ext4_ioend_wq(io->inode);
		if (atomic_dec_and_test(&EXT4_I(inode)->i_aiodio_unwritten) &&
		    waitqueue_active(wq)) {
			wake_up_all(wq);
		}
	}

	return ret;
}

/*
 * work on completed aio dio IO, to convert unwritten extents to
 * written extents
 */
static void ext4_end_io_work(struct work_struct *work)
{
	ext4_io_end_t		*io = container_of(work, ext4_io_end_t, work);
	struct inode		*inode = io->inode;
	struct ext4_inode_info	*ei = EXT4_I(inode);
	unsigned long		flags;
	int			ret;

	if (!mutex_trylock(&inode->i_mutex)) {
		/*
		 * Requeue the work instead of waiting so that the work
		 * items queued after this can be processed.
		 */
		queue_work(EXT4_SB(inode->i_sb)->dio_unwritten_wq, &io->work);
		/*
		 * To prevent the ext4-dio-unwritten thread from keeping
		 * requeueing end_io requests and occupying cpu for too long,
		 * yield the cpu if it sees an end_io request that has already
		 * been requeued.
		 */
		if (io->flag & EXT4_IO_END_QUEUED)
			yield();
		io->flag |= EXT4_IO_END_QUEUED;
		return;
	}
	ret = ext4_end_io_nolock(io);
	if (ret < 0) {
		mutex_unlock(&inode->i_mutex);
		return;
	}

	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	if (!list_empty(&io->list))
		list_del_init(&io->list);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
	mutex_unlock(&inode->i_mutex);
	ext4_free_io_end(io);
}

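/* Allocate a zeroed io_end for @inode and bump its pending ioend count. */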
ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
	ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);
	if (io) {
		atomic_inc(&EXT4_I(inode)->i_ioend_count);
		io->inode = inode;
		INIT_WORK(&io->work, ext4_end_io_work);
		INIT_LIST_HEAD(&io->list);
	}
	return io;
}

/*
 * Print a buffer I/O error compatible with the one in fs/buffer.c.  This
 * provides compatibility with dmesg scrapers that look for a specific
 * buffer I/O error message.  We really need a unified error reporting
 * structure to userspace ala Digital Unix's uerf system, but it's
 * probably not going to happen in my lifetime, due to LKML politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
}

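/*
 * bio completion callback: record any I/O error on the pages involved,
 * drop the page references, and either free the io_end or queue it for
 * unwritten extent conversion.
 */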
static void ext4_end_bio(struct bio *bio, int error)
{
	ext4_io_end_t *io_end = bio->bi_private;
	struct workqueue_struct *wq;
	struct inode *inode;
	unsigned long flags;
	int i;
	sector_t bi_sector = bio->bi_sector;

	BUG_ON(!io_end);
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = 0;
	bio_put(bio);

	for (i = 0; i < io_end->num_io_pages; i++) {
		struct page *page = io_end->pages[i]->p_page;
		struct buffer_head *bh, *head;
		loff_t offset;
		loff_t io_end_offset;

		if (error) {
			SetPageError(page);
			set_bit(AS_EIO, &page->mapping->flags);
			head = page_buffers(page);
			BUG_ON(!head);

			io_end_offset = io_end->offset + io_end->size;

			offset = (sector_t) page->index << PAGE_CACHE_SHIFT;
			bh = head;
			do {
				if ((offset >= io_end->offset) &&
				    (offset+bh->b_size <= io_end_offset))
					buffer_io_error(bh);

				offset += bh->b_size;
				bh = bh->b_this_page;
			} while (bh != head);
		}

		put_io_page(io_end->pages[i]);
	}
	io_end->num_io_pages = 0;
	inode = io_end->inode;

	if (error) {
		io_end->flag |= EXT4_IO_END_ERROR;
		ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
			     "(offset %llu size %ld starting block %llu)",
			     inode->i_ino,
			     (unsigned long long) io_end->offset,
			     (long) io_end->size,
			     (unsigned long long)
			     bi_sector >> (inode->i_blkbits - 9));
	}

	if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
		ext4_free_io_end(io_end);
		return;
	}

	/* Add the io_end to per-inode completed io list */
	spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
	list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
	spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);

	wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
	/* queue the work to convert unwritten extents to written */
	queue_work(wq, &io_end->work);
}

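/* Submit the bio accumulated in @io, if any, and reset the submit state. */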
void ext4_io_submit(struct ext4_io_submit *io)
{
	struct bio *bio = io->io_bio;

	if (bio) {
		bio_get(io->io_bio);
		submit_bio(io->io_op, io->io_bio);
		BUG_ON(bio_flagged(io->io_bio, BIO_EOPNOTSUPP));
		bio_put(io->io_bio);
	}
	io->io_bio = NULL;
	io->io_op = 0;
	io->io_end = NULL;
}

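/*
 * Allocate an io_end and a bio starting at @bh's block, and attach
 * them to @io so that buffers can be added with io_submit_add_bh().
 */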
static int io_submit_init(struct ext4_io_submit *io,
			  struct inode *inode,
			  struct writeback_control *wbc,
			  struct buffer_head *bh)
{
	ext4_io_end_t *io_end;
	struct page *page = bh->b_page;
	int nvecs = bio_get_nr_vecs(bh->b_bdev);
	struct bio *bio;

	io_end = ext4_init_io_end(inode, GFP_NOFS);
	if (!io_end)
		return -ENOMEM;
	bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio->bi_private = io->io_end = io_end;
	bio->bi_end_io = ext4_end_bio;

	io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);

	io->io_bio = bio;
	io->io_op = (wbc->sync_mode == WB_SYNC_ALL ?  WRITE_SYNC : WRITE);
	io->io_next_block = bh->b_blocknr;
	return 0;
}

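/*
 * Queue @bh for write-out as part of the bio being built in @io,
 * submitting the current bio and starting a new one whenever the
 * buffer is not contiguous with it or no more pages fit.
 */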
static int io_submit_add_bh(struct ext4_io_submit *io,
			    struct ext4_io_page *io_page,
			    struct inode *inode,
			    struct writeback_control *wbc,
			    struct buffer_head *bh)
{
	ext4_io_end_t *io_end;
	int ret;

	if (buffer_new(bh)) {
		clear_buffer_new(bh);
		unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
	}

	if (!buffer_mapped(bh) || buffer_delay(bh)) {
		if (!buffer_mapped(bh))
			clear_buffer_dirty(bh);
		if (io->io_bio)
			ext4_io_submit(io);
		return 0;
	}

	if (io->io_bio && bh->b_blocknr != io->io_next_block) {
submit_and_retry:
		ext4_io_submit(io);
	}
	if (io->io_bio == NULL) {
		ret = io_submit_init(io, inode, wbc, bh);
		if (ret)
			return ret;
	}
	io_end = io->io_end;
	if ((io_end->num_io_pages >= MAX_IO_PAGES) &&
	    (io_end->pages[io_end->num_io_pages-1] != io_page))
		goto submit_and_retry;
	if (buffer_uninit(bh) && !(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
		io_end->flag |= EXT4_IO_END_UNWRITTEN;
		atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten);
	}
	io->io_end->size += bh->b_size;
	io->io_next_block++;
	ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
	if (ret != bh->b_size)
		goto submit_and_retry;
	if ((io_end->num_io_pages == 0) ||
	    (io_end->pages[io_end->num_io_pages-1] != io_page)) {
		io_end->pages[io_end->num_io_pages++] = io_page;
		atomic_inc(&io_page->p_count);
	}
	return 0;
}

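/*
 * Write out the dirty buffers of a locked @page (up to @len bytes):
 * mark the page as under writeback and feed each buffer to
 * io_submit_add_bh().
 */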
int ext4_bio_write_page(struct ext4_io_submit *io,
			struct page *page,
			int len,
			struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end, blocksize;
	struct ext4_io_page *io_page;
	struct buffer_head *bh, *head;
	int ret = 0;

	blocksize = 1 << inode->i_blkbits;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));

	io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
	if (!io_page) {
		set_page_dirty(page);
		unlock_page(page);
		return -ENOMEM;
	}
	io_page->p_page = page;
	atomic_set(&io_page->p_count, 1);
	get_page(page);
	set_page_writeback(page);
	ClearPageError(page);

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {

		block_end = block_start + blocksize;
		if (block_start >= len) {
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		clear_buffer_dirty(bh);
		ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
		if (ret) {
			/*
			 * We only get here on ENOMEM.  Not much else
			 * we can do but mark the page as dirty, and
			 * better luck next time.
			 */
			set_page_dirty(page);
			break;
		}
	}
	unlock_page(page);
	/*
	 * If the page was truncated before we could do the writeback,
	 * or we had a memory allocation error while trying to write
	 * the first buffer head, we won't have submitted any pages for
	 * I/O.  In that case we need to make sure we've cleared the
	 * PageWriteback bit from the page to prevent the system from
	 * wedging later on.
	 */
	put_io_page(io_page);
	return ret;
}

v3.5.6
/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"

static struct kmem_cache *io_page_cachep, *io_end_cachep;

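/*
 * Create the slab caches for the ext4_io_page and ext4_io_end
 * structures used by the writeback path below.
 */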
int __init ext4_init_pageio(void)
{
	io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT);
	if (io_page_cachep == NULL)
		return -ENOMEM;
	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
	if (io_end_cachep == NULL) {
		kmem_cache_destroy(io_page_cachep);
		return -ENOMEM;
	}
	return 0;
}

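/* Destroy the slab caches created by ext4_init_pageio(). */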
void ext4_exit_pageio(void)
{
	kmem_cache_destroy(io_end_cachep);
	kmem_cache_destroy(io_page_cachep);
}

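/*
 * Wait until every outstanding io_end for @inode has been freed,
 * i.e. until i_ioend_count drops to zero.
 */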
void ext4_ioend_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0));
}

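/*
 * Drop one reference to an io_page; the final put ends writeback
 * on the page and releases both the page and the structure.
 */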
static void put_io_page(struct ext4_io_page *io_page)
{
	if (atomic_dec_and_test(&io_page->p_count)) {
		end_page_writeback(io_page->p_page);
		put_page(io_page->p_page);
		kmem_cache_free(io_page_cachep, io_page);
	}
}

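/*
 * Free an io_end, dropping its page references and waking up
 * ext4_ioend_wait() sleepers once the inode's ioend count is zero.
 */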
void ext4_free_io_end(ext4_io_end_t *io)
{
	int i;

	BUG_ON(!io);
	if (io->page)
		put_page(io->page);
	for (i = 0; i < io->num_io_pages; i++)
		put_io_page(io->pages[i]);
	io->num_io_pages = 0;
	if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count))
		wake_up_all(ext4_ioend_wq(io->inode));
	kmem_cache_free(io_end_cachep, io);
}

/*
 * check a range of space and convert unwritten extents to written.
 *
 * Called with inode->i_mutex; we depend on this when we manipulate
 * io->flag, since we could otherwise race with ext4_flush_completed_IO()
 */
int ext4_end_io_nolock(ext4_io_end_t *io)
{
	struct inode *inode = io->inode;
	loff_t offset = io->offset;
	ssize_t size = io->size;
	int ret = 0;

	ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
		   "list->prev 0x%p\n",
		   io, inode->i_ino, io->list.next, io->list.prev);

	ret = ext4_convert_unwritten_extents(inode, offset, size);
	if (ret < 0) {
		ext4_msg(inode->i_sb, KERN_EMERG,
			 "failed to convert unwritten extents to written "
			 "extents -- potential data loss!  "
			 "(inode %lu, offset %llu, size %zd, error %d)",
			 inode->i_ino, offset, size, ret);
	}

	if (io->iocb)
		aio_complete(io->iocb, io->result, 0);

	if (io->flag & EXT4_IO_END_DIRECT)
		inode_dio_done(inode);
	/* Wake up anyone waiting on unwritten extent conversion */
	if (atomic_dec_and_test(&EXT4_I(inode)->i_aiodio_unwritten))
		wake_up_all(ext4_ioend_wq(io->inode));
	return ret;
}

/*
 * work on completed aio dio IO, to convert unwritten extents to
 * written extents
 */
static void ext4_end_io_work(struct work_struct *work)
{
	ext4_io_end_t		*io = container_of(work, ext4_io_end_t, work);
	struct inode		*inode = io->inode;
	struct ext4_inode_info	*ei = EXT4_I(inode);
	unsigned long		flags;

	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	if (io->flag & EXT4_IO_END_IN_FSYNC)
		goto requeue;
	if (list_empty(&io->list)) {
		spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
		goto free;
	}

	if (!mutex_trylock(&inode->i_mutex)) {
		bool was_queued;
requeue:
		was_queued = !!(io->flag & EXT4_IO_END_QUEUED);
		io->flag |= EXT4_IO_END_QUEUED;
		spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
		/*
		 * Requeue the work instead of waiting so that the work
		 * items queued after this can be processed.
		 */
		queue_work(EXT4_SB(inode->i_sb)->dio_unwritten_wq, &io->work);
		/*
		 * To prevent the ext4-dio-unwritten thread from keeping
		 * requeueing end_io requests and occupying cpu for too long,
		 * yield the cpu if it sees an end_io request that has already
		 * been requeued.
		 */
		if (was_queued)
			yield();
		return;
	}
	list_del_init(&io->list);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
	(void) ext4_end_io_nolock(io);
	mutex_unlock(&inode->i_mutex);
free:
	ext4_free_io_end(io);
}

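/* Allocate a zeroed io_end for @inode and bump its pending ioend count. */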
ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
	ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);
	if (io) {
		atomic_inc(&EXT4_I(inode)->i_ioend_count);
		io->inode = inode;
		INIT_WORK(&io->work, ext4_end_io_work);
		INIT_LIST_HEAD(&io->list);
	}
	return io;
}

/*
 * Print a buffer I/O error compatible with the one in fs/buffer.c.  This
 * provides compatibility with dmesg scrapers that look for a specific
 * buffer I/O error message.  We really need a unified error reporting
 * structure to userspace ala Digital Unix's uerf system, but it's
 * probably not going to happen in my lifetime, due to LKML politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
}

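/*
 * bio completion callback: record any I/O error on the pages involved,
 * drop the page references, and either free the io_end or queue it for
 * unwritten extent conversion.
 */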
static void ext4_end_bio(struct bio *bio, int error)
{
	ext4_io_end_t *io_end = bio->bi_private;
	struct workqueue_struct *wq;
	struct inode *inode;
	unsigned long flags;
	int i;
	sector_t bi_sector = bio->bi_sector;

	BUG_ON(!io_end);
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = 0;
	bio_put(bio);

	for (i = 0; i < io_end->num_io_pages; i++) {
		struct page *page = io_end->pages[i]->p_page;
		struct buffer_head *bh, *head;
		loff_t offset;
		loff_t io_end_offset;

		if (error) {
			SetPageError(page);
			set_bit(AS_EIO, &page->mapping->flags);
			head = page_buffers(page);
			BUG_ON(!head);

			io_end_offset = io_end->offset + io_end->size;

			offset = (sector_t) page->index << PAGE_CACHE_SHIFT;
			bh = head;
			do {
				if ((offset >= io_end->offset) &&
				    (offset+bh->b_size <= io_end_offset))
					buffer_io_error(bh);

				offset += bh->b_size;
				bh = bh->b_this_page;
			} while (bh != head);
		}

		put_io_page(io_end->pages[i]);
	}
	io_end->num_io_pages = 0;
	inode = io_end->inode;

	if (error) {
		io_end->flag |= EXT4_IO_END_ERROR;
		ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
			     "(offset %llu size %ld starting block %llu)",
			     inode->i_ino,
			     (unsigned long long) io_end->offset,
			     (long) io_end->size,
			     (unsigned long long)
			     bi_sector >> (inode->i_blkbits - 9));
	}

	if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
		ext4_free_io_end(io_end);
		return;
	}

	/* Add the io_end to per-inode completed io list */
	spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
	list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
	spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);

	wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
	/* queue the work to convert unwritten extents to written */
	queue_work(wq, &io_end->work);
}

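/* Submit the bio accumulated in @io, if any, and reset the submit state. */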
void ext4_io_submit(struct ext4_io_submit *io)
{
	struct bio *bio = io->io_bio;

	if (bio) {
		bio_get(io->io_bio);
		submit_bio(io->io_op, io->io_bio);
		BUG_ON(bio_flagged(io->io_bio, BIO_EOPNOTSUPP));
		bio_put(io->io_bio);
	}
	io->io_bio = NULL;
	io->io_op = 0;
	io->io_end = NULL;
}

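/*
 * Allocate an io_end and a bio starting at @bh's block, and attach
 * them to @io so that buffers can be added with io_submit_add_bh().
 */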
static int io_submit_init(struct ext4_io_submit *io,
			  struct inode *inode,
			  struct writeback_control *wbc,
			  struct buffer_head *bh)
{
	ext4_io_end_t *io_end;
	struct page *page = bh->b_page;
	int nvecs = bio_get_nr_vecs(bh->b_bdev);
	struct bio *bio;

	io_end = ext4_init_io_end(inode, GFP_NOFS);
	if (!io_end)
		return -ENOMEM;
	bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio->bi_private = io->io_end = io_end;
	bio->bi_end_io = ext4_end_bio;

	io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);

	io->io_bio = bio;
	io->io_op = (wbc->sync_mode == WB_SYNC_ALL ?  WRITE_SYNC : WRITE);
	io->io_next_block = bh->b_blocknr;
	return 0;
}

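/*
 * Queue @bh for write-out as part of the bio being built in @io,
 * submitting the current bio and starting a new one whenever the
 * buffer is not contiguous with it or no more pages fit.
 */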
static int io_submit_add_bh(struct ext4_io_submit *io,
			    struct ext4_io_page *io_page,
			    struct inode *inode,
			    struct writeback_control *wbc,
			    struct buffer_head *bh)
{
	ext4_io_end_t *io_end;
	int ret;

	if (buffer_new(bh)) {
		clear_buffer_new(bh);
		unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
	}

	if (!buffer_mapped(bh) || buffer_delay(bh)) {
		if (!buffer_mapped(bh))
			clear_buffer_dirty(bh);
		if (io->io_bio)
			ext4_io_submit(io);
		return 0;
	}

	if (io->io_bio && bh->b_blocknr != io->io_next_block) {
submit_and_retry:
		ext4_io_submit(io);
	}
	if (io->io_bio == NULL) {
		ret = io_submit_init(io, inode, wbc, bh);
		if (ret)
			return ret;
	}
	io_end = io->io_end;
	if ((io_end->num_io_pages >= MAX_IO_PAGES) &&
	    (io_end->pages[io_end->num_io_pages-1] != io_page))
		goto submit_and_retry;
	if (buffer_uninit(bh))
		ext4_set_io_unwritten_flag(inode, io_end);
	io->io_end->size += bh->b_size;
	io->io_next_block++;
	ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
	if (ret != bh->b_size)
		goto submit_and_retry;
	if ((io_end->num_io_pages == 0) ||
	    (io_end->pages[io_end->num_io_pages-1] != io_page)) {
		io_end->pages[io_end->num_io_pages++] = io_page;
		atomic_inc(&io_page->p_count);
	}
	return 0;
}

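/*
 * Write out the dirty buffers of a locked @page (up to @len bytes):
 * mark the page as under writeback and feed each buffer to
 * io_submit_add_bh().
 */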
int ext4_bio_write_page(struct ext4_io_submit *io,
			struct page *page,
			int len,
			struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end, blocksize;
	struct ext4_io_page *io_page;
	struct buffer_head *bh, *head;
	int ret = 0;

	blocksize = 1 << inode->i_blkbits;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));

	io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
	if (!io_page) {
		set_page_dirty(page);
		unlock_page(page);
		return -ENOMEM;
	}
	io_page->p_page = page;
	atomic_set(&io_page->p_count, 1);
	get_page(page);
	set_page_writeback(page);
	ClearPageError(page);

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {

		block_end = block_start + blocksize;
		if (block_start >= len) {
			/*
			 * Comments copied from block_write_full_page_endio:
			 *
			 * The page straddles i_size.  It must be zeroed out on
			 * each and every writepage invocation because it may
			 * be mmapped.  "A file is mapped in multiples of the
			 * page size.  For a file that is not a multiple of
			 * the page size, the remaining memory is zeroed when
			 * mapped, and writes to that region are not written
			 * out to the file."
			 */
			zero_user_segment(page, block_start, block_end);
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		clear_buffer_dirty(bh);
		ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
		if (ret) {
			/*
			 * We only get here on ENOMEM.  Not much else
			 * we can do but mark the page as dirty, and
			 * better luck next time.
			 */
			set_page_dirty(page);
			break;
		}
	}
	unlock_page(page);
	/*
	 * If the page was truncated before we could do the writeback,
	 * or we had a memory allocation error while trying to write
	 * the first buffer head, we won't have submitted any pages for
	 * I/O.  In that case we need to make sure we've cleared the
	 * PageWriteback bit from the page to prevent the system from
	 * wedging later on.
	 */
	put_io_page(io_page);
	return ret;
}