/*
 * fs/reiserfs/file.c — two kernel versions of this file follow.
 * First: the Linux v3.1 version.  (The Linux v4.6 version is below.)
 */
  1/*
  2 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
  3 */
  4
  5#include <linux/time.h>
  6#include <linux/reiserfs_fs.h>
  7#include <linux/reiserfs_acl.h>
  8#include <linux/reiserfs_xattr.h>
  9#include <asm/uaccess.h>
 10#include <linux/pagemap.h>
 11#include <linux/swap.h>
 12#include <linux/writeback.h>
 13#include <linux/blkdev.h>
 14#include <linux/buffer_head.h>
 15#include <linux/quotaops.h>
 16
 17/*
 18** We pack the tails of files on file close, not at the time they are written.
 19** This implies an unnecessary copy of the tail and an unnecessary indirect item
 20** insertion/balancing, for files that are written in one write.
 21** It avoids unnecessary tail packings (balances) for files that are written in
 22** multiple writes and are small enough to have tails.
 23**
 24** file_release is called by the VFS layer when the file is closed.  If
 25** this is the last open file descriptor, and the file
 26** small enough to have a tail, and the tail is currently in an
 27** unformatted node, the tail is converted back into a direct item.
 28**
 29** We use reiserfs_truncate_file to pack the tail, since it already has
 30** all the conditions coded.
 31*/
/*
 * Drop one opener reference; on the final close, free any pending
 * block preallocation and repack the file tail (rationale in the
 * comment block above).
 *
 * Returns 0 on success or a negative errno from the journal /
 * truncate paths.
 */
static int reiserfs_file_release(struct inode *inode, struct file *filp)
{

	struct reiserfs_transaction_handle th;
	int err;
	int jbegin_failure = 0;

	BUG_ON(!S_ISREG(inode->i_mode));

	/*
	 * Fast path: if we are not the last opener (the counter would not
	 * drop below 1), just decrement and return without taking the
	 * tailpack mutex.
	 */
        if (atomic_add_unless(&REISERFS_I(inode)->openers, -1, 1))
		return 0;

	mutex_lock(&(REISERFS_I(inode)->tailpack));

	/*
	 * Re-check under the mutex: someone may have reopened the file
	 * in the meantime; the eventual last closer handles the tail.
	 */
        if (!atomic_dec_and_test(&REISERFS_I(inode)->openers)) {
		mutex_unlock(&(REISERFS_I(inode)->tailpack));
		return 0;
	}

	/* fast out for when nothing needs to be done */
	if ((!(REISERFS_I(inode)->i_flags & i_pack_on_close_mask) ||
	     !tail_has_to_be_packed(inode)) &&
	    REISERFS_I(inode)->i_prealloc_count <= 0) {
		mutex_unlock(&(REISERFS_I(inode)->tailpack));
		return 0;
	}

	reiserfs_write_lock(inode->i_sb);
	/* freeing preallocation only involves relogging blocks that
	 * are already in the current transaction.  preallocation gets
	 * freed at the end of each transaction, so it is impossible for
	 * us to log any additional blocks (including quota blocks)
	 */
	err = journal_begin(&th, inode->i_sb, 1);
	if (err) {
		/* uh oh, we can't allow the inode to go away while there
		 * is still preallocation blocks pending.  Try to join the
		 * aborted transaction
		 */
		jbegin_failure = err;
		err = journal_join_abort(&th, inode->i_sb, 1);

		if (err) {
			/* hmpf, our choices here aren't good.  We can pin the inode
			 * which will disallow unmount from ever happening, we can
			 * do nothing, which will corrupt random memory on unmount,
			 * or we can forcibly remove the file from the preallocation
			 * list, which will leak blocks on disk.  Lets pin the inode
			 * and let the admin know what is going on.
			 */
			igrab(inode);
			reiserfs_warning(inode->i_sb, "clm-9001",
					 "pinning inode %lu because the "
					 "preallocation can't be freed",
					 inode->i_ino);
			goto out;
		}
	}
	reiserfs_update_inode_transaction(inode);

#ifdef REISERFS_PREALLOCATE
	reiserfs_discard_prealloc(&th, inode);
#endif
	err = journal_end(&th, inode->i_sb, 1);

	/* copy back the error code from journal_begin */
	if (!err)
		err = jbegin_failure;

	if (!err &&
	    (REISERFS_I(inode)->i_flags & i_pack_on_close_mask) &&
	    tail_has_to_be_packed(inode)) {

		/* if regular file is released by last holder and it has been
		   appended (we append by unformatted node only) or its direct
		   item(s) had to be converted, then it may have to be
		   indirect2direct converted */
		err = reiserfs_truncate_file(inode, 0);
	}
      out:
	reiserfs_write_unlock(inode->i_sb);
	mutex_unlock(&(REISERFS_I(inode)->tailpack));
	return err;
}
116
117static int reiserfs_file_open(struct inode *inode, struct file *file)
118{
119	int err = dquot_file_open(inode, file);
 
 
120        if (!atomic_inc_not_zero(&REISERFS_I(inode)->openers)) {
121		/* somebody might be tailpacking on final close; wait for it */
122		mutex_lock(&(REISERFS_I(inode)->tailpack));
123		atomic_inc(&REISERFS_I(inode)->openers);
124		mutex_unlock(&(REISERFS_I(inode)->tailpack));
125	}
126	return err;
127}
128
129static void reiserfs_vfs_truncate_file(struct inode *inode)
130{
131	mutex_lock(&(REISERFS_I(inode)->tailpack));
132	reiserfs_truncate_file(inode, 1);
133	mutex_unlock(&(REISERFS_I(inode)->tailpack));
134}
135
/* Sync a reiserfs file. */

/*
 * FIXME: sync_mapping_buffers() never has anything to sync.  Can
 * be removed...
 */

/*
 * Flush the file's dirty pages in [start, end], commit the inode's
 * transaction, and issue a block-device cache flush when barriers
 * are enabled.  Returns 0 or a negative errno.
 */
static int reiserfs_sync_file(struct file *filp, loff_t start, loff_t end,
			      int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	int err;
	int barrier_done;

	/* write back and wait for the requested range first */
	err = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (err)
		return err;

	mutex_lock(&inode->i_mutex);
	BUG_ON(!S_ISREG(inode->i_mode));
	err = sync_mapping_buffers(inode->i_mapping);
	reiserfs_write_lock(inode->i_sb);
	barrier_done = reiserfs_commit_for_inode(inode);
	reiserfs_write_unlock(inode->i_sb);
	/*
	 * NOTE(review): presumably barrier_done == 1 means the commit
	 * already issued a flush, so only flush here otherwise — confirm
	 * against reiserfs_commit_for_inode() in the journal code.
	 */
	if (barrier_done != 1 && reiserfs_barrier_flush(inode->i_sb))
		blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
	mutex_unlock(&inode->i_mutex);
	if (barrier_done < 0)
		return barrier_done;
	return (err < 0) ? -EIO : 0;
}
167
/* taken fs/buffer.c:__block_commit_write */
/*
 * Commit the buffers of @page covering byte range [@from, @to):
 * mark them uptodate, and either log them (data journaling) or dirty
 * them, adding ordered-mode buffers to the transaction's ordered list.
 * Returns 0 or a negative errno from the journal calls.
 */
int reiserfs_commit_page(struct inode *inode, struct page *page,
			 unsigned from, unsigned to)
{
	unsigned block_start, block_end;
	int partial = 0;	/* set if any buffer outside the range is stale */
	unsigned blocksize;
	struct buffer_head *bh, *head;
	unsigned long i_size_index = inode->i_size >> PAGE_CACHE_SHIFT;
	int new;
	int logit = reiserfs_file_data_log(inode);	/* data journaling on? */
	struct super_block *s = inode->i_sb;
	int bh_per_page = PAGE_CACHE_SIZE / s->s_blocksize;
	struct reiserfs_transaction_handle th;
	int ret = 0;

	th.t_trans_id = 0;
	blocksize = 1 << inode->i_blkbits;

	if (logit) {
		reiserfs_write_lock(s);
		/* worst case: every buffer on the page, plus one */
		ret = journal_begin(&th, s, bh_per_page + 1);
		if (ret)
			goto drop_write_lock;
		reiserfs_update_inode_transaction(inode);
	}
	/* walk the circular buffer list once; !block_start admits the head */
	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {

		new = buffer_new(bh);
		clear_buffer_new(bh);
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			/* buffer outside the written range */
			if (!buffer_uptodate(bh))
				partial = 1;
		} else {
			set_buffer_uptodate(bh);
			if (logit) {
				reiserfs_prepare_for_journal(s, bh, 1);
				journal_mark_dirty(&th, s, bh);
			} else if (!buffer_dirty(bh)) {
				mark_buffer_dirty(bh);
				/* do data=ordered on any page past the end
				 * of file and any buffer marked BH_New.
				 */
				if (reiserfs_data_ordered(inode->i_sb) &&
				    (new || page->index >= i_size_index)) {
					reiserfs_add_ordered_list(inode, bh);
				}
			}
		}
	}
	if (logit) {
		ret = journal_end(&th, s, bh_per_page + 1);
	      drop_write_lock:
		reiserfs_write_unlock(s);
	}
	/*
	 * If this is a partial write which happened to make all buffers
	 * uptodate then we can optimize away a bogus readpage() for
	 * the next read(). Here we 'discover' whether the page went
	 * uptodate as a result of this (potentially partial) write.
	 */
	if (!partial)
		SetPageUptodate(page);
	return ret;
}
236
237/* Write @count bytes at position @ppos in a file indicated by @file
238   from the buffer @buf.
239
240   generic_file_write() is only appropriate for filesystems that are not seeking to optimize performance and want
241   something simple that works.  It is not for serious use by general purpose filesystems, excepting the one that it was
242   written for (ext2/3).  This is for several reasons:
243
244   * It has no understanding of any filesystem specific optimizations.
245
246   * It enters the filesystem repeatedly for each page that is written.
247
248   * It depends on reiserfs_get_block() function which if implemented by reiserfs performs costly search_by_key
249   * operation for each page it is supplied with. By contrast reiserfs_file_write() feeds as much as possible at a time
250   * to reiserfs which allows for fewer tree traversals.
251
252   * Each indirect pointer insertion takes a lot of cpu, because it involves memory moves inside of blocks.
253
254   * Asking the block allocation code for blocks one at a time is slightly less efficient.
255
256   All of these reasons for not using only generic file write were understood back when reiserfs was first miscoded to
257   use it, but we were in a hurry to make code freeze, and so it couldn't be revised then.  This new code should make
258   things right finally.
259
260   Future Features: providing search_by_key with hints.
261
262*/
263static ssize_t reiserfs_file_write(struct file *file,	/* the file we are going to write into */
264				   const char __user * buf,	/*  pointer to user supplied data
265								   (in userspace) */
266				   size_t count,	/* amount of bytes to write */
267				   loff_t * ppos	/* pointer to position in file that we start writing at. Should be updated to
268							 * new current position before returning. */
269				   )
270{
271	struct inode *inode = file->f_path.dentry->d_inode;	// Inode of the file that we are writing to.
272	/* To simplify coding at this time, we store
273	   locked pages in array for now */
274	struct reiserfs_transaction_handle th;
275	th.t_trans_id = 0;
276
277	/* If a filesystem is converted from 3.5 to 3.6, we'll have v3.5 items
278	* lying around (most of the disk, in fact). Despite the filesystem
279	* now being a v3.6 format, the old items still can't support large
280	* file sizes. Catch this case here, as the rest of the VFS layer is
281	* oblivious to the different limitations between old and new items.
282	* reiserfs_setattr catches this for truncates. This chunk is lifted
283	* from generic_write_checks. */
284	if (get_inode_item_key_version (inode) == KEY_FORMAT_3_5 &&
285	    *ppos + count > MAX_NON_LFS) {
286		if (*ppos >= MAX_NON_LFS) {
287			return -EFBIG;
288		}
289		if (count > MAX_NON_LFS - (unsigned long)*ppos)
290			count = MAX_NON_LFS - (unsigned long)*ppos;
291	}
292
293	return do_sync_write(file, buf, count, ppos);
294}
295
/*
 * File operations for regular reiserfs files.  Reads and writes go
 * through the generic sync/AIO paths; open/release maintain the
 * openers count used for tail packing on final close.
 */
const struct file_operations reiserfs_file_operations = {
	.read = do_sync_read,
	.write = reiserfs_file_write,
	.unlocked_ioctl = reiserfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = reiserfs_compat_ioctl,
#endif
	.mmap = generic_file_mmap,
	.open = reiserfs_file_open,
	.release = reiserfs_file_release,
	.fsync = reiserfs_sync_file,
	.aio_read = generic_file_aio_read,
	.aio_write = generic_file_aio_write,
	.splice_read = generic_file_splice_read,
	.splice_write = generic_file_splice_write,
	.llseek = generic_file_llseek,
};
313
/*
 * Inode operations for regular reiserfs files: truncate is serialized
 * against tail packing; xattr/ACL hooks are reiserfs-specific.
 */
const struct inode_operations reiserfs_file_inode_operations = {
	.truncate = reiserfs_vfs_truncate_file,
	.setattr = reiserfs_setattr,
	.setxattr = reiserfs_setxattr,
	.getxattr = reiserfs_getxattr,
	.listxattr = reiserfs_listxattr,
	.removexattr = reiserfs_removexattr,
	.permission = reiserfs_permission,
	.get_acl = reiserfs_get_acl,
};
/*
 * Below: the Linux v4.6 version of fs/reiserfs/file.c.
 */
  1/*
  2 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
  3 */
  4
  5#include <linux/time.h>
  6#include "reiserfs.h"
  7#include "acl.h"
  8#include "xattr.h"
  9#include <linux/uaccess.h>
 10#include <linux/pagemap.h>
 11#include <linux/swap.h>
 12#include <linux/writeback.h>
 13#include <linux/blkdev.h>
 14#include <linux/buffer_head.h>
 15#include <linux/quotaops.h>
 16
 17/*
 18 * We pack the tails of files on file close, not at the time they are written.
 19 * This implies an unnecessary copy of the tail and an unnecessary indirect item
 20 * insertion/balancing, for files that are written in one write.
 21 * It avoids unnecessary tail packings (balances) for files that are written in
 22 * multiple writes and are small enough to have tails.
 23 *
 24 * file_release is called by the VFS layer when the file is closed.  If
 25 * this is the last open file descriptor, and the file
 26 * small enough to have a tail, and the tail is currently in an
 27 * unformatted node, the tail is converted back into a direct item.
 28 *
 29 * We use reiserfs_truncate_file to pack the tail, since it already has
 30 * all the conditions coded.
 31 */
/*
 * Drop one opener reference; on the final close, free any pending
 * block preallocation and repack the file tail (rationale in the
 * comment block above).
 *
 * Returns 0 on success or a negative errno from the journal /
 * truncate paths.
 */
static int reiserfs_file_release(struct inode *inode, struct file *filp)
{

	struct reiserfs_transaction_handle th;
	int err;
	int jbegin_failure = 0;

	BUG_ON(!S_ISREG(inode->i_mode));

	/*
	 * Fast path: if we are not the last opener (the counter would not
	 * drop below 1), just decrement and return without taking the
	 * tailpack mutex.
	 */
        if (atomic_add_unless(&REISERFS_I(inode)->openers, -1, 1))
		return 0;

	mutex_lock(&REISERFS_I(inode)->tailpack);

	/*
	 * Re-check under the mutex: someone may have reopened the file
	 * in the meantime; the eventual last closer handles the tail.
	 */
        if (!atomic_dec_and_test(&REISERFS_I(inode)->openers)) {
		mutex_unlock(&REISERFS_I(inode)->tailpack);
		return 0;
	}

	/* fast out for when nothing needs to be done */
	if ((!(REISERFS_I(inode)->i_flags & i_pack_on_close_mask) ||
	     !tail_has_to_be_packed(inode)) &&
	    REISERFS_I(inode)->i_prealloc_count <= 0) {
		mutex_unlock(&REISERFS_I(inode)->tailpack);
		return 0;
	}

	reiserfs_write_lock(inode->i_sb);
	/*
	 * freeing preallocation only involves relogging blocks that
	 * are already in the current transaction.  preallocation gets
	 * freed at the end of each transaction, so it is impossible for
	 * us to log any additional blocks (including quota blocks)
	 */
	err = journal_begin(&th, inode->i_sb, 1);
	if (err) {
		/*
		 * uh oh, we can't allow the inode to go away while there
		 * is still preallocation blocks pending.  Try to join the
		 * aborted transaction
		 */
		jbegin_failure = err;
		err = journal_join_abort(&th, inode->i_sb);

		if (err) {
			/*
			 * hmpf, our choices here aren't good.  We can pin
			 * the inode which will disallow unmount from ever
			 * happening, we can do nothing, which will corrupt
			 * random memory on unmount, or we can forcibly
			 * remove the file from the preallocation list, which
			 * will leak blocks on disk.  Lets pin the inode
			 * and let the admin know what is going on.
			 */
			igrab(inode);
			reiserfs_warning(inode->i_sb, "clm-9001",
					 "pinning inode %lu because the "
					 "preallocation can't be freed",
					 inode->i_ino);
			goto out;
		}
	}
	reiserfs_update_inode_transaction(inode);

#ifdef REISERFS_PREALLOCATE
	reiserfs_discard_prealloc(&th, inode);
#endif
	err = journal_end(&th);

	/* copy back the error code from journal_begin */
	if (!err)
		err = jbegin_failure;

	if (!err &&
	    (REISERFS_I(inode)->i_flags & i_pack_on_close_mask) &&
	    tail_has_to_be_packed(inode)) {

		/*
		 * if regular file is released by last holder and it has been
		 * appended (we append by unformatted node only) or its direct
		 * item(s) had to be converted, then it may have to be
		 * indirect2direct converted
		 */
		err = reiserfs_truncate_file(inode, 0);
	}
out:
	reiserfs_write_unlock(inode->i_sb);
	mutex_unlock(&REISERFS_I(inode)->tailpack);
	return err;
}
122
123static int reiserfs_file_open(struct inode *inode, struct file *file)
124{
125	int err = dquot_file_open(inode, file);
126
127	/* somebody might be tailpacking on final close; wait for it */
128        if (!atomic_inc_not_zero(&REISERFS_I(inode)->openers)) {
129		mutex_lock(&REISERFS_I(inode)->tailpack);
 
130		atomic_inc(&REISERFS_I(inode)->openers);
131		mutex_unlock(&REISERFS_I(inode)->tailpack);
132	}
133	return err;
134}
135
136void reiserfs_vfs_truncate_file(struct inode *inode)
137{
138	mutex_lock(&REISERFS_I(inode)->tailpack);
139	reiserfs_truncate_file(inode, 1);
140	mutex_unlock(&REISERFS_I(inode)->tailpack);
141}
142
/* Sync a reiserfs file. */

/*
 * FIXME: sync_mapping_buffers() never has anything to sync.  Can
 * be removed...
 */

/*
 * Flush the file's dirty pages in [start, end], commit the inode's
 * transaction, and issue a block-device cache flush when barriers
 * are enabled.  Returns 0 or a negative errno.
 */
static int reiserfs_sync_file(struct file *filp, loff_t start, loff_t end,
			      int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	int err;
	int barrier_done;

	/* write back and wait for the requested range first */
	err = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (err)
		return err;

	inode_lock(inode);
	BUG_ON(!S_ISREG(inode->i_mode));
	err = sync_mapping_buffers(inode->i_mapping);
	reiserfs_write_lock(inode->i_sb);
	barrier_done = reiserfs_commit_for_inode(inode);
	reiserfs_write_unlock(inode->i_sb);
	/*
	 * NOTE(review): presumably barrier_done == 1 means the commit
	 * already issued a flush, so only flush here otherwise — confirm
	 * against reiserfs_commit_for_inode() in the journal code.
	 */
	if (barrier_done != 1 && reiserfs_barrier_flush(inode->i_sb))
		blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
	inode_unlock(inode);
	if (barrier_done < 0)
		return barrier_done;
	return (err < 0) ? -EIO : 0;
}
174
/* taken fs/buffer.c:__block_commit_write */
/*
 * Commit the buffers of @page covering byte range [@from, @to):
 * mark them uptodate, and either log them (data journaling) or dirty
 * them, adding ordered-mode buffers to the transaction's ordered list.
 * Returns 0 or a negative errno from the journal calls.
 */
int reiserfs_commit_page(struct inode *inode, struct page *page,
			 unsigned from, unsigned to)
{
	unsigned block_start, block_end;
	int partial = 0;	/* set if any buffer outside the range is stale */
	unsigned blocksize;
	struct buffer_head *bh, *head;
	unsigned long i_size_index = inode->i_size >> PAGE_SHIFT;
	int new;
	int logit = reiserfs_file_data_log(inode);	/* data journaling on? */
	struct super_block *s = inode->i_sb;
	int bh_per_page = PAGE_SIZE / s->s_blocksize;
	struct reiserfs_transaction_handle th;
	int ret = 0;

	th.t_trans_id = 0;
	blocksize = 1 << inode->i_blkbits;

	if (logit) {
		reiserfs_write_lock(s);
		/* worst case: every buffer on the page, plus one */
		ret = journal_begin(&th, s, bh_per_page + 1);
		if (ret)
			goto drop_write_lock;
		reiserfs_update_inode_transaction(inode);
	}
	/* walk the circular buffer list once; !block_start admits the head */
	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {

		new = buffer_new(bh);
		clear_buffer_new(bh);
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			/* buffer outside the written range */
			if (!buffer_uptodate(bh))
				partial = 1;
		} else {
			set_buffer_uptodate(bh);
			if (logit) {
				reiserfs_prepare_for_journal(s, bh, 1);
				journal_mark_dirty(&th, bh);
			} else if (!buffer_dirty(bh)) {
				mark_buffer_dirty(bh);
				/*
				 * do data=ordered on any page past the end
				 * of file and any buffer marked BH_New.
				 */
				if (reiserfs_data_ordered(inode->i_sb) &&
				    (new || page->index >= i_size_index)) {
					reiserfs_add_ordered_list(inode, bh);
				}
			}
		}
	}
	if (logit) {
		ret = journal_end(&th);
drop_write_lock:
		reiserfs_write_unlock(s);
	}
	/*
	 * If this is a partial write which happened to make all buffers
	 * uptodate then we can optimize away a bogus readpage() for
	 * the next read(). Here we 'discover' whether the page went
	 * uptodate as a result of this (potentially partial) write.
	 */
	if (!partial)
		SetPageUptodate(page);
	return ret;
}
244
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * File operations for regular reiserfs files.  Reads and writes go
 * through the generic iterator paths; open/release maintain the
 * openers count used for tail packing on final close.
 */
const struct file_operations reiserfs_file_operations = {
	.unlocked_ioctl = reiserfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = reiserfs_compat_ioctl,
#endif
	.mmap = generic_file_mmap,
	.open = reiserfs_file_open,
	.release = reiserfs_file_release,
	.fsync = reiserfs_sync_file,
	.read_iter = generic_file_read_iter,
	.write_iter = generic_file_write_iter,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = generic_file_llseek,
};
260
/*
 * Inode operations for regular reiserfs files; xattr/ACL hooks are
 * reiserfs-specific.  (No .truncate here — removed from the VFS;
 * truncation goes through reiserfs_setattr.)
 */
const struct inode_operations reiserfs_file_inode_operations = {
	.setattr = reiserfs_setattr,
	.setxattr = reiserfs_setxattr,
	.getxattr = reiserfs_getxattr,
	.listxattr = reiserfs_listxattr,
	.removexattr = reiserfs_removexattr,
	.permission = reiserfs_permission,
	.get_acl = reiserfs_get_acl,
	.set_acl = reiserfs_set_acl,
};