v6.8
// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS regular file handling primitives including fsync().
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Amagai Yoshiji and Ryusuke Konishi.
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/writeback.h>
#include "nilfs.h"
#include "segment.h"

int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	/*
	 * Called from fsync() system call
	 * This is the only entry point that can catch write and synch
	 * timing for both data blocks and intermediate blocks.
	 *
	 * This function should be implemented when the writeback function
	 * will be implemented.
	 */
	struct the_nilfs *nilfs;
	struct inode *inode = file->f_mapping->host;
	int err = 0;

	if (nilfs_inode_dirty(inode)) {
		if (datasync)
			err = nilfs_construct_dsync_segment(inode->i_sb, inode,
							    start, end);
		else
			err = nilfs_construct_segment(inode->i_sb);
	}

	nilfs = inode->i_sb->s_fs_info;
	if (!err)
		err = nilfs_flush_device(nilfs);

	return err;
}

static vm_fault_t nilfs_page_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct folio *folio = page_folio(vmf->page);
	struct inode *inode = file_inode(vma->vm_file);
	struct nilfs_transaction_info ti;
	struct buffer_head *bh, *head;
	int ret = 0;

	if (unlikely(nilfs_near_disk_full(inode->i_sb->s_fs_info)))
		return VM_FAULT_SIGBUS; /* -ENOSPC */

	sb_start_pagefault(inode->i_sb);
	folio_lock(folio);
	if (folio->mapping != inode->i_mapping ||
	    folio_pos(folio) >= i_size_read(inode) ||
	    !folio_test_uptodate(folio)) {
		folio_unlock(folio);
		ret = -EFAULT;	/* make the VM retry the fault */
		goto out;
	}

	/*
	 * check to see if the folio is mapped already (no holes)
	 */
	if (folio_test_mappedtodisk(folio))
		goto mapped;

	head = folio_buffers(folio);
	if (head) {
		int fully_mapped = 1;

		bh = head;
		do {
			if (!buffer_mapped(bh)) {
				fully_mapped = 0;
				break;
			}
		} while (bh = bh->b_this_page, bh != head);

		if (fully_mapped) {
			folio_set_mappedtodisk(folio);
			goto mapped;
		}
	}
	folio_unlock(folio);

	/*
	 * fill hole blocks
	 */
	ret = nilfs_transaction_begin(inode->i_sb, &ti, 1);
	/* never returns -ENOMEM, but may return -ENOSPC */
	if (unlikely(ret))
		goto out;

	file_update_time(vma->vm_file);
	ret = block_page_mkwrite(vma, vmf, nilfs_get_block);
	if (ret) {
		nilfs_transaction_abort(inode->i_sb);
		goto out;
	}
	nilfs_set_file_dirty(inode, 1 << (PAGE_SHIFT - inode->i_blkbits));
	nilfs_transaction_commit(inode->i_sb);

 mapped:
	/*
	 * Since checksumming including data blocks is performed to determine
	 * the validity of the log to be written and used for recovery, it is
	 * necessary to wait for writeback to finish here, regardless of the
	 * stable write requirement of the backing device.
	 */
	folio_wait_writeback(folio);
 out:
	sb_end_pagefault(inode->i_sb);
	return vmf_fs_error(ret);
}

static const struct vm_operations_struct nilfs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= nilfs_page_mkwrite,
};

static int nilfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &nilfs_file_vm_ops;
	return 0;
}

/*
 * We have mostly NULL's here: the current defaults are ok for
 * the nilfs filesystem.
 */
const struct file_operations nilfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.unlocked_ioctl	= nilfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= nilfs_compat_ioctl,
#endif	/* CONFIG_COMPAT */
	.mmap		= nilfs_file_mmap,
	.open		= generic_file_open,
	/* .release	= nilfs_release_file, */
	.fsync		= nilfs_sync_file,
	.splice_read	= filemap_splice_read,
	.splice_write   = iter_file_splice_write,
};

const struct inode_operations nilfs_file_inode_operations = {
	.setattr	= nilfs_setattr,
	.permission     = nilfs_permission,
	.fiemap		= nilfs_fiemap,
	.fileattr_get	= nilfs_fileattr_get,
	.fileattr_set	= nilfs_fileattr_set,
};

/* end of file */
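For reference, a minimal userspace sketch (not part of the kernel source) of how the .fsync entry point above is reached: fsync(2) ends up in nilfs_sync_file() with datasync == 0, forcing full segment construction, while fdatasync(2) passes datasync == 1 and takes the nilfs_construct_dsync_segment() path. The mount point and file name below are assumptions for illustration only.

/* userspace example, assuming a NILFS volume mounted at /mnt/nilfs */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/nilfs/testfile", O_CREAT | O_WRONLY, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (write(fd, "hello nilfs\n", 12) < 0)
		perror("write");

	if (fdatasync(fd) < 0)	/* datasync == 1 in nilfs_sync_file() */
		perror("fdatasync");

	if (fsync(fd) < 0)	/* datasync == 0: full segment construction */
		perror("fsync");

	close(fd);
	return 0;
}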
v4.10.11
 
/*
 * file.c - NILFS regular file handling primitives including fsync().
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Written by Amagai Yoshiji and Ryusuke Konishi.
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/writeback.h>
#include "nilfs.h"
#include "segment.h"

int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	/*
	 * Called from fsync() system call
	 * This is the only entry point that can catch write and synch
	 * timing for both data blocks and intermediate blocks.
	 *
	 * This function should be implemented when the writeback function
	 * will be implemented.
	 */
	struct the_nilfs *nilfs;
	struct inode *inode = file->f_mapping->host;
	int err = 0;

	if (nilfs_inode_dirty(inode)) {
		if (datasync)
			err = nilfs_construct_dsync_segment(inode->i_sb, inode,
							    start, end);
		else
			err = nilfs_construct_segment(inode->i_sb);
	}

	nilfs = inode->i_sb->s_fs_info;
	if (!err)
		err = nilfs_flush_device(nilfs);

	return err;
}

static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct nilfs_transaction_info ti;
	int ret = 0;

	if (unlikely(nilfs_near_disk_full(inode->i_sb->s_fs_info)))
		return VM_FAULT_SIGBUS; /* -ENOSPC */

	sb_start_pagefault(inode->i_sb);
	lock_page(page);
	if (page->mapping != inode->i_mapping ||
	    page_offset(page) >= i_size_read(inode) || !PageUptodate(page)) {
		unlock_page(page);
		ret = -EFAULT;	/* make the VM retry the fault */
		goto out;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		int fully_mapped = 1;

		bh = head = page_buffers(page);
		do {
			if (!buffer_mapped(bh)) {
				fully_mapped = 0;
				break;
			}
		} while (bh = bh->b_this_page, bh != head);

		if (fully_mapped) {
			SetPageMappedToDisk(page);
			goto mapped;
		}
	}
	unlock_page(page);

	/*
	 * fill hole blocks
	 */
	ret = nilfs_transaction_begin(inode->i_sb, &ti, 1);
	/* never returns -ENOMEM, but may return -ENOSPC */
	if (unlikely(ret))
		goto out;

	file_update_time(vma->vm_file);
	ret = block_page_mkwrite(vma, vmf, nilfs_get_block);
	if (ret) {
		nilfs_transaction_abort(inode->i_sb);
		goto out;
	}
	nilfs_set_file_dirty(inode, 1 << (PAGE_SHIFT - inode->i_blkbits));
	nilfs_transaction_commit(inode->i_sb);

 mapped:
	wait_for_stable_page(page);
 out:
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(ret);
}

static const struct vm_operations_struct nilfs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= nilfs_page_mkwrite,
};

static int nilfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &nilfs_file_vm_ops;
	return 0;
}

/*
 * We have mostly NULL's here: the current defaults are ok for
 * the nilfs filesystem.
 */
const struct file_operations nilfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.unlocked_ioctl	= nilfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= nilfs_compat_ioctl,
#endif	/* CONFIG_COMPAT */
	.mmap		= nilfs_file_mmap,
	.open		= generic_file_open,
	/* .release	= nilfs_release_file, */
	.fsync		= nilfs_sync_file,
	.splice_read	= generic_file_splice_read,
};

const struct inode_operations nilfs_file_inode_operations = {
	.setattr	= nilfs_setattr,
	.permission     = nilfs_permission,
	.fiemap		= nilfs_fiemap,
};

/* end of file */
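Similarly, a minimal userspace sketch (assuming a file that already exists on a NILFS mount and is at least one page long) of what triggers the .page_mkwrite handler: the first store into a MAP_SHARED, PROT_WRITE file mapping faults the page writable, which is where nilfs_page_mkwrite() runs before the write is allowed to proceed.

/* userspace example, assuming /mnt/nilfs/testfile exists and is >= one page */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesize = sysconf(_SC_PAGESIZE);
	int fd = open("/mnt/nilfs/testfile", O_RDWR);
	char *map;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	map = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	map[0] = 'X';	/* write fault on a clean page -> nilfs_page_mkwrite() */

	if (msync(map, pagesize, MS_SYNC) < 0)	/* write back the dirtied page */
		perror("msync");

	munmap(map, pagesize);
	close(fd);
	return 0;
}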