v6.2
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * This file contains vfs address (mmap) ops for 9P2000.
  4 *
  5 *  Copyright (C) 2005 by Eric Van Hensbergen <ericvh@gmail.com>
  6 *  Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
  7 */
  8
  9#include <linux/module.h>
 10#include <linux/errno.h>
 11#include <linux/fs.h>
 12#include <linux/file.h>
 13#include <linux/stat.h>
 14#include <linux/string.h>
 15#include <linux/inet.h>
 16#include <linux/pagemap.h>
 17#include <linux/sched.h>
 18#include <linux/swap.h>
 19#include <linux/uio.h>
 20#include <linux/netfs.h>
 21#include <net/9p/9p.h>
 22#include <net/9p/client.h>
 23
 24#include "v9fs.h"
 25#include "v9fs_vfs.h"
 26#include "cache.h"
 27#include "fid.h"
 28
 29/**
 30 * v9fs_issue_read - Issue a read from 9P
 31 * @subreq: The read to make
 32 */
 33static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
 34{
 35	struct netfs_io_request *rreq = subreq->rreq;
 36	struct p9_fid *fid = rreq->netfs_priv;
 37	struct iov_iter to;
 38	loff_t pos = subreq->start + subreq->transferred;
 39	size_t len = subreq->len   - subreq->transferred;
 40	int total, err;
 41
 42	iov_iter_xarray(&to, ITER_DEST, &rreq->mapping->i_pages, pos, len);
 43
 44	total = p9_client_read(fid, pos, &to, &err);
 45
 46	/* if we just extended the file size, any portion not in
 47	 * cache won't be on the server and reads back as zeroes */
 48	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
 49
 50	netfs_subreq_terminated(subreq, err ?: total, false);
 51}
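/*
 * A note on the completion above: p9_client_read() returns the number of
 * bytes it copied and reports failures through "err", and the subrequest
 * is terminated with "err ?: total" so an error takes precedence over a
 * short read.  NETFS_SREQ_CLEAR_TAIL asks the netfs core to zero any part
 * of the subrequest that the server did not supply.
 */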
 52
 53/**
 54 * v9fs_init_request - Initialise a read request
 55 * @rreq: The read request
 56 * @file: The file being read from
 57 */
 58static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
 59{
 60	struct inode *inode = file_inode(file);
 61	struct v9fs_inode *v9inode = V9FS_I(inode);
 62	struct p9_fid *fid = file->private_data;
 63
 64	BUG_ON(!fid);
 65
 66	/* we might need to read from a fid that was opened write-only
 67	 * for read-modify-write of page cache, use the writeback fid
 68	 * for that */
 69	if (rreq->origin == NETFS_READ_FOR_WRITE &&
 70			(fid->mode & O_ACCMODE) == O_WRONLY) {
 71		fid = v9inode->writeback_fid;
 72		BUG_ON(!fid);
 73	}
 74
 75	p9_fid_get(fid);
 76	rreq->netfs_priv = fid;
 77	return 0;
 78}
 79
 80/**
 81 * v9fs_free_request - Cleanup request initialized by v9fs_init_request
 82 * @rreq: The I/O request to clean up
 83 */
 84static void v9fs_free_request(struct netfs_io_request *rreq)
 85{
 86	struct p9_fid *fid = rreq->netfs_priv;
 87
 88	p9_fid_put(fid);
 89}
 90
 91/**
 92 * v9fs_begin_cache_operation - Begin a cache operation for a read
 93 * @rreq: The read request
 94 */
 95static int v9fs_begin_cache_operation(struct netfs_io_request *rreq)
 96{
 97#ifdef CONFIG_9P_FSCACHE
 98	struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(rreq->inode));
 99
100	return fscache_begin_read_operation(&rreq->cache_resources, cookie);
101#else
102	return -ENOBUFS;
103#endif
104}
105
106const struct netfs_request_ops v9fs_req_ops = {
107	.init_request		= v9fs_init_request,
108	.free_request		= v9fs_free_request,
109	.begin_cache_operation	= v9fs_begin_cache_operation,
110	.issue_read		= v9fs_issue_read,
111};
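/*
 * Roughly how the netfs core drives these ops (see fs/netfs/ for the
 * exact call chain): netfs_read_folio() and netfs_readahead(), wired into
 * v9fs_addr_operations below, allocate a netfs_io_request and call
 * ->init_request() to stash a fid in rreq->netfs_priv, attach the cache
 * via ->begin_cache_operation(), slice the request into subrequests that
 * are handed to ->issue_read() when they cannot be served from fscache,
 * and finally drop the fid through ->free_request().
 */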
112
113/**
114 * v9fs_release_folio - release the private state associated with a folio
115 * @folio: The folio to be released
116 * @gfp: The caller's allocation restrictions
117 *
118 * Returns true if the page can be released, false otherwise.
119 */
120
121static bool v9fs_release_folio(struct folio *folio, gfp_t gfp)
122{
123	struct inode *inode = folio_inode(folio);
124
125	if (folio_test_private(folio))
126		return false;
127#ifdef CONFIG_9P_FSCACHE
128	if (folio_test_fscache(folio)) {
129		if (current_is_kswapd() || !(gfp & __GFP_FS))
130			return false;
131		folio_wait_fscache(folio);
132	}
133#endif
134	fscache_note_page_release(v9fs_inode_cookie(V9FS_I(inode)));
135	return true;
136}
137
138static void v9fs_invalidate_folio(struct folio *folio, size_t offset,
139				 size_t length)
140{
141	folio_wait_fscache(folio);
142}
143
144static void v9fs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
145				     bool was_async)
146{
147	struct v9fs_inode *v9inode = priv;
148	__le32 version;
149
150	if (IS_ERR_VALUE(transferred_or_error) &&
151	    transferred_or_error != -ENOBUFS) {
152		version = cpu_to_le32(v9inode->qid.version);
153		fscache_invalidate(v9fs_inode_cookie(v9inode), &version,
154				   i_size_read(&v9inode->netfs.inode), 0);
155	}
156}
157
158static int v9fs_vfs_write_folio_locked(struct folio *folio)
159{
160	struct inode *inode = folio_inode(folio);
161	struct v9fs_inode *v9inode = V9FS_I(inode);
162	struct fscache_cookie *cookie = v9fs_inode_cookie(v9inode);
163	loff_t start = folio_pos(folio);
164	loff_t i_size = i_size_read(inode);
165	struct iov_iter from;
166	size_t len = folio_size(folio);
167	int err;
168
169	if (start >= i_size)
170		return 0; /* Simultaneous truncation occurred */
171
172	len = min_t(loff_t, i_size - start, len);
173
174	iov_iter_xarray(&from, ITER_SOURCE, &folio_mapping(folio)->i_pages, start, len);
175
176	/* We should have writeback_fid always set */
177	BUG_ON(!v9inode->writeback_fid);
178
179	folio_wait_fscache(folio);
180	folio_start_writeback(folio);
181
182	p9_client_write(v9inode->writeback_fid, start, &from, &err);
183
184	if (err == 0 &&
185	    fscache_cookie_enabled(cookie) &&
186	    test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags)) {
187		folio_start_fscache(folio);
188		fscache_write_to_cache(v9fs_inode_cookie(v9inode),
189				       folio_mapping(folio), start, len, i_size,
190				       v9fs_write_to_cache_done, v9inode,
191				       true);
192	}
193
194	folio_end_writeback(folio);
195	return err;
196}
197
198static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
199{
200	struct folio *folio = page_folio(page);
201	int retval;
202
203	p9_debug(P9_DEBUG_VFS, "folio %p\n", folio);
204
205	retval = v9fs_vfs_write_folio_locked(folio);
206	if (retval < 0) {
207		if (retval == -EAGAIN) {
208			folio_redirty_for_writepage(wbc, folio);
209			retval = 0;
210		} else {
211			mapping_set_error(folio_mapping(folio), retval);
212		}
213	} else
214		retval = 0;
215
216	folio_unlock(folio);
217	return retval;
218}
219
220static int v9fs_launder_folio(struct folio *folio)
221{
222	int retval;
223
224	if (folio_clear_dirty_for_io(folio)) {
225		retval = v9fs_vfs_write_folio_locked(folio);
226		if (retval)
227			return retval;
228	}
229	folio_wait_fscache(folio);
230	return 0;
231}
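/*
 * ->launder_folio is called (for example from invalidate_inode_pages2())
 * on a dirty, locked folio that has to be cleaned before it can be
 * dropped, hence the synchronous write above followed by waiting for any
 * write to the cache that is still in flight.
 */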
232
233/**
234 * v9fs_direct_IO - 9P address space operation for direct I/O
235 * @iocb: target I/O control block
236 * @iter: The data/buffer to use
237 *
238 * The presence of v9fs_direct_IO() in the address space ops vector
239 * allows open() with the O_DIRECT flag, which would have failed otherwise.
240 *
241 * In the non-cached mode, we shunt off direct read and write requests before
242 * the VFS gets them, so this method should never be called.
243 *
244 * Direct IO is not 'yet' supported in the cached mode. Hence when
245 * this routine is called through generic_file_aio_read(), the read/write fails
246 * with an error.
247 *
248 */
249static ssize_t
250v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
251{
252	struct file *file = iocb->ki_filp;
253	loff_t pos = iocb->ki_pos;
254	ssize_t n;
255	int err = 0;
256
257	if (iov_iter_rw(iter) == WRITE) {
258		n = p9_client_write(file->private_data, pos, iter, &err);
259		if (n) {
260			struct inode *inode = file_inode(file);
261			loff_t i_size = i_size_read(inode);
262
263			if (pos + n > i_size)
264				inode_add_bytes(inode, pos + n - i_size);
265		}
266	} else {
267		n = p9_client_read(file->private_data, pos, iter, &err);
268	}
269	return n ? n : err;
270}
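/*
 * A minimal userspace sketch of why this stub matters (the path below is
 * hypothetical): do_dentry_open() rejects O_DIRECT opens with -EINVAL
 * unless the mapping's a_ops provide ->direct_IO, so keeping this entry
 * in v9fs_addr_operations lets the following succeed on a cached mount:
 *
 *	int fd = open("/mnt/9p/file", O_RDWR | O_DIRECT);
 *
 * In uncached mode the 9p read/write paths bypass the page cache before
 * the VFS would ever call this helper, as the comment above notes.
 */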
271
272static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
273			    loff_t pos, unsigned int len,
274			    struct page **subpagep, void **fsdata)
275{
276	int retval;
277	struct folio *folio;
278	struct v9fs_inode *v9inode = V9FS_I(mapping->host);
279
280	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
281
282	BUG_ON(!v9inode->writeback_fid);
283
284	/* Prefetch area to be written into the cache if we're caching this
285	 * file.  We need to do this before we get a lock on the page in case
286	 * there's more than one writer competing for the same cache block.
287	 */
288	retval = netfs_write_begin(&v9inode->netfs, filp, mapping, pos, len, &folio, fsdata);
289	if (retval < 0)
290		return retval;
291
292	*subpagep = &folio->page;
293	return retval;
294}
295
296static int v9fs_write_end(struct file *filp, struct address_space *mapping,
297			  loff_t pos, unsigned int len, unsigned int copied,
298			  struct page *subpage, void *fsdata)
299{
300	loff_t last_pos = pos + copied;
301	struct folio *folio = page_folio(subpage);
302	struct inode *inode = mapping->host;
303	struct v9fs_inode *v9inode = V9FS_I(inode);
304
305	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
306
307	if (!folio_test_uptodate(folio)) {
308		if (unlikely(copied < len)) {
309			copied = 0;
310			goto out;
311		}
312
313		folio_mark_uptodate(folio);
314	}
315
316	/*
317	 * No need to use i_size_read() here, the i_size
318	 * cannot change under us because we hold the i_mutex.
319	 */
320	if (last_pos > inode->i_size) {
321		inode_add_bytes(inode, last_pos - inode->i_size);
322		i_size_write(inode, last_pos);
323		fscache_update_cookie(v9fs_inode_cookie(v9inode), NULL, &last_pos);
324	}
325	folio_mark_dirty(folio);
326out:
327	folio_unlock(folio);
328	folio_put(folio);
329
330	return copied;
331}
332
333#ifdef CONFIG_9P_FSCACHE
334/*
335 * Mark a page as having been made dirty and thus needing writeback.  We also
336 * need to pin the cache object to write back to.
337 */
338static bool v9fs_dirty_folio(struct address_space *mapping, struct folio *folio)
339{
340	struct v9fs_inode *v9inode = V9FS_I(mapping->host);
341
342	return fscache_dirty_folio(mapping, folio, v9fs_inode_cookie(v9inode));
343}
344#else
345#define v9fs_dirty_folio filemap_dirty_folio
346#endif
347
348const struct address_space_operations v9fs_addr_operations = {
349	.read_folio = netfs_read_folio,
350	.readahead = netfs_readahead,
351	.dirty_folio = v9fs_dirty_folio,
352	.writepage = v9fs_vfs_writepage,
353	.write_begin = v9fs_write_begin,
354	.write_end = v9fs_write_end,
355	.release_folio = v9fs_release_folio,
356	.invalidate_folio = v9fs_invalidate_folio,
357	.launder_folio = v9fs_launder_folio,
358	.direct_IO = v9fs_direct_IO,
359};
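/*
 * This table is installed on a 9p inode's mapping when the inode is set
 * up (v9fs_init_inode() in fs/9p/vfs_inode.c assigns
 * inode->i_mapping->a_ops = &v9fs_addr_operations), so the netfs-based
 * read paths and the writeback hooks above cover all cached 9p I/O.
 */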
v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 *  linux/fs/9p/vfs_addr.c
  4 *
  5 * This file contains vfs address (mmap) ops for 9P2000.
  6 *
  7 *  Copyright (C) 2005 by Eric Van Hensbergen <ericvh@gmail.com>
  8 *  Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
  9 */
 10
 11#include <linux/module.h>
 12#include <linux/errno.h>
 13#include <linux/fs.h>
 14#include <linux/file.h>
 15#include <linux/stat.h>
 16#include <linux/string.h>
 17#include <linux/inet.h>
 18#include <linux/pagemap.h>
 19#include <linux/idr.h>
 20#include <linux/sched.h>
 21#include <linux/uio.h>
 22#include <linux/bvec.h>
 23#include <net/9p/9p.h>
 24#include <net/9p/client.h>
 25
 26#include "v9fs.h"
 27#include "v9fs_vfs.h"
 28#include "cache.h"
 29#include "fid.h"
 30
 31/**
 32 * v9fs_fid_readpage - read an entire page in from 9P
 33 *
 34 * @fid: fid being read
 35 * @page: structure to page
 36 *
 37 */
 38static int v9fs_fid_readpage(void *data, struct page *page)
 39{
 40	struct p9_fid *fid = data;
 41	struct inode *inode = page->mapping->host;
 42	struct bio_vec bvec = {.bv_page = page, .bv_len = PAGE_SIZE};
 43	struct iov_iter to;
 44	int retval, err;
 45
 46	p9_debug(P9_DEBUG_VFS, "\n");
 47
 48	BUG_ON(!PageLocked(page));
 49
 50	retval = v9fs_readpage_from_fscache(inode, page);
 51	if (retval == 0)
 52		return retval;
 53
 54	iov_iter_bvec(&to, READ, &bvec, 1, PAGE_SIZE);
 55
 56	retval = p9_client_read(fid, page_offset(page), &to, &err);
 57	if (err) {
 58		v9fs_uncache_page(inode, page);
 59		retval = err;
 60		goto done;
 61	}
 62
 63	zero_user(page, retval, PAGE_SIZE - retval);
 64	flush_dcache_page(page);
 65	SetPageUptodate(page);
 66
 67	v9fs_readpage_to_fscache(inode, page);
 68	retval = 0;
 69
 70done:
 71	unlock_page(page);
 72	return retval;
 73}
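/*
 * On a short read (typically the last page of the file) the zero_user()
 * call above clears the page from offset "retval" to PAGE_SIZE before the
 * page is marked uptodate and copied into fscache.
 */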
 74
 75/**
 76 * v9fs_vfs_readpage - read an entire page in from 9P
 77 *
 78 * @filp: file being read
 79 * @page: structure to page
 80 *
 81 */
 82
 83static int v9fs_vfs_readpage(struct file *filp, struct page *page)
 84{
 85	return v9fs_fid_readpage(filp->private_data, page);
 86}
 87
 88/**
 89 * v9fs_vfs_readpages - read a set of pages from 9P
 90 *
 91 * @filp: file being read
 92 * @mapping: the address space
 93 * @pages: list of pages to read
 94 * @nr_pages: count of pages to read
 95 *
 96 */
 97
 98static int v9fs_vfs_readpages(struct file *filp, struct address_space *mapping,
 99			     struct list_head *pages, unsigned nr_pages)
100{
101	int ret = 0;
102	struct inode *inode;
103
104	inode = mapping->host;
105	p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, filp);
106
107	ret = v9fs_readpages_from_fscache(inode, mapping, pages, &nr_pages);
108	if (ret == 0)
109		return ret;
110
111	ret = read_cache_pages(mapping, pages, v9fs_fid_readpage,
112			filp->private_data);
113	p9_debug(P9_DEBUG_VFS, "  = %d\n", ret);
114	return ret;
115}
116
117/**
118 * v9fs_release_page - release the private state associated with a page
119 *
120 * Returns 1 if the page can be released, 0 otherwise.
121 */
122
123static int v9fs_release_page(struct page *page, gfp_t gfp)
124{
125	if (PagePrivate(page))
126		return 0;
127	return v9fs_fscache_release_page(page, gfp);
128}
129
130/**
131 * v9fs_invalidate_page - Invalidate a page completely or partially
132 *
133 * @page: structure to page
134 * @offset: offset in the page
135 */
136
137static void v9fs_invalidate_page(struct page *page, unsigned int offset,
138				 unsigned int length)
139{
140	/*
141	 * If called with zero offset, we should release
142	 * the private state associated with the page
143	 */
144	if (offset == 0 && length == PAGE_SIZE)
145		v9fs_fscache_invalidate_page(page);
146}
147
148static int v9fs_vfs_writepage_locked(struct page *page)
149{
150	struct inode *inode = page->mapping->host;
151	struct v9fs_inode *v9inode = V9FS_I(inode);
152	loff_t size = i_size_read(inode);
153	struct iov_iter from;
154	struct bio_vec bvec;
155	int err, len;
156
157	if (page->index == size >> PAGE_SHIFT)
158		len = size & ~PAGE_MASK;
159	else
160		len = PAGE_SIZE;
161
162	bvec.bv_page = page;
163	bvec.bv_offset = 0;
164	bvec.bv_len = len;
165	iov_iter_bvec(&from, WRITE, &bvec, 1, len);
166
167	/* We should have writeback_fid always set */
168	BUG_ON(!v9inode->writeback_fid);
169
170	set_page_writeback(page);
171
172	p9_client_write(v9inode->writeback_fid, page_offset(page), &from, &err);
173
174	end_page_writeback(page);
175	return err;
176}
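/*
 * The length computed above trims the write for the page that contains
 * i_size ("size & ~PAGE_MASK" bytes); every other page is written out in
 * full through the inode's writeback fid.
 */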
177
178static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
179{
180	int retval;
181
182	p9_debug(P9_DEBUG_VFS, "page %p\n", page);
183
184	retval = v9fs_vfs_writepage_locked(page);
185	if (retval < 0) {
186		if (retval == -EAGAIN) {
187			redirty_page_for_writepage(wbc, page);
188			retval = 0;
189		} else {
190			SetPageError(page);
191			mapping_set_error(page->mapping, retval);
192		}
193	} else
194		retval = 0;
195
196	unlock_page(page);
197	return retval;
198}
199
200/**
201 * v9fs_launder_page - Writeback a dirty page
202 * Returns 0 on success.
203 */
204
205static int v9fs_launder_page(struct page *page)
206{
207	int retval;
208	struct inode *inode = page->mapping->host;
209
210	v9fs_fscache_wait_on_page_write(inode, page);
211	if (clear_page_dirty_for_io(page)) {
212		retval = v9fs_vfs_writepage_locked(page);
213		if (retval)
214			return retval;
215	}
216	return 0;
217}
218
219/**
220 * v9fs_direct_IO - 9P address space operation for direct I/O
221 * @iocb: target I/O control block
222 *
223 * The presence of v9fs_direct_IO() in the address space ops vector
224 * allows open() with the O_DIRECT flag, which would have failed otherwise.
225 *
226 * In the non-cached mode, we shunt off direct read and write requests before
227 * the VFS gets them, so this method should never be called.
228 *
229 * Direct IO is not 'yet' supported in the cached mode. Hence when
230 * this routine is called through generic_file_aio_read(), the read/write fails
231 * with an error.
232 *
233 */
234static ssize_t
235v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
236{
237	struct file *file = iocb->ki_filp;
238	loff_t pos = iocb->ki_pos;
239	ssize_t n;
240	int err = 0;
241	if (iov_iter_rw(iter) == WRITE) {
242		n = p9_client_write(file->private_data, pos, iter, &err);
243		if (n) {
244			struct inode *inode = file_inode(file);
245			loff_t i_size = i_size_read(inode);
246			if (pos + n > i_size)
247				inode_add_bytes(inode, pos + n - i_size);
248		}
249	} else {
250		n = p9_client_read(file->private_data, pos, iter, &err);
251	}
252	return n ? n : err;
253}
254
255static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
256			    loff_t pos, unsigned len, unsigned flags,
257			    struct page **pagep, void **fsdata)
258{
259	int retval = 0;
260	struct page *page;
261	struct v9fs_inode *v9inode;
262	pgoff_t index = pos >> PAGE_SHIFT;
263	struct inode *inode = mapping->host;
264
265
266	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
267
268	v9inode = V9FS_I(inode);
269start:
270	page = grab_cache_page_write_begin(mapping, index, flags);
271	if (!page) {
272		retval = -ENOMEM;
273		goto out;
274	}
275	BUG_ON(!v9inode->writeback_fid);
276	if (PageUptodate(page))
277		goto out;
278
279	if (len == PAGE_SIZE)
280		goto out;
281
282	retval = v9fs_fid_readpage(v9inode->writeback_fid, page);
283	put_page(page);
284	if (!retval)
285		goto start;
286out:
287	*pagep = page;
288	return retval;
289}
290
291static int v9fs_write_end(struct file *filp, struct address_space *mapping,
292			  loff_t pos, unsigned len, unsigned copied,
293			  struct page *page, void *fsdata)
294{
295	loff_t last_pos = pos + copied;
296	struct inode *inode = page->mapping->host;
297
298	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
299
300	if (!PageUptodate(page)) {
301		if (unlikely(copied < len)) {
302			copied = 0;
303			goto out;
304		} else if (len == PAGE_SIZE) {
305			SetPageUptodate(page);
306		}
307	}
308	/*
309	 * No need to use i_size_read() here, the i_size
310	 * cannot change under us because we hold the i_mutex.
311	 */
312	if (last_pos > inode->i_size) {
313		inode_add_bytes(inode, last_pos - inode->i_size);
314		i_size_write(inode, last_pos);
315	}
316	set_page_dirty(page);
317out:
318	unlock_page(page);
319	put_page(page);
320
321	return copied;
322}
323
324
325const struct address_space_operations v9fs_addr_operations = {
326	.readpage = v9fs_vfs_readpage,
327	.readpages = v9fs_vfs_readpages,
328	.set_page_dirty = __set_page_dirty_nobuffers,
329	.writepage = v9fs_vfs_writepage,
330	.write_begin = v9fs_write_begin,
331	.write_end = v9fs_write_end,
332	.releasepage = v9fs_release_page,
333	.invalidatepage = v9fs_invalidate_page,
334	.launder_page = v9fs_launder_page,
335	.direct_IO = v9fs_direct_IO,
336};
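/*
 * Compared with the v6.2 listing above, this older table still carries
 * its own ->readpage/->readpages implementations and the v9fs fscache
 * wrappers; those were later replaced by the generic netfs helpers
 * (netfs_read_folio/netfs_readahead plus v9fs_req_ops), and the
 * page-based hooks became folio-based ones.
 */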