v6.9.4
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"
#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static const struct nfs_rw_ops nfs_rw_read_ops;

static struct kmem_cache *nfs_rdata_cachep;

static struct nfs_pgio_header *nfs_readhdr_alloc(void)
{
	struct nfs_pgio_header *p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);

	if (p)
		p->rw_mode = FMODE_READ;
	return p;
}

static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
	if (rhdr->res.scratch != NULL)
		kfree(rhdr->res.scratch);
	kmem_cache_free(nfs_rdata_cachep, rhdr);
}

static int nfs_return_empty_folio(struct folio *folio)
{
	folio_zero_segment(folio, 0, folio_size(folio));
	folio_mark_uptodate(folio);
	folio_unlock(folio);
	return 0;
}

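/*
 * Initialize a pageio descriptor for reads.  When a pNFS layout driver
 * is active and the caller has not forced I/O through the MDS, the
 * layout driver's pg_read_ops replace the generic RPC read ops.
 */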
void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
			      struct inode *inode, bool force_mds,
			      const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops,
			server->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);

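/*
 * Flush the descriptor and account the completed I/O: reads are never
 * mirrored, so the byte count comes from the single mirror and is
 * rounded up to whole pages for the NFSIOS_READPAGES counter.
 */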
void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *pgm;
	unsigned long npages;

	nfs_pageio_complete(pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	pgm = &pgio->pg_mirrors[0];
	NFS_I(pgio->pg_inode)->read_io += pgm->pg_bytes_written;
	npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >> PAGE_SHIFT;
	nfs_add_stats(pgio->pg_inode, NFSIOS_READPAGES, npages);
}


void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *mirror;

	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
		pgio->pg_ops->pg_cleanup(pgio);

	pgio->pg_ops = &nfs_pgio_rw_ops;

	/* read path should never have more than one mirror */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	mirror = &pgio->pg_mirrors[0];
	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);

bool nfs_read_alloc_scratch(struct nfs_pgio_header *hdr, size_t size)
{
	WARN_ON(hdr->res.scratch != NULL);
	hdr->res.scratch = kmalloc(size, GFP_KERNEL);
	return hdr->res.scratch != NULL;
}
EXPORT_SYMBOL_GPL(nfs_read_alloc_scratch);

static void nfs_readpage_release(struct nfs_page *req, int error)
{
	struct folio *folio = nfs_page_to_folio(req);

	if (nfs_error_is_fatal_on_server(error) && error != -ETIMEDOUT)
		folio_set_error(folio);
	if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE))
		if (nfs_netfs_folio_unlock(folio))
			folio_unlock(folio);

	nfs_release_request(req);
}

static void nfs_page_group_set_uptodate(struct nfs_page *req)
{
	if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
		folio_mark_uptodate(nfs_page_to_folio(req));
}

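/*
 * Per-request completion.  Anything beyond hdr->good_bytes never came
 * back from the server, so when the EOF flag is set the uncovered part
 * of each request's folio is zeroed before the page group is allowed
 * to go uptodate; requests wholly past good_bytes are zeroed in full.
 */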
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;
	int error;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct folio *folio = nfs_page_to_folio(req);
		unsigned long start = req->wb_pgbase;
		unsigned long end = req->wb_pgbase + req->wb_bytes;

		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
			/* note: regions of the page not covered by a
			 * request are zeroed in nfs_read_add_folio
			 */
			if (bytes > hdr->good_bytes) {
				/* nothing in this request was good, so zero
				 * the full extent of the request */
				folio_zero_segment(folio, start, end);

			} else if (hdr->good_bytes - bytes < req->wb_bytes) {
				/* part of this request has good bytes, but
				 * not all. zero the bad bytes */
				start += hdr->good_bytes - bytes;
				WARN_ON(start < req->wb_pgbase);
				folio_zero_segment(folio, start, end);
			}
		}
		error = 0;
		bytes += req->wb_bytes;
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
			if (bytes <= hdr->good_bytes)
				nfs_page_group_set_uptodate(req);
			else {
				error = hdr->error;
				xchg(&nfs_req_openctx(req)->error, error);
			}
		} else
			nfs_page_group_set_uptodate(req);
		nfs_list_remove_request(req);
		nfs_readpage_release(req, error);
	}
	nfs_netfs_read_completion(hdr);

out:
	hdr->release(hdr);
}

static void nfs_initiate_read(struct nfs_pgio_header *hdr,
			      struct rpc_message *msg,
			      const struct nfs_rpc_ops *rpc_ops,
			      struct rpc_task_setup *task_setup_data, int how)
{
	rpc_ops->read_setup(hdr, msg);
	nfs_netfs_initiate_read(hdr);
	trace_nfs_initiate_read(hdr);
}

static void
nfs_async_read_error(struct list_head *head, int error)
{
	struct nfs_page	*req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_readpage_release(req, error);
	}
}

const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
	.error_cleanup = nfs_async_read_error,
	.completion = nfs_read_completion,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static int nfs_readpage_done(struct rpc_task *task,
			     struct nfs_pgio_header *hdr,
			     struct inode *inode)
{
	int status = NFS_PROTO(inode)->read_done(task, hdr);
	if (status != 0)
		return status;

	nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, hdr->res.count);
	trace_nfs_readpage_done(task, hdr);

	if (task->tk_status == -ESTALE) {
		nfs_set_inode_stale(inode);
		nfs_mark_for_revalidate(inode);
	}
	return 0;
}

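/*
 * Handle a short read: if the server made no progress, fail the rest
 * of the I/O with -EIO; for non-RPC pNFS layout drivers, flag the
 * header so the read is retried through the MDS; otherwise advance
 * the arguments past the bytes already received and restart the RPC.
 */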
static void nfs_readpage_retry(struct rpc_task *task,
			       struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res  *resp = &hdr->res;

	/* This is a short read! */
	nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD);
	trace_nfs_readpage_short(task, hdr);

	/* Has the server at least made some progress? */
	if (resp->count == 0) {
		nfs_set_pgio_error(hdr, -EIO, argp->offset);
		return;
	}

	/* For non rpc-based layout drivers, retry-through-MDS */
	if (!task->tk_ops) {
		hdr->pnfs_error = -EAGAIN;
		return;
	}

	/* Yes, so retry the read at the end of the hdr */
	hdr->mds_offset += resp->count;
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	resp->count = 0;
	resp->eof = 0;
	rpc_restart_call_prepare(task);
}

static void nfs_readpage_result(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	if (hdr->res.eof) {
		loff_t pos = hdr->args.offset + hdr->res.count;
		unsigned int new = pos - hdr->io_start;

		if (hdr->good_bytes > new) {
			hdr->good_bytes = new;
			set_bit(NFS_IOHDR_EOF, &hdr->flags);
			clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
		}
	} else if (hdr->res.count < hdr->args.count)
		nfs_readpage_retry(task, hdr);
}

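/*
 * Create a read request for a folio and add it to the descriptor.
 * The request length is the file data in the folio rounded up to the
 * server's rsize (capped at the folio size), and any part of the
 * folio beyond the file length is zeroed here instead of being read.
 */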
int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio,
		       struct nfs_open_context *ctx,
		       struct folio *folio)
{
	struct inode *inode = folio_file_mapping(folio)->host;
	struct nfs_server *server = NFS_SERVER(inode);
	size_t fsize = folio_size(folio);
	unsigned int rsize = server->rsize;
	struct nfs_page *new;
	unsigned int len, aligned_len;
	int error;

	len = nfs_folio_length(folio);
	if (len == 0)
		return nfs_return_empty_folio(folio);

	aligned_len = min_t(unsigned int, ALIGN(len, rsize), fsize);

	new = nfs_page_create_from_folio(ctx, folio, 0, aligned_len);
	if (IS_ERR(new)) {
		error = PTR_ERR(new);
		if (nfs_netfs_folio_unlock(folio))
			folio_unlock(folio);
		goto out;
	}

	if (len < fsize)
		folio_zero_segment(folio, len, fsize);
	if (!nfs_pageio_add_request(pgio, new)) {
		nfs_list_remove_request(new);
		error = pgio->pg_error;
		nfs_readpage_release(new, error);
		goto out;
	}
	return 0;
out:
	return error;
}

/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = file_inode(file);
	struct nfs_pageio_descriptor pgio;
	struct nfs_open_context *ctx;
	int ret;

	trace_nfs_aop_readpage(inode, folio);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	task_io_account_read(folio_size(folio));

	/*
	 * Try to flush any pending writes to the file.
	 *
	 * NOTE! Because we own the folio lock, there cannot
	 * be any new pending writes generated at this point
	 * for this folio (other folios can be written to).
	 */
	ret = nfs_wb_folio(inode, folio);
	if (ret)
		goto out_unlock;
	if (folio_test_uptodate(folio))
		goto out_unlock;

	ret = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	ret = nfs_netfs_read_folio(file, folio);
	if (!ret)
		goto out;

	ctx = get_nfs_open_context(nfs_file_open_context(file));

	xchg(&ctx->error, 0);
	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	ret = nfs_read_add_folio(&pgio, ctx, folio);
	if (ret)
		goto out_put;

	nfs_pageio_complete_read(&pgio);
	ret = pgio.pg_error < 0 ? pgio.pg_error : 0;
	if (!ret) {
		ret = folio_wait_locked_killable(folio);
		if (!folio_test_uptodate(folio) && !ret)
			ret = xchg(&ctx->error, 0);
	}
out_put:
	put_nfs_open_context(ctx);
out:
	trace_nfs_aop_readpage_done(inode, folio, ret);
	return ret;
out_unlock:
	folio_unlock(folio);
	goto out;
}

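/*
 * ->readahead: add each folio handed over by the VM to one pageio
 * descriptor and submit the lot asynchronously.  Readahead is purely
 * opportunistic, so errors are traced but not reported to the caller.
 */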
void nfs_readahead(struct readahead_control *ractl)
{
	struct nfs_pageio_descriptor pgio;
	struct nfs_open_context *ctx;
	unsigned int nr_pages = readahead_count(ractl);
	struct file *file = ractl->file;
	struct inode *inode = ractl->mapping->host;
	struct folio *folio;
	int ret;

	trace_nfs_aop_readahead(inode, readahead_pos(ractl), nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);
	task_io_account_read(readahead_length(ractl));

	ret = -ESTALE;
	if (NFS_STALE(inode))
		goto out;

	ret = nfs_netfs_readahead(ractl);
	if (!ret)
		goto out;

	if (file == NULL) {
		ret = -EBADF;
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			goto out;
	} else
		ctx = get_nfs_open_context(nfs_file_open_context(file));

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	while ((folio = readahead_folio(ractl)) != NULL) {
		ret = nfs_read_add_folio(&pgio, ctx, folio);
		if (ret)
			break;
	}

	nfs_pageio_complete_read(&pgio);

	put_nfs_open_context(ctx);
out:
	trace_nfs_aop_readahead_done(inode, nr_pages, ret);
}

int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_pgio_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_readpagecache(void)
{
	kmem_cache_destroy(nfs_rdata_cachep);
}

static const struct nfs_rw_ops nfs_rw_read_ops = {
	.rw_alloc_header	= nfs_readhdr_alloc,
	.rw_free_header		= nfs_readhdr_free,
	.rw_done		= nfs_readpage_done,
	.rw_result		= nfs_readpage_result,
	.rw_initiate		= nfs_initiate_read,
};
v6.13.7
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"
#include "nfstrace.h"
#include "delegation.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static const struct nfs_rw_ops nfs_rw_read_ops;

static struct kmem_cache *nfs_rdata_cachep;

static struct nfs_pgio_header *nfs_readhdr_alloc(void)
{
	struct nfs_pgio_header *p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);

	if (p)
		p->rw_mode = FMODE_READ;
	return p;
}

static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
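	/* res.scratch may never have been allocated; kfree(NULL) is a no-op */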
	kfree(rhdr->res.scratch);
	kmem_cache_free(nfs_rdata_cachep, rhdr);
}

static int nfs_return_empty_folio(struct folio *folio)
{
	folio_zero_segment(folio, 0, folio_size(folio));
	folio_mark_uptodate(folio);
	folio_unlock(folio);
	return 0;
}

void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
			      struct inode *inode, bool force_mds,
			      const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops,
			server->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);

void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *pgm;
	unsigned long npages;

	nfs_pageio_complete(pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	pgm = &pgio->pg_mirrors[0];
	NFS_I(pgio->pg_inode)->read_io += pgm->pg_bytes_written;
	npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >> PAGE_SHIFT;
	nfs_add_stats(pgio->pg_inode, NFSIOS_READPAGES, npages);
}


void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *mirror;

	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
		pgio->pg_ops->pg_cleanup(pgio);

	pgio->pg_ops = &nfs_pgio_rw_ops;

	/* read path should never have more than one mirror */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	mirror = &pgio->pg_mirrors[0];
	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);

bool nfs_read_alloc_scratch(struct nfs_pgio_header *hdr, size_t size)
{
	WARN_ON(hdr->res.scratch != NULL);
	hdr->res.scratch = kmalloc(size, GFP_KERNEL);
	return hdr->res.scratch != NULL;
}
EXPORT_SYMBOL_GPL(nfs_read_alloc_scratch);

static void nfs_readpage_release(struct nfs_page *req, int error)
{
	struct folio *folio = nfs_page_to_folio(req);

	if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE))
		if (nfs_netfs_folio_unlock(folio))
			folio_unlock(folio);

	nfs_release_request(req);
}

static void nfs_page_group_set_uptodate(struct nfs_page *req)
{
	if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
		folio_mark_uptodate(nfs_page_to_folio(req));
}

static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;
	int error;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct folio *folio = nfs_page_to_folio(req);
		unsigned long start = req->wb_pgbase;
		unsigned long end = req->wb_pgbase + req->wb_bytes;

		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
			/* note: regions of the page not covered by a
			 * request are zeroed in nfs_read_add_folio
			 */
			if (bytes > hdr->good_bytes) {
				/* nothing in this request was good, so zero
				 * the full extent of the request */
				folio_zero_segment(folio, start, end);

			} else if (hdr->good_bytes - bytes < req->wb_bytes) {
				/* part of this request has good bytes, but
				 * not all. zero the bad bytes */
				start += hdr->good_bytes - bytes;
				WARN_ON(start < req->wb_pgbase);
				folio_zero_segment(folio, start, end);
			}
		}
		error = 0;
		bytes += req->wb_bytes;
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
			if (bytes <= hdr->good_bytes)
				nfs_page_group_set_uptodate(req);
			else {
				error = hdr->error;
				xchg(&nfs_req_openctx(req)->error, error);
			}
		} else
			nfs_page_group_set_uptodate(req);
		nfs_list_remove_request(req);
		nfs_readpage_release(req, error);
	}
	nfs_netfs_read_completion(hdr);

out:
	hdr->release(hdr);
}

static void nfs_initiate_read(struct nfs_pgio_header *hdr,
			      struct rpc_message *msg,
			      const struct nfs_rpc_ops *rpc_ops,
			      struct rpc_task_setup *task_setup_data, int how)
{
	rpc_ops->read_setup(hdr, msg);
	nfs_netfs_initiate_read(hdr);
	trace_nfs_initiate_read(hdr);
}

static void
nfs_async_read_error(struct list_head *head, int error)
{
	struct nfs_page	*req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_readpage_release(req, error);
	}
}

const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
	.error_cleanup = nfs_async_read_error,
	.completion = nfs_read_completion,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static int nfs_readpage_done(struct rpc_task *task,
			     struct nfs_pgio_header *hdr,
			     struct inode *inode)
{
	int status = NFS_PROTO(inode)->read_done(task, hdr);
	if (status != 0)
		return status;

	nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, hdr->res.count);
	trace_nfs_readpage_done(task, hdr);

	if (task->tk_status == -ESTALE) {
		nfs_set_inode_stale(inode);
		nfs_mark_for_revalidate(inode);
	}
	return 0;
}

static void nfs_readpage_retry(struct rpc_task *task,
			       struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res  *resp = &hdr->res;

	/* This is a short read! */
	nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD);
	trace_nfs_readpage_short(task, hdr);

	/* Has the server at least made some progress? */
	if (resp->count == 0) {
		nfs_set_pgio_error(hdr, -EIO, argp->offset);
		return;
	}

	/* For non rpc-based layout drivers, retry-through-MDS */
	if (!task->tk_ops) {
		hdr->pnfs_error = -EAGAIN;
		return;
	}

	/* Yes, so retry the read at the end of the hdr */
	hdr->mds_offset += resp->count;
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	resp->count = 0;
	resp->eof = 0;
	rpc_restart_call_prepare(task);
}

static void nfs_readpage_result(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	if (hdr->res.eof) {
		loff_t pos = hdr->args.offset + hdr->res.count;
		unsigned int new = pos - hdr->io_start;

		if (hdr->good_bytes > new) {
			hdr->good_bytes = new;
			set_bit(NFS_IOHDR_EOF, &hdr->flags);
			clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
		}
	} else if (hdr->res.count < hdr->args.count)
		nfs_readpage_retry(task, hdr);
}

int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio,
		       struct nfs_open_context *ctx,
		       struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct nfs_server *server = NFS_SERVER(inode);
	size_t fsize = folio_size(folio);
	unsigned int rsize = server->rsize;
	struct nfs_page *new;
	unsigned int len, aligned_len;
	int error;

	len = nfs_folio_length(folio);
	if (len == 0)
		return nfs_return_empty_folio(folio);

	aligned_len = min_t(unsigned int, ALIGN(len, rsize), fsize);

	new = nfs_page_create_from_folio(ctx, folio, 0, aligned_len);
	if (IS_ERR(new)) {
		error = PTR_ERR(new);
		if (nfs_netfs_folio_unlock(folio))
			folio_unlock(folio);
		goto out;
	}

	if (len < fsize)
		folio_zero_segment(folio, len, fsize);
	if (!nfs_pageio_add_request(pgio, new)) {
		nfs_list_remove_request(new);
		error = pgio->pg_error;
		nfs_readpage_release(new, error);
		goto out;
	}
	return 0;
out:
	return error;
}

/*
 * Actually read a folio over the wire.
 */
static int nfs_do_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = file_inode(file);
	struct nfs_pageio_descriptor pgio;
	struct nfs_open_context *ctx;
	int ret;

	ctx = get_nfs_open_context(nfs_file_open_context(file));

	xchg(&ctx->error, 0);
	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	ret = nfs_read_add_folio(&pgio, ctx, folio);
	if (ret)
		goto out_put;

	nfs_pageio_complete_read(&pgio);
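	/* if this inode's attributes are delegated, atime is updated locally */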
	nfs_update_delegated_atime(inode);
	if (pgio.pg_error < 0) {
		ret = pgio.pg_error;
		goto out_put;
	}

	ret = folio_wait_locked_killable(folio);
	if (!folio_test_uptodate(folio) && !ret)
		ret = xchg(&ctx->error, 0);

out_put:
	put_nfs_open_context(ctx);
	return ret;
}

/*
 * Synchronously read a folio.
 *
 * This is not heavily used as most users try an asynchronous
 * large read through ->readahead first.
 */
int nfs_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = file_inode(file);
	loff_t pos = folio_pos(folio);
	size_t len = folio_size(folio);
	int ret;

	trace_nfs_aop_readpage(inode, pos, len);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	task_io_account_read(len);

	/*
	 * Try to flush any pending writes to the file.
	 *
	 * NOTE! Because we own the folio lock, there cannot
	 * be any new pending writes generated at this point
	 * for this folio (other folios can be written to).
	 */
	ret = nfs_wb_folio(inode, folio);
	if (ret)
		goto out_unlock;
	if (folio_test_uptodate(folio))
		goto out_unlock;

	ret = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	/* netfs/fscache gets the first shot; fall back to a plain RPC read */
	ret = nfs_netfs_read_folio(file, folio);
	if (ret)
		ret = nfs_do_read_folio(file, folio);
out:
	trace_nfs_aop_readpage_done(inode, pos, len, ret);
	return ret;
out_unlock:
	folio_unlock(folio);
	goto out;
}

void nfs_readahead(struct readahead_control *ractl)
{
	struct nfs_pageio_descriptor pgio;
	struct nfs_open_context *ctx;
	unsigned int nr_pages = readahead_count(ractl);
	struct file *file = ractl->file;
	struct inode *inode = ractl->mapping->host;
	struct folio *folio;
	int ret;

	trace_nfs_aop_readahead(inode, readahead_pos(ractl), nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);
	task_io_account_read(readahead_length(ractl));

	ret = -ESTALE;
	if (NFS_STALE(inode))
		goto out;

	ret = nfs_netfs_readahead(ractl);
	if (!ret)
		goto out;

	if (file == NULL) {
		ret = -EBADF;
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			goto out;
	} else
		ctx = get_nfs_open_context(nfs_file_open_context(file));

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	while ((folio = readahead_folio(ractl)) != NULL) {
		ret = nfs_read_add_folio(&pgio, ctx, folio);
		if (ret)
			break;
	}

	nfs_pageio_complete_read(&pgio);
	nfs_update_delegated_atime(inode);

	put_nfs_open_context(ctx);
out:
	trace_nfs_aop_readahead_done(inode, nr_pages, ret);
}

int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_pgio_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_readpagecache(void)
{
	kmem_cache_destroy(nfs_rdata_cachep);
}

static const struct nfs_rw_ops nfs_rw_read_ops = {
	.rw_alloc_header	= nfs_readhdr_alloc,
	.rw_free_header		= nfs_readhdr_free,
	.rw_done		= nfs_readpage_done,
	.rw_result		= nfs_readpage_result,
	.rw_initiate		= nfs_initiate_read,
};