/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"
#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static const struct nfs_rw_ops nfs_rw_read_ops;

static struct kmem_cache *nfs_rdata_cachep;

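/*
 * Allocate a zeroed pgio header for a read from the slab cache.
 * Returns NULL on allocation failure; callers must check.
 */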
static struct nfs_pgio_header *nfs_readhdr_alloc(void)
{
	struct nfs_pgio_header *p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);

	if (p)
		p->rw_mode = FMODE_READ;
	return p;
}

static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
	kmem_cache_free(nfs_rdata_cachep, rhdr);
}

static
int nfs_return_empty_page(struct page *page)
{
	zero_user(page, 0, PAGE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}

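/*
 * Initialize a pageio descriptor for reads. When a pNFS layout
 * driver is active (and force_mds is not set), its pg_read_ops
 * are used; otherwise I/O goes through the MDS with the generic
 * rw ops, bounded by the server's negotiated rsize.
 */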
void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
			  struct inode *inode, bool force_mds,
			  const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops,
			server->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);

void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *mirror;

	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
		pgio->pg_ops->pg_cleanup(pgio);

	pgio->pg_ops = &nfs_pgio_rw_ops;

	/* read path should never have more than one mirror */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	mirror = &pgio->pg_mirrors[0];
	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);

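/*
 * Drop a read request: once every subrequest in the page group has
 * signalled PG_UNLOCKPAGE, push an up-to-date page into fscache
 * (if enabled) and unlock it.
 */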
static void nfs_readpage_release(struct nfs_page *req)
{
	struct inode *inode = d_inode(req->wb_context->dentry);

	dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(inode), req->wb_bytes,
		(long long)req_offset(req));

	if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
		if (PageUptodate(req->wb_page))
			nfs_readpage_to_fscache(inode, req->wb_page, 0);

		unlock_page(req->wb_page);
	}
	nfs_release_request(req);
}

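/*
 * Read a single page asynchronously: wrap it in an nfs_page request,
 * feed it through a freshly initialized pageio descriptor, and
 * account the bytes issued against the inode's read_io counter.
 */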
int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
		       struct page *page)
{
	struct nfs_page *new;
	unsigned int len;
	struct nfs_pageio_descriptor pgio;
	struct nfs_pgio_mirror *pgm;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(ctx, page, NULL, 0, len);
	if (IS_ERR(new)) {
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_SIZE)
		zero_user_segment(page, len, PAGE_SIZE);

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);
	if (!nfs_pageio_add_request(&pgio, new)) {
		nfs_list_remove_request(new);
		nfs_readpage_release(new);
	}
	nfs_pageio_complete(&pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio.pg_mirror_count != 1);

	pgm = &pgio.pg_mirrors[0];
	NFS_I(inode)->read_io += pgm->pg_bytes_written;

	return pgio.pg_error < 0 ? pgio.pg_error : 0;
}

static void nfs_page_group_set_uptodate(struct nfs_page *req)
{
	if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
		SetPageUptodate(req->wb_page);
}

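/*
 * Per-header completion: walk the request list, zero whatever part
 * of each request fell beyond the data the server returned (a short
 * read at EOF), mark fully served requests up to date, and release
 * them.
 */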
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;
		unsigned long start = req->wb_pgbase;
		unsigned long end = req->wb_pgbase + req->wb_bytes;

		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
			/* note: regions of the page not covered by a
			 * request are zeroed in nfs_readpage_async /
			 * readpage_async_filler */
			if (bytes > hdr->good_bytes) {
				/* nothing in this request was good, so zero
				 * the full extent of the request */
				zero_user_segment(page, start, end);

			} else if (hdr->good_bytes - bytes < req->wb_bytes) {
				/* part of this request has good bytes, but
				 * not all. zero the bad bytes */
				start += hdr->good_bytes - bytes;
				WARN_ON(start < req->wb_pgbase);
				zero_user_segment(page, start, end);
			}
		}
		bytes += req->wb_bytes;
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
			if (bytes <= hdr->good_bytes)
				nfs_page_group_set_uptodate(req);
		} else
			nfs_page_group_set_uptodate(req);
		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
out:
	hdr->release(hdr);
}

static void nfs_initiate_read(struct nfs_pgio_header *hdr,
			      struct rpc_message *msg,
			      const struct nfs_rpc_ops *rpc_ops,
			      struct rpc_task_setup *task_setup_data, int how)
{
	struct inode *inode = hdr->inode;
	int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;

	task_setup_data->flags |= swap_flags;
	rpc_ops->read_setup(hdr, msg);
	trace_nfs_initiate_read(inode, hdr->io_start, hdr->good_bytes);
}

static void
nfs_async_read_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
}

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
	.error_cleanup = nfs_async_read_error,
	.completion = nfs_read_completion,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static int nfs_readpage_done(struct rpc_task *task,
			     struct nfs_pgio_header *hdr,
			     struct inode *inode)
{
	int status = NFS_PROTO(inode)->read_done(task, hdr);
	if (status != 0)
		return status;

	nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, hdr->res.count);
	trace_nfs_readpage_done(inode, task->tk_status,
				hdr->args.offset, hdr->res.eof);

	if (task->tk_status == -ESTALE) {
		set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
		nfs_mark_for_revalidate(inode);
	}
	return 0;
}

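/*
 * Handle a short read: if the server returned no data at all, fail
 * the header with -EIO; otherwise advance offset/pgbase/count past
 * the bytes received and restart the RPC for the remainder.
 */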
static void nfs_readpage_retry(struct rpc_task *task,
			       struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res *resp = &hdr->res;

	/* This is a short read! */
	nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD);
	/* Has the server at least made some progress? */
	if (resp->count == 0) {
		nfs_set_pgio_error(hdr, -EIO, argp->offset);
		return;
	}

	/* For non rpc-based layout drivers, retry-through-MDS */
	if (!task->tk_ops) {
		hdr->pnfs_error = -EAGAIN;
		return;
	}

	/* Yes, so retry the read at the end of the hdr */
	hdr->mds_offset += resp->count;
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	rpc_restart_call_prepare(task);
}

static void nfs_readpage_result(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	if (hdr->res.eof) {
		loff_t bound;

		bound = hdr->args.offset + hdr->res.count;
		spin_lock(&hdr->lock);
		if (bound < hdr->io_start + hdr->good_bytes) {
			set_bit(NFS_IOHDR_EOF, &hdr->flags);
			clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
			hdr->good_bytes = bound - hdr->io_start;
		}
		spin_unlock(&hdr->lock);
	} else if (hdr->res.count < hdr->args.count)
		nfs_readpage_retry(task, hdr);
}

/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page_file_mapping(page)->host;
	int error;

	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
		page, PAGE_SIZE, page_index(page));
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	nfs_add_stats(inode, NFSIOS_READPAGES, 1);

	/*
	 * Try to flush any pending writes to the file..
	 *
	 * NOTE! Because we own the page lock, there cannot
	 * be any new pending writes generated at this point
	 * for this page (other pages can be written to).
	 */
	error = nfs_wb_page(inode, page);
	if (error)
		goto out_unlock;
	if (PageUptodate(page))
		goto out_unlock;

	error = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	if (file == NULL) {
		error = -EBADF;
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			goto out_unlock;
	} else
		ctx = get_nfs_open_context(nfs_file_open_context(file));

	if (!IS_SYNC(inode)) {
		error = nfs_readpage_from_fscache(ctx, inode, page);
		if (error == 0)
			goto out;
	}

	error = nfs_readpage_async(ctx, inode, page);

out:
	put_nfs_open_context(ctx);
	return error;
out_unlock:
	unlock_page(page);
	return error;
}

struct nfs_readdesc {
	struct nfs_pageio_descriptor *pgio;
	struct nfs_open_context *ctx;
};

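/*
 * Callback for read_cache_pages(): turn one page cache page into an
 * nfs_page request and queue it on the pageio descriptor, zeroing
 * any part of the page beyond EOF first.
 */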
static int
readpage_async_filler(void *data, struct page *page)
{
	struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
	struct nfs_page *new;
	unsigned int len;
	int error;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);

	new = nfs_create_request(desc->ctx, page, NULL, 0, len);
	if (IS_ERR(new))
		goto out_error;

	if (len < PAGE_SIZE)
		zero_user_segment(page, len, PAGE_SIZE);
	if (!nfs_pageio_add_request(desc->pgio, new)) {
		nfs_list_remove_request(new);
		nfs_readpage_release(new);
		error = desc->pgio->pg_error;
		goto out;
	}
	return 0;
out_error:
	error = PTR_ERR(new);
	unlock_page(page);
out:
	return error;
}

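/*
 * ->readpages() entry point: try fscache first, then batch the
 * remaining pages through one pageio descriptor so that contiguous
 * requests can be coalesced into rsize-sized RPCs.
 */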
int nfs_readpages(struct file *filp, struct address_space *mapping,
		  struct list_head *pages, unsigned nr_pages)
{
	struct nfs_pageio_descriptor pgio;
	struct nfs_pgio_mirror *pgm;
	struct nfs_readdesc desc = {
		.pgio = &pgio,
	};
	struct inode *inode = mapping->host;
	unsigned long npages;
	int ret = -ESTALE;

	dprintk("NFS: nfs_readpages (%s/%Lu %d)\n",
		inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(inode),
		nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

	if (NFS_STALE(inode))
		goto out;

	if (filp == NULL) {
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			return -EBADF;
	} else
		desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));

	/* attempt to read as many of the pages as possible from the cache
	 * - this returns -ENOBUFS immediately if the cookie is negative
	 */
	ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
					 pages, &nr_pages);
	if (ret == 0)
		goto read_complete; /* all pages were read */

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
	nfs_pageio_complete(&pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio.pg_mirror_count != 1);

	pgm = &pgio.pg_mirrors[0];
	NFS_I(inode)->read_io += pgm->pg_bytes_written;
	npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >> PAGE_SHIFT;
	nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
	put_nfs_open_context(desc.ctx);
out:
	return ret;
}

int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_pgio_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_readpagecache(void)
{
	kmem_cache_destroy(nfs_rdata_cachep);
}

static const struct nfs_rw_ops nfs_rw_read_ops = {
	.rw_alloc_header	= nfs_readhdr_alloc,
	.rw_free_header		= nfs_readhdr_free,
	.rw_done		= nfs_readpage_done,
	.rw_result		= nfs_readpage_result,
	.rw_initiate		= nfs_initiate_read,
};
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"
#include "nfstrace.h"
#include "delegation.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static const struct nfs_rw_ops nfs_rw_read_ops;

static struct kmem_cache *nfs_rdata_cachep;

static struct nfs_pgio_header *nfs_readhdr_alloc(void)
{
	struct nfs_pgio_header *p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);

	if (p)
		p->rw_mode = FMODE_READ;
	return p;
}

static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
	kfree(rhdr->res.scratch);
	kmem_cache_free(nfs_rdata_cachep, rhdr);
}

static int nfs_return_empty_folio(struct folio *folio)
{
	folio_zero_segment(folio, 0, folio_size(folio));
	folio_mark_uptodate(folio);
	folio_unlock(folio);
	return 0;
}

void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
			  struct inode *inode, bool force_mds,
			  const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops,
			server->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);

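/*
 * Flush the descriptor and roll the per-mirror byte count into the
 * inode's read_io and NFSIOS_READPAGES statistics. Reads are never
 * mirrored, so only pg_mirrors[0] is consulted.
 */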
void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *pgm;
	unsigned long npages;

	nfs_pageio_complete(pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	pgm = &pgio->pg_mirrors[0];
	NFS_I(pgio->pg_inode)->read_io += pgm->pg_bytes_written;
	npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >> PAGE_SHIFT;
	nfs_add_stats(pgio->pg_inode, NFSIOS_READPAGES, npages);
}

void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *mirror;

	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
		pgio->pg_ops->pg_cleanup(pgio);

	pgio->pg_ops = &nfs_pgio_rw_ops;

	/* read path should never have more than one mirror */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	mirror = &pgio->pg_mirrors[0];
	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);

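/*
 * Attach a scratch buffer to the header, freed again in
 * nfs_readhdr_free(). Callers (e.g. the NFSv4.2 READ_PLUS decoder)
 * use it as bounce space when decoding the reply.
 */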
bool nfs_read_alloc_scratch(struct nfs_pgio_header *hdr, size_t size)
{
	WARN_ON(hdr->res.scratch != NULL);
	hdr->res.scratch = kmalloc(size, GFP_KERNEL);
	return hdr->res.scratch != NULL;
}
EXPORT_SYMBOL_GPL(nfs_read_alloc_scratch);

static void nfs_readpage_release(struct nfs_page *req, int error)
{
	struct folio *folio = nfs_page_to_folio(req);

	if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE))
		if (nfs_netfs_folio_unlock(folio))
			folio_unlock(folio);

	nfs_release_request(req);
}

static void nfs_page_group_set_uptodate(struct nfs_page *req)
{
	if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
		folio_mark_uptodate(nfs_page_to_folio(req));
}

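/*
 * Per-header completion: zero the uncovered tail of each request on
 * a short read at EOF, mark served requests up to date, propagate
 * any header error into the open context, then hand off to netfs.
 */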
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;
	int error;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct folio *folio = nfs_page_to_folio(req);
		unsigned long start = req->wb_pgbase;
		unsigned long end = req->wb_pgbase + req->wb_bytes;

		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
			/* note: regions of the page not covered by a
			 * request are zeroed in nfs_read_add_folio
			 */
			if (bytes > hdr->good_bytes) {
				/* nothing in this request was good, so zero
				 * the full extent of the request */
				folio_zero_segment(folio, start, end);

			} else if (hdr->good_bytes - bytes < req->wb_bytes) {
				/* part of this request has good bytes, but
				 * not all. zero the bad bytes */
				start += hdr->good_bytes - bytes;
				WARN_ON(start < req->wb_pgbase);
				folio_zero_segment(folio, start, end);
			}
		}
		error = 0;
		bytes += req->wb_bytes;
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
			if (bytes <= hdr->good_bytes)
				nfs_page_group_set_uptodate(req);
			else {
				error = hdr->error;
				xchg(&nfs_req_openctx(req)->error, error);
			}
		} else
			nfs_page_group_set_uptodate(req);
		nfs_list_remove_request(req);
		nfs_readpage_release(req, error);
	}
	nfs_netfs_read_completion(hdr);

out:
	hdr->release(hdr);
}

static void nfs_initiate_read(struct nfs_pgio_header *hdr,
			      struct rpc_message *msg,
			      const struct nfs_rpc_ops *rpc_ops,
			      struct rpc_task_setup *task_setup_data, int how)
{
	rpc_ops->read_setup(hdr, msg);
	nfs_netfs_initiate_read(hdr);
	trace_nfs_initiate_read(hdr);
}

static void
nfs_async_read_error(struct list_head *head, int error)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_readpage_release(req, error);
	}
}

const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
	.error_cleanup = nfs_async_read_error,
	.completion = nfs_read_completion,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static int nfs_readpage_done(struct rpc_task *task,
			     struct nfs_pgio_header *hdr,
			     struct inode *inode)
{
	int status = NFS_PROTO(inode)->read_done(task, hdr);
	if (status != 0)
		return status;

	nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, hdr->res.count);
	trace_nfs_readpage_done(task, hdr);

	if (task->tk_status == -ESTALE) {
		nfs_set_inode_stale(inode);
		nfs_mark_for_revalidate(inode);
	}
	return 0;
}

static void nfs_readpage_retry(struct rpc_task *task,
			       struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res *resp = &hdr->res;

	/* This is a short read! */
	nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD);
	trace_nfs_readpage_short(task, hdr);

	/* Has the server at least made some progress? */
	if (resp->count == 0) {
		nfs_set_pgio_error(hdr, -EIO, argp->offset);
		return;
	}

	/* For non rpc-based layout drivers, retry-through-MDS */
	if (!task->tk_ops) {
		hdr->pnfs_error = -EAGAIN;
		return;
	}

	/* Yes, so retry the read at the end of the hdr */
	hdr->mds_offset += resp->count;
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	resp->count = 0;
	resp->eof = 0;
	rpc_restart_call_prepare(task);
}

static void nfs_readpage_result(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	if (hdr->res.eof) {
		loff_t pos = hdr->args.offset + hdr->res.count;
		unsigned int new = pos - hdr->io_start;

		if (hdr->good_bytes > new) {
			hdr->good_bytes = new;
			set_bit(NFS_IOHDR_EOF, &hdr->flags);
			clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
		}
	} else if (hdr->res.count < hdr->args.count)
		nfs_readpage_retry(task, hdr);
}

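/*
 * Wrap a folio in an nfs_page request and queue it on the pageio
 * descriptor. The request length is rounded up to rsize (capped at
 * the folio size), and the region past EOF is zeroed up front.
 */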
int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio,
		       struct nfs_open_context *ctx,
		       struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct nfs_server *server = NFS_SERVER(inode);
	size_t fsize = folio_size(folio);
	unsigned int rsize = server->rsize;
	struct nfs_page *new;
	unsigned int len, aligned_len;
	int error;

	len = nfs_folio_length(folio);
	if (len == 0)
		return nfs_return_empty_folio(folio);

	aligned_len = min_t(unsigned int, ALIGN(len, rsize), fsize);

	new = nfs_page_create_from_folio(ctx, folio, 0, aligned_len);
	if (IS_ERR(new)) {
		error = PTR_ERR(new);
		if (nfs_netfs_folio_unlock(folio))
			folio_unlock(folio);
		goto out;
	}

	if (len < fsize)
		folio_zero_segment(folio, len, fsize);
	if (!nfs_pageio_add_request(pgio, new)) {
		nfs_list_remove_request(new);
		error = pgio->pg_error;
		nfs_readpage_release(new, error);
		goto out;
	}
	return 0;
out:
	return error;
}

/*
 * Actually read a folio over the wire.
 */
static int nfs_do_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = file_inode(file);
	struct nfs_pageio_descriptor pgio;
	struct nfs_open_context *ctx;
	int ret;

	ctx = get_nfs_open_context(nfs_file_open_context(file));

	xchg(&ctx->error, 0);
	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	ret = nfs_read_add_folio(&pgio, ctx, folio);
	if (ret)
		goto out_put;

	nfs_pageio_complete_read(&pgio);
	nfs_update_delegated_atime(inode);
	if (pgio.pg_error < 0) {
		ret = pgio.pg_error;
		goto out_put;
	}

	ret = folio_wait_locked_killable(folio);
	if (!folio_test_uptodate(folio) && !ret)
		ret = xchg(&ctx->error, 0);

out_put:
	put_nfs_open_context(ctx);
	return ret;
}

/*
 * Synchronously read a folio.
 *
 * This is not heavily used as most users try an asynchronous
 * large read through ->readahead first.
 */
int nfs_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = file_inode(file);
	loff_t pos = folio_pos(folio);
	size_t len = folio_size(folio);
	int ret;

	trace_nfs_aop_readpage(inode, pos, len);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	task_io_account_read(len);

	/*
	 * Try to flush any pending writes to the file..
	 *
	 * NOTE! Because we own the folio lock, there cannot
	 * be any new pending writes generated at this point
	 * for this folio (other folios can be written to).
	 */
	ret = nfs_wb_folio(inode, folio);
	if (ret)
		goto out_unlock;
	if (folio_test_uptodate(folio))
		goto out_unlock;

	ret = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	ret = nfs_netfs_read_folio(file, folio);
	if (ret)
		ret = nfs_do_read_folio(file, folio);
out:
	trace_nfs_aop_readpage_done(inode, pos, len, ret);
	return ret;
out_unlock:
	folio_unlock(folio);
	goto out;
}

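/*
 * ->readahead() entry point: after giving netfs/fscache a chance to
 * satisfy the range, queue every folio in the readahead window on a
 * single pageio descriptor so the resulting RPCs can be coalesced.
 */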
void nfs_readahead(struct readahead_control *ractl)
{
	struct nfs_pageio_descriptor pgio;
	struct nfs_open_context *ctx;
	unsigned int nr_pages = readahead_count(ractl);
	struct file *file = ractl->file;
	struct inode *inode = ractl->mapping->host;
	struct folio *folio;
	int ret;

	trace_nfs_aop_readahead(inode, readahead_pos(ractl), nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);
	task_io_account_read(readahead_length(ractl));

	ret = -ESTALE;
	if (NFS_STALE(inode))
		goto out;

	ret = nfs_netfs_readahead(ractl);
	if (!ret)
		goto out;

	if (file == NULL) {
		ret = -EBADF;
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			goto out;
	} else
		ctx = get_nfs_open_context(nfs_file_open_context(file));

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	while ((folio = readahead_folio(ractl)) != NULL) {
		ret = nfs_read_add_folio(&pgio, ctx, folio);
		if (ret)
			break;
	}

	nfs_pageio_complete_read(&pgio);
	nfs_update_delegated_atime(inode);

	put_nfs_open_context(ctx);
out:
	trace_nfs_aop_readahead_done(inode, nr_pages, ret);
}

451
452int __init nfs_init_readpagecache(void)
453{
454 nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
455 sizeof(struct nfs_pgio_header),
456 0, SLAB_HWCACHE_ALIGN,
457 NULL);
458 if (nfs_rdata_cachep == NULL)
459 return -ENOMEM;
460
461 return 0;
462}
463
464void nfs_destroy_readpagecache(void)
465{
466 kmem_cache_destroy(nfs_rdata_cachep);
467}
468
469static const struct nfs_rw_ops nfs_rw_read_ops = {
470 .rw_alloc_header = nfs_readhdr_alloc,
471 .rw_free_header = nfs_readhdr_free,
472 .rw_done = nfs_readpage_done,
473 .rw_result = nfs_readpage_result,
474 .rw_initiate = nfs_initiate_read,
475};