fs/nfs/read.c (Linux v3.15)

/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static const struct nfs_pageio_ops nfs_pageio_read_ops;
static const struct rpc_call_ops nfs_read_common_ops;
static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;

static struct kmem_cache *nfs_rdata_cachep;

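/*
 * A read header tracks one logical page-cache read: the list of
 * nfs_page requests it covers, the rpc_list of nfs_read_data split off
 * for the wire, and a refcount taken once per attached nfs_read_data
 * (see nfs_readdata_alloc() below).
 */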
struct nfs_read_header *nfs_readhdr_alloc(void)
{
	struct nfs_read_header *rhdr;

	rhdr = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);
	if (rhdr) {
		struct nfs_pgio_header *hdr = &rhdr->header;

		INIT_LIST_HEAD(&hdr->pages);
		INIT_LIST_HEAD(&hdr->rpc_list);
		spin_lock_init(&hdr->lock);
		atomic_set(&hdr->refcnt, 0);
	}
	return rhdr;
}
EXPORT_SYMBOL_GPL(nfs_readhdr_alloc);

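/*
 * The first nfs_read_data for a header is free: it is embedded in
 * struct nfs_read_header as ->rpc_data, so the common one-RPC case
 * costs no extra allocation.  Further ones (needed when a page must be
 * split across several READs) are kzalloc'd.  Each holds a reference
 * on the header.
 */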
static struct nfs_read_data *nfs_readdata_alloc(struct nfs_pgio_header *hdr,
						unsigned int pagecount)
{
	struct nfs_read_data *data, *prealloc;

	prealloc = &container_of(hdr, struct nfs_read_header, header)->rpc_data;
	if (prealloc->header == NULL)
		data = prealloc;
	else
		data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out;

	if (nfs_pgarray_set(&data->pages, pagecount)) {
		data->header = hdr;
		atomic_inc(&hdr->refcnt);
	} else {
		if (data != prealloc)
			kfree(data);
		data = NULL;
	}
out:
	return data;
}

void nfs_readhdr_free(struct nfs_pgio_header *hdr)
{
	struct nfs_read_header *rhdr = container_of(hdr, struct nfs_read_header, header);

	kmem_cache_free(nfs_rdata_cachep, rhdr);
}
EXPORT_SYMBOL_GPL(nfs_readhdr_free);

void nfs_readdata_release(struct nfs_read_data *rdata)
{
	struct nfs_pgio_header *hdr = rdata->header;
	struct nfs_read_header *read_header = container_of(hdr, struct nfs_read_header, header);

	put_nfs_open_context(rdata->args.context);
	if (rdata->pages.pagevec != rdata->pages.page_array)
		kfree(rdata->pages.pagevec);
	if (rdata == &read_header->rpc_data) {
		rdata->header = NULL;
		rdata = NULL;
	}
	if (atomic_dec_and_test(&hdr->refcnt))
		hdr->completion_ops->completion(hdr);
	/* Note: we only free the rpc_task after callbacks are done.
	 * See the comment in rpc_free_task() for why
	 */
	kfree(rdata);
}
EXPORT_SYMBOL_GPL(nfs_readdata_release);

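/*
 * A request that lies entirely beyond the cached i_size needs no wire
 * traffic at all: zero the page, mark it uptodate and unlock it.
 */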
static
int nfs_return_empty_page(struct page *page)
{
	zero_user(page, 0, PAGE_CACHE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}

void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
			      struct inode *inode,
			      const struct nfs_pgio_completion_ops *compl_ops)
{
	nfs_pageio_init(pgio, inode, &nfs_pageio_read_ops, compl_ops,
			NFS_SERVER(inode)->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);

void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
	pgio->pg_ops = &nfs_pageio_read_ops;
	pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);

int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
		       struct page *page)
{
	struct nfs_page	*new;
	unsigned int len;
	struct nfs_pageio_descriptor pgio;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(ctx, inode, page, 0, len);
	if (IS_ERR(new)) {
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);

	NFS_PROTO(inode)->read_pageio_init(&pgio, inode, &nfs_async_read_completion_ops);
	nfs_pageio_add_request(&pgio, new);
	nfs_pageio_complete(&pgio);
	NFS_I(inode)->read_io += pgio.pg_bytes_written;
	return 0;
}

static void nfs_readpage_release(struct nfs_page *req)
{
	struct inode *d_inode = req->wb_context->dentry->d_inode;

	if (PageUptodate(req->wb_page))
		nfs_readpage_to_fscache(d_inode, req->wb_page, 0);

	unlock_page(req->wb_page);

	dprintk("NFS: read done (%s/%Lu %d@%Ld)\n",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
	nfs_release_request(req);
}

/*
 * Note: the I/O below was page aligned.  On EOF, zero whatever lies
 * beyond hdr->good_bytes so the tail of the last short page reads as
 * zeroes, matching what the server would have returned.
 */
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;

		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
			if (bytes > hdr->good_bytes)
				zero_user(page, 0, PAGE_SIZE);
			else if (hdr->good_bytes - bytes < PAGE_SIZE)
				zero_user_segment(page,
					hdr->good_bytes & ~PAGE_MASK,
					PAGE_SIZE);
		}
		bytes += req->wb_bytes;
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
			if (bytes <= hdr->good_bytes)
				SetPageUptodate(page);
		} else
			SetPageUptodate(page);
		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
out:
	hdr->release(hdr);
}

int nfs_initiate_read(struct rpc_clnt *clnt,
		      struct nfs_read_data *data,
		      const struct rpc_call_ops *call_ops, int flags)
{
	struct inode *inode = data->header->inode;
	int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = data->header->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task = &data->task,
		.rpc_client = clnt,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | swap_flags | flags,
	};

	/* Set up the initial task struct. */
	NFS_PROTO(inode)->read_setup(data, &msg);

	dprintk("NFS: %5u initiated read call (req %s/%llu, %u bytes @ "
			"offset %llu)\n",
			data->task.tk_pid,
			inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(inode),
			data->args.count,
			(unsigned long long)data->args.offset);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_initiate_read);

/*
 * Set up the NFS read request struct
 */
static void nfs_read_rpcsetup(struct nfs_read_data *data,
		unsigned int count, unsigned int offset)
{
	struct nfs_page *req = data->header->req;

	data->args.fh     = NFS_FH(data->header->inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages  = data->pages.pagevec;
	data->args.count  = count;
	data->args.context = get_nfs_open_context(req->wb_context);
	data->args.lock_context = req->wb_lock_context;

	data->res.fattr   = &data->fattr;
	data->res.count   = count;
	data->res.eof     = 0;
	nfs_fattr_init(&data->fattr);
}

static int nfs_do_read(struct nfs_read_data *data,
		const struct rpc_call_ops *call_ops)
{
	struct inode *inode = data->header->inode;

	return nfs_initiate_read(NFS_CLIENT(inode), data, call_ops, 0);
}

static int
nfs_do_multiple_reads(struct list_head *head,
		const struct rpc_call_ops *call_ops)
{
	struct nfs_read_data *data;
	int ret = 0;

	while (!list_empty(head)) {
		int ret2;

		data = list_first_entry(head, struct nfs_read_data, list);
		list_del_init(&data->list);

		ret2 = nfs_do_read(data, call_ops);
		if (ret == 0)
			ret = ret2;
	}
	return ret;
}

static void
nfs_async_read_error(struct list_head *head)
{
	struct nfs_page	*req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
}

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
	.error_cleanup = nfs_async_read_error,
	.completion = nfs_read_completion,
};

static void nfs_pagein_error(struct nfs_pageio_descriptor *desc,
		struct nfs_pgio_header *hdr)
{
	set_bit(NFS_IOHDR_REDO, &hdr->flags);
	while (!list_empty(&hdr->rpc_list)) {
		struct nfs_read_data *data = list_first_entry(&hdr->rpc_list,
				struct nfs_read_data, list);
		list_del(&data->list);
		nfs_readdata_release(data);
	}
	desc->pg_completion_ops->error_cleanup(&desc->pg_list);
}

/*
 * Generate multiple requests to fill a single page.
 *
 * We optimize to reduce the number of read operations on the wire.  If we
 * detect that we're reading a page, or an area of a page, that is past the
 * end of file, we do not generate NFS read operations but just clear the
 * parts of the page that would have come back zero from the server anyway.
 *
 * We rely on the cached value of i_size to make this determination; another
 * client can fill pages on the server past our cached end-of-file, but we
 * won't see the new data until our attribute cache is updated.  This is more
 * or less conventional NFS client behavior.
 */
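/*
 * Worked example (illustrative numbers, not taken from this file): with
 * 4096-byte pages and rsize = 1024, nfs_pagein_multi() below queues four
 * 1024-byte READs at offsets 0, 1024, 2048 and 3072 within the page,
 * all attached to a single nfs_pgio_header.
 */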
static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc,
			    struct nfs_pgio_header *hdr)
{
	struct nfs_page *req = hdr->req;
	struct page *page = req->wb_page;
	struct nfs_read_data *data;
	size_t rsize = desc->pg_bsize, nbytes;
	unsigned int offset;

	offset = 0;
	nbytes = desc->pg_count;
	do {
		size_t len = min(nbytes,rsize);

		data = nfs_readdata_alloc(hdr, 1);
		if (!data) {
			nfs_pagein_error(desc, hdr);
			return -ENOMEM;
		}
		data->pages.pagevec[0] = page;
		nfs_read_rpcsetup(data, len, offset);
		list_add(&data->list, &hdr->rpc_list);
		nbytes -= len;
		offset += len;
	} while (nbytes != 0);

	nfs_list_remove_request(req);
	nfs_list_add_request(req, &hdr->pages);
	desc->pg_rpc_callops = &nfs_read_common_ops;
	return 0;
}

static int nfs_pagein_one(struct nfs_pageio_descriptor *desc,
			  struct nfs_pgio_header *hdr)
{
	struct nfs_page		*req;
	struct page		**pages;
	struct nfs_read_data	*data;
	struct list_head *head = &desc->pg_list;

	data = nfs_readdata_alloc(hdr, nfs_page_array_len(desc->pg_base,
							  desc->pg_count));
	if (!data) {
		nfs_pagein_error(desc, hdr);
		return -ENOMEM;
	}

	pages = data->pages.pagevec;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &hdr->pages);
		*pages++ = req->wb_page;
	}

	nfs_read_rpcsetup(data, desc->pg_count, 0);
	list_add(&data->list, &hdr->rpc_list);
	desc->pg_rpc_callops = &nfs_read_common_ops;
	return 0;
}

int nfs_generic_pagein(struct nfs_pageio_descriptor *desc,
		       struct nfs_pgio_header *hdr)
{
	if (desc->pg_bsize < PAGE_CACHE_SIZE)
		return nfs_pagein_multi(desc, hdr);
	return nfs_pagein_one(desc, hdr);
}
EXPORT_SYMBOL_GPL(nfs_generic_pagein);

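/*
 * The pg_doio entry point.  The extra header reference taken here keeps
 * the header alive while its READ RPCs are dispatched; when it and all
 * per-nfs_read_data references have been dropped, the completion op
 * (nfs_read_completion above) runs.
 */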
static int nfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_read_header *rhdr;
	struct nfs_pgio_header *hdr;
	int ret;

	rhdr = nfs_readhdr_alloc();
	if (!rhdr) {
		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
		return -ENOMEM;
	}
	hdr = &rhdr->header;
	nfs_pgheader_init(desc, hdr, nfs_readhdr_free);
	atomic_inc(&hdr->refcnt);
	ret = nfs_generic_pagein(desc, hdr);
	if (ret == 0)
		ret = nfs_do_multiple_reads(&hdr->rpc_list,
					    desc->pg_rpc_callops);
	if (atomic_dec_and_test(&hdr->refcnt))
		hdr->completion_ops->completion(hdr);
	return ret;
}

static const struct nfs_pageio_ops nfs_pageio_read_ops = {
	.pg_test = nfs_generic_pg_test,
	.pg_doio = nfs_generic_pg_readpages,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
{
	struct inode *inode = data->header->inode;
	int status;

	dprintk("NFS: %s: %5u, (status %d)\n", __func__, task->tk_pid,
			task->tk_status);

	status = NFS_PROTO(inode)->read_done(task, data);
	if (status != 0)
		return status;

	nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, data->res.count);

	if (task->tk_status == -ESTALE) {
		set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
		nfs_mark_for_revalidate(inode);
	}
	return 0;
}

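/*
 * Only called for a short reply: the caller has already checked that
 * res.count != args.count.  Advance the offsets past the bytes that did
 * arrive and restart the RPC for the remainder, or fail the header with
 * -EIO if the server made no progress at all.
 */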
static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data)
{
	struct nfs_readargs *argp = &data->args;
	struct nfs_readres *resp = &data->res;

	/* This is a short read! */
	nfs_inc_stats(data->header->inode, NFSIOS_SHORTREAD);
	/* Has the server at least made some progress? */
	if (resp->count == 0) {
		nfs_set_pgio_error(data->header, -EIO, argp->offset);
		return;
	}
	/* Yes, so retry the read at the end of the data */
	data->mds_offset += resp->count;
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	rpc_restart_call_prepare(task);
}

static void nfs_readpage_result_common(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;
	struct nfs_pgio_header *hdr = data->header;

	/* Note the only returns of nfs_readpage_result are 0 and -EAGAIN */
	if (nfs_readpage_result(task, data) != 0)
		return;
	if (task->tk_status < 0)
		nfs_set_pgio_error(hdr, task->tk_status, data->args.offset);
	else if (data->res.eof) {
		loff_t bound;

		bound = data->args.offset + data->res.count;
		spin_lock(&hdr->lock);
		if (bound < hdr->io_start + hdr->good_bytes) {
			set_bit(NFS_IOHDR_EOF, &hdr->flags);
			clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
			hdr->good_bytes = bound - hdr->io_start;
		}
		spin_unlock(&hdr->lock);
	} else if (data->res.count != data->args.count)
		nfs_readpage_retry(task, data);
}

static void nfs_readpage_release_common(void *calldata)
{
	nfs_readdata_release(calldata);
}
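
/*
 * rpc_call_prepare callback.  Per-version setup is delegated through
 * the read_rpc_prepare op; for NFSv4.1 that is where the session
 * sequence slot is claimed (compare the explicit nfs4_setup_sequence()
 * call in the v3.1 nfs_read_prepare() further down).
 */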
void nfs_read_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;
	int err;
	err = NFS_PROTO(data->header->inode)->read_rpc_prepare(task, data);
	if (err)
		rpc_exit(task, err);
}

static const struct rpc_call_ops nfs_read_common_ops = {
	.rpc_call_prepare = nfs_read_prepare,
	.rpc_call_done = nfs_readpage_result_common,
	.rpc_release = nfs_readpage_release_common,
};

/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page_file_mapping(page)->host;
	int		error;

	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
		page, PAGE_CACHE_SIZE, page_file_index(page));
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	nfs_add_stats(inode, NFSIOS_READPAGES, 1);

	/*
	 * Try to flush any pending writes to the file..
	 *
	 * NOTE! Because we own the page lock, there cannot
	 * be any new pending writes generated at this point
	 * for this page (other pages can be written to).
	 */
	error = nfs_wb_page(inode, page);
	if (error)
		goto out_unlock;
	if (PageUptodate(page))
		goto out_unlock;

	error = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	if (file == NULL) {
		error = -EBADF;
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			goto out_unlock;
	} else
		ctx = get_nfs_open_context(nfs_file_open_context(file));

	if (!IS_SYNC(inode)) {
		error = nfs_readpage_from_fscache(ctx, inode, page);
		if (error == 0)
			goto out;
	}

	error = nfs_readpage_async(ctx, inode, page);

out:
	put_nfs_open_context(ctx);
	return error;
out_unlock:
	unlock_page(page);
	return error;
}

struct nfs_readdesc {
	struct nfs_pageio_descriptor *pgio;
	struct nfs_open_context *ctx;
};

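/*
 * Callback for read_cache_pages(): wraps each page-cache page in an
 * nfs_page request and feeds it to the pageio descriptor carried in
 * struct nfs_readdesc.
 */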
static int
readpage_async_filler(void *data, struct page *page)
{
	struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_page *new;
	unsigned int len;
	int error;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);

	new = nfs_create_request(desc->ctx, inode, page, 0, len);
	if (IS_ERR(new))
		goto out_error;

	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);
	if (!nfs_pageio_add_request(desc->pgio, new)) {
		error = desc->pgio->pg_error;
		goto out_unlock;
	}
	return 0;
out_error:
	error = PTR_ERR(new);
out_unlock:
	unlock_page(page);
	return error;
}

int nfs_readpages(struct file *filp, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	struct nfs_pageio_descriptor pgio;
	struct nfs_readdesc desc = {
		.pgio = &pgio,
	};
	struct inode *inode = mapping->host;
	unsigned long npages;
	int ret = -ESTALE;

	dprintk("NFS: nfs_readpages (%s/%Lu %d)\n",
			inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(inode),
			nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

	if (NFS_STALE(inode))
		goto out;

	if (filp == NULL) {
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			return -EBADF;
	} else
		desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));

	/* attempt to read as many of the pages as possible from the cache
	 * - this returns -ENOBUFS immediately if the cookie is negative
	 */
	ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
					 pages, &nr_pages);
	if (ret == 0)
		goto read_complete; /* all pages were read */

	NFS_PROTO(inode)->read_pageio_init(&pgio, inode, &nfs_async_read_completion_ops);

	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);

	nfs_pageio_complete(&pgio);
	NFS_I(inode)->read_io += pgio.pg_bytes_written;
	npages = (pgio.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
	put_nfs_open_context(desc.ctx);
out:
	return ret;
}

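/*
 * Note: the slab is still named "nfs_read_data", but its objects are
 * full nfs_read_headers, each embedding the one preallocated
 * nfs_read_data used in the common case.
 */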
int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_read_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_readpagecache(void)
{
	kmem_cache_destroy(nfs_rdata_cachep);
}

fs/nfs/read.c (Linux v3.1)

/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include <asm/system.h>
#include "pnfs.h"

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static const struct nfs_pageio_ops nfs_pageio_read_ops;
static const struct rpc_call_ops nfs_read_partial_ops;
static const struct rpc_call_ops nfs_read_full_ops;

static struct kmem_cache *nfs_rdata_cachep;
static mempool_t *nfs_rdata_mempool;

#define MIN_POOL_READ	(32)

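/*
 * Unlike the v3.15 code above, read data here comes from a mempool
 * with MIN_POOL_READ elements held in reserve, so allocations can keep
 * making progress under memory pressure.
 */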
struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
{
	struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, GFP_KERNEL);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
		p->npages = pagecount;
		if (pagecount <= ARRAY_SIZE(p->page_array))
			p->pagevec = p->page_array;
		else {
			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
			if (!p->pagevec) {
				mempool_free(p, nfs_rdata_mempool);
				p = NULL;
			}
		}
	}
	return p;
}

void nfs_readdata_free(struct nfs_read_data *p)
{
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_rdata_mempool);
}

void nfs_readdata_release(struct nfs_read_data *rdata)
{
	put_lseg(rdata->lseg);
	put_nfs_open_context(rdata->args.context);
	nfs_readdata_free(rdata);
}

static
int nfs_return_empty_page(struct page *page)
{
	zero_user(page, 0, PAGE_CACHE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}

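/*
 * If the server returned less data than requested and signalled EOF,
 * zero the rest of the pagevec so uninitialised page contents are
 * never exposed.
 */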
static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
{
	unsigned int remainder = data->args.count - data->res.count;
	unsigned int base = data->args.pgbase + data->res.count;
	unsigned int pglen;
	struct page **pages;

	if (data->res.eof == 0 || remainder == 0)
		return;
	/*
	 * Note: "remainder" can never be negative, since we check for
	 *	this in the XDR code.
	 */
	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
	base &= ~PAGE_CACHE_MASK;
	pglen = PAGE_CACHE_SIZE - base;
	for (;;) {
		if (remainder <= pglen) {
			zero_user(*pages, base, remainder);
			break;
		}
		zero_user(*pages, base, pglen);
		pages++;
		remainder -= pglen;
		pglen = PAGE_CACHE_SIZE;
		base = 0;
	}
}

static void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio,
		struct inode *inode)
{
	nfs_pageio_init(pgio, inode, &nfs_pageio_read_ops,
			NFS_SERVER(inode)->rsize, 0);
}

void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
	pgio->pg_ops = &nfs_pageio_read_ops;
	pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);

static void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
		struct inode *inode)
{
	if (!pnfs_pageio_init_read(pgio, inode))
		nfs_pageio_init_read_mds(pgio, inode);
}

int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
		       struct page *page)
{
	struct nfs_page	*new;
	unsigned int len;
	struct nfs_pageio_descriptor pgio;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(ctx, inode, page, 0, len);
	if (IS_ERR(new)) {
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);

	nfs_pageio_init_read(&pgio, inode);
	nfs_pageio_add_request(&pgio, new);
	nfs_pageio_complete(&pgio);
	return 0;
}

static void nfs_readpage_release(struct nfs_page *req)
{
	struct inode *d_inode = req->wb_context->dentry->d_inode;

	if (PageUptodate(req->wb_page))
		nfs_readpage_to_fscache(d_inode, req->wb_page, 0);

	unlock_page(req->wb_page);

	dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
	nfs_release_request(req);
}

int nfs_initiate_read(struct nfs_read_data *data, struct rpc_clnt *clnt,
		      const struct rpc_call_ops *call_ops)
{
	struct inode *inode = data->inode;
	int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = data->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task = &data->task,
		.rpc_client = clnt,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | swap_flags,
	};

	/* Set up the initial task struct. */
	NFS_PROTO(inode)->read_setup(data, &msg);

	dprintk("NFS: %5u initiated read call (req %s/%lld, %u bytes @ "
			"offset %llu)\n",
			data->task.tk_pid,
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			data->args.count,
			(unsigned long long)data->args.offset);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_initiate_read);

/*
 * Set up the NFS read request struct
 */
static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
		unsigned int count, unsigned int offset)
{
	struct inode *inode = req->wb_context->dentry->d_inode;

	data->req	  = req;
	data->inode	  = inode;
	data->cred	  = req->wb_context->cred;

	data->args.fh     = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages  = data->pagevec;
	data->args.count  = count;
	data->args.context = get_nfs_open_context(req->wb_context);
	data->args.lock_context = req->wb_lock_context;

	data->res.fattr   = &data->fattr;
	data->res.count   = count;
	data->res.eof     = 0;
	nfs_fattr_init(&data->fattr);
}

static int nfs_do_read(struct nfs_read_data *data,
		const struct rpc_call_ops *call_ops)
{
	struct inode *inode = data->args.context->dentry->d_inode;

	return nfs_initiate_read(data, NFS_CLIENT(inode), call_ops);
}

static int
nfs_do_multiple_reads(struct list_head *head,
		const struct rpc_call_ops *call_ops)
{
	struct nfs_read_data *data;
	int ret = 0;

	while (!list_empty(head)) {
		int ret2;

		data = list_entry(head->next, struct nfs_read_data, list);
		list_del_init(&data->list);

		ret2 = nfs_do_read(data, call_ops);
		if (ret == 0)
			ret = ret2;
	}
	return ret;
}

static void
nfs_async_read_error(struct list_head *head)
{
	struct nfs_page	*req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		SetPageError(req->wb_page);
		nfs_readpage_release(req);
	}
}

/*
 * Generate multiple requests to fill a single page.
 *
 * We optimize to reduce the number of read operations on the wire.  If we
 * detect that we're reading a page, or an area of a page, that is past the
 * end of file, we do not generate NFS read operations but just clear the
 * parts of the page that would have come back zero from the server anyway.
 *
 * We rely on the cached value of i_size to make this determination; another
 * client can fill pages on the server past our cached end-of-file, but we
 * won't see the new data until our attribute cache is updated.  This is more
 * or less conventional NFS client behavior.
 */
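/*
 * In this version the split is accounted through req->wb_complete:
 * each partial READ gets its own nfs_read_data, and the request is
 * only released once nfs_readpage_release_partial() has seen the last
 * of them complete.
 */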
static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc, struct list_head *res)
{
	struct nfs_page *req = nfs_list_entry(desc->pg_list.next);
	struct page *page = req->wb_page;
	struct nfs_read_data *data;
	size_t rsize = desc->pg_bsize, nbytes;
	unsigned int offset;
	int requests = 0;
	int ret = 0;

	nfs_list_remove_request(req);

	offset = 0;
	nbytes = desc->pg_count;
	do {
		size_t len = min(nbytes,rsize);

		data = nfs_readdata_alloc(1);
		if (!data)
			goto out_bad;
		data->pagevec[0] = page;
		nfs_read_rpcsetup(req, data, len, offset);
		list_add(&data->list, res);
		requests++;
		nbytes -= len;
		offset += len;
	} while(nbytes != 0);
	atomic_set(&req->wb_complete, requests);
	ClearPageError(page);
	desc->pg_rpc_callops = &nfs_read_partial_ops;
	return ret;
out_bad:
	while (!list_empty(res)) {
		data = list_entry(res->next, struct nfs_read_data, list);
		list_del(&data->list);
		nfs_readdata_free(data);
	}
	SetPageError(page);
	nfs_readpage_release(req);
	return -ENOMEM;
}

static int nfs_pagein_one(struct nfs_pageio_descriptor *desc, struct list_head *res)
{
	struct nfs_page		*req;
	struct page		**pages;
	struct nfs_read_data	*data;
	struct list_head *head = &desc->pg_list;
	int ret = 0;

	data = nfs_readdata_alloc(nfs_page_array_len(desc->pg_base,
						     desc->pg_count));
	if (!data) {
		nfs_async_read_error(head);
		ret = -ENOMEM;
		goto out;
	}

	pages = data->pagevec;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		*pages++ = req->wb_page;
	}
	req = nfs_list_entry(data->pages.next);

	nfs_read_rpcsetup(req, data, desc->pg_count, 0);
	list_add(&data->list, res);
	desc->pg_rpc_callops = &nfs_read_full_ops;
out:
	return ret;
}

int nfs_generic_pagein(struct nfs_pageio_descriptor *desc, struct list_head *head)
{
	if (desc->pg_bsize < PAGE_CACHE_SIZE)
		return nfs_pagein_multi(desc, head);
	return nfs_pagein_one(desc, head);
}

static int nfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
	LIST_HEAD(head);
	int ret;

	ret = nfs_generic_pagein(desc, &head);
	if (ret == 0)
		ret = nfs_do_multiple_reads(&head, desc->pg_rpc_callops);
	return ret;
}

static const struct nfs_pageio_ops nfs_pageio_read_ops = {
	.pg_test = nfs_generic_pg_test,
	.pg_doio = nfs_generic_pg_readpages,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
{
	int status;

	dprintk("NFS: %s: %5u, (status %d)\n", __func__, task->tk_pid,
			task->tk_status);

	status = NFS_PROTO(data->inode)->read_done(task, data);
	if (status != 0)
		return status;

	nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, data->res.count);

	if (task->tk_status == -ESTALE) {
		set_bit(NFS_INO_STALE, &NFS_I(data->inode)->flags);
		nfs_mark_for_revalidate(data->inode);
	}
	return 0;
}

static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data)
{
	struct nfs_readargs *argp = &data->args;
	struct nfs_readres *resp = &data->res;

	if (resp->eof || resp->count == argp->count)
		return;

	/* This is a short read! */
	nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
	/* Has the server at least made some progress? */
	if (resp->count == 0)
		return;

	/* Yes, so retry the read at the end of the data */
	data->mds_offset += resp->count;
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	nfs_restart_rpc(task, NFS_SERVER(data->inode)->nfs_client);
}

/*
 * Handle a read reply that fills part of a page.
 */
static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;

	if (nfs_readpage_result(task, data) != 0)
		return;
	if (task->tk_status < 0)
		return;

	nfs_readpage_truncate_uninitialised_page(data);
	nfs_readpage_retry(task, data);
}

static void nfs_readpage_release_partial(void *calldata)
{
	struct nfs_read_data *data = calldata;
	struct nfs_page *req = data->req;
	struct page *page = req->wb_page;
	int status = data->task.tk_status;

	if (status < 0)
		SetPageError(page);

	if (atomic_dec_and_test(&req->wb_complete)) {
		if (!PageError(page))
			SetPageUptodate(page);
		nfs_readpage_release(req);
	}
	nfs_readdata_release(calldata);
}

#if defined(CONFIG_NFS_V4_1)
void nfs_read_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;

	if (nfs4_setup_sequence(NFS_SERVER(data->inode),
				&data->args.seq_args, &data->res.seq_res,
				0, task))
		return;
	rpc_call_start(task);
}
#endif /* CONFIG_NFS_V4_1 */

static const struct rpc_call_ops nfs_read_partial_ops = {
#if defined(CONFIG_NFS_V4_1)
	.rpc_call_prepare = nfs_read_prepare,
#endif /* CONFIG_NFS_V4_1 */
	.rpc_call_done = nfs_readpage_result_partial,
	.rpc_release = nfs_readpage_release_partial,
};

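/*
 * Walk the pagevec and mark every fully received page uptodate, plus
 * the final partial page when the reply hit EOF or was full-length;
 * pages a short reply never reached are left for the retry.
 */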
static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
{
	unsigned int count = data->res.count;
	unsigned int base = data->args.pgbase;
	struct page **pages;

	if (data->res.eof)
		count = data->args.count;
	if (unlikely(count == 0))
		return;
	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
	base &= ~PAGE_CACHE_MASK;
	count += base;
	for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
		SetPageUptodate(*pages);
	if (count == 0)
		return;
	/* Was this a short read? */
	if (data->res.eof || data->res.count == data->args.count)
		SetPageUptodate(*pages);
}

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;

	if (nfs_readpage_result(task, data) != 0)
		return;
	if (task->tk_status < 0)
		return;
	/*
	 * Note: nfs_readpage_retry may change the values of
	 * data->args. In the multi-page case, we therefore need
	 * to ensure that we call nfs_readpage_set_pages_uptodate()
	 * first.
	 */
	nfs_readpage_truncate_uninitialised_page(data);
	nfs_readpage_set_pages_uptodate(data);
	nfs_readpage_retry(task, data);
}

static void nfs_readpage_release_full(void *calldata)
{
	struct nfs_read_data *data = calldata;

	while (!list_empty(&data->pages)) {
		struct nfs_page *req = nfs_list_entry(data->pages.next);

		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
	nfs_readdata_release(calldata);
}

static const struct rpc_call_ops nfs_read_full_ops = {
#if defined(CONFIG_NFS_V4_1)
	.rpc_call_prepare = nfs_read_prepare,
#endif /* CONFIG_NFS_V4_1 */
	.rpc_call_done = nfs_readpage_result_full,
	.rpc_release = nfs_readpage_release_full,
};

/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page->mapping->host;
	int		error;

	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
		page, PAGE_CACHE_SIZE, page->index);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	nfs_add_stats(inode, NFSIOS_READPAGES, 1);

	/*
	 * Try to flush any pending writes to the file..
	 *
	 * NOTE! Because we own the page lock, there cannot
	 * be any new pending writes generated at this point
	 * for this page (other pages can be written to).
	 */
	error = nfs_wb_page(inode, page);
	if (error)
		goto out_unlock;
	if (PageUptodate(page))
		goto out_unlock;

	error = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	if (file == NULL) {
		error = -EBADF;
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			goto out_unlock;
	} else
		ctx = get_nfs_open_context(nfs_file_open_context(file));

	if (!IS_SYNC(inode)) {
		error = nfs_readpage_from_fscache(ctx, inode, page);
		if (error == 0)
			goto out;
	}

	error = nfs_readpage_async(ctx, inode, page);

out:
	put_nfs_open_context(ctx);
	return error;
out_unlock:
	unlock_page(page);
	return error;
}

struct nfs_readdesc {
	struct nfs_pageio_descriptor *pgio;
	struct nfs_open_context *ctx;
};

static int
readpage_async_filler(void *data, struct page *page)
{
	struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
	struct inode *inode = page->mapping->host;
	struct nfs_page *new;
	unsigned int len;
	int error;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);

	new = nfs_create_request(desc->ctx, inode, page, 0, len);
	if (IS_ERR(new))
		goto out_error;

	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);
	if (!nfs_pageio_add_request(desc->pgio, new)) {
		error = desc->pgio->pg_error;
		goto out_unlock;
	}
	return 0;
out_error:
	error = PTR_ERR(new);
	SetPageError(page);
out_unlock:
	unlock_page(page);
	return error;
}

int nfs_readpages(struct file *filp, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	struct nfs_pageio_descriptor pgio;
	struct nfs_readdesc desc = {
		.pgio = &pgio,
	};
	struct inode *inode = mapping->host;
	unsigned long npages;
	int ret = -ESTALE;

	dprintk("NFS: nfs_readpages (%s/%Ld %d)\n",
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

	if (NFS_STALE(inode))
		goto out;

	if (filp == NULL) {
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			return -EBADF;
	} else
		desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));

	/* attempt to read as many of the pages as possible from the cache
	 * - this returns -ENOBUFS immediately if the cookie is negative
	 */
	ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
					 pages, &nr_pages);
	if (ret == 0)
		goto read_complete; /* all pages were read */

	nfs_pageio_init_read(&pgio, inode);

	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);

	nfs_pageio_complete(&pgio);
	npages = (pgio.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
	put_nfs_open_context(desc.ctx);
out:
	return ret;
}

int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_read_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	nfs_rdata_mempool = mempool_create_slab_pool(MIN_POOL_READ,
						     nfs_rdata_cachep);
	if (nfs_rdata_mempool == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_readpagecache(void)
{
	mempool_destroy(nfs_rdata_mempool);
	kmem_cache_destroy(nfs_rdata_cachep);
}