// SPDX-License-Identifier: GPL-2.0-only
/* Object lifetime handling and tracing.
 *
 * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include "internal.h"

/*
 * Allocate an I/O request and initialise it.
 */
struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
					     struct file *file,
					     loff_t start, size_t len,
					     enum netfs_io_origin origin)
{
	static atomic_t debug_ids;
	struct inode *inode = file ? file_inode(file) : mapping->host;
	struct netfs_inode *ctx = netfs_inode(inode);
	struct netfs_io_request *rreq;
	mempool_t *mempool = ctx->ops->request_pool ?: &netfs_request_pool;
	struct kmem_cache *cache = mempool->pool_data;
	int ret;

	for (;;) {
		rreq = mempool_alloc(mempool, GFP_KERNEL);
		if (rreq)
			break;
		msleep(10);
	}

	memset(rreq, 0, kmem_cache_size(cache));
	rreq->start = start;
	rreq->len = len;
	rreq->origin = origin;
	rreq->netfs_ops = ctx->ops;
	rreq->mapping = mapping;
	rreq->inode = inode;
	rreq->i_size = i_size_read(inode);
	rreq->debug_id = atomic_inc_return(&debug_ids);
	rreq->wsize = INT_MAX;
	rreq->io_streams[0].sreq_max_len = ULONG_MAX;
	rreq->io_streams[0].sreq_max_segs = 0;
	spin_lock_init(&rreq->lock);
	INIT_LIST_HEAD(&rreq->io_streams[0].subrequests);
	INIT_LIST_HEAD(&rreq->io_streams[1].subrequests);
	INIT_LIST_HEAD(&rreq->subrequests);
	refcount_set(&rreq->ref, 1);

	if (origin == NETFS_READAHEAD ||
	    origin == NETFS_READPAGE ||
	    origin == NETFS_READ_GAPS ||
	    origin == NETFS_READ_FOR_WRITE ||
	    origin == NETFS_DIO_READ)
		INIT_WORK(&rreq->work, netfs_read_termination_worker);
	else
		INIT_WORK(&rreq->work, netfs_write_collection_worker);

	__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
	if (file && file->f_flags & O_NONBLOCK)
		__set_bit(NETFS_RREQ_NONBLOCK, &rreq->flags);
	if (rreq->netfs_ops->init_request) {
		ret = rreq->netfs_ops->init_request(rreq, file);
		if (ret < 0) {
			mempool_free(rreq, rreq->netfs_ops->request_pool ?: &netfs_request_pool);
			return ERR_PTR(ret);
		}
	}

	atomic_inc(&ctx->io_count);
	trace_netfs_rreq_ref(rreq->debug_id, 1, netfs_rreq_trace_new);
	netfs_proc_add_rreq(rreq);
	netfs_stat(&netfs_n_rh_rreq);
	return rreq;
}
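
/*
 * Usage sketch (illustrative only, not part of this file): a read path in
 * a network filesystem might obtain and release a request roughly like
 * this; the locals are hypothetical and the put trace tag is one plausible
 * choice:
 *
 *	struct netfs_io_request *rreq;
 *
 *	rreq = netfs_alloc_request(mapping, file, start, len, NETFS_READAHEAD);
 *	if (IS_ERR(rreq))
 *		return PTR_ERR(rreq);
 *	... issue and collect I/O ...
 *	netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
 *
 * Note that the allocation itself cannot fail: the mempool_alloc() loop
 * above retries with a 10ms sleep until memory becomes available; only a
 * failure of the filesystem's ->init_request() hook yields an ERR_PTR.
 */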

void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what)
{
	int r;

	__refcount_inc(&rreq->ref, &r);
	trace_netfs_rreq_ref(rreq->debug_id, r + 1, what);
}

void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async)
{
	struct netfs_io_subrequest *subreq;
	struct netfs_io_stream *stream;
	int s;

	while (!list_empty(&rreq->subrequests)) {
		subreq = list_first_entry(&rreq->subrequests,
					  struct netfs_io_subrequest, rreq_link);
		list_del(&subreq->rreq_link);
		netfs_put_subrequest(subreq, was_async,
				     netfs_sreq_trace_put_clear);
	}

	for (s = 0; s < ARRAY_SIZE(rreq->io_streams); s++) {
		stream = &rreq->io_streams[s];
		while (!list_empty(&stream->subrequests)) {
			subreq = list_first_entry(&stream->subrequests,
						  struct netfs_io_subrequest, rreq_link);
			list_del(&subreq->rreq_link);
			netfs_put_subrequest(subreq, was_async,
					     netfs_sreq_trace_put_clear);
		}
	}
}
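
/*
 * Note: each put in netfs_clear_subrequests() drops the list's share of
 * the two references that netfs_alloc_subrequest() starts a subrequest
 * with; by the time the lists are cleared, the I/O's own reference has
 * normally been dropped already, so these puts are typically the final
 * ones and trigger netfs_free_subrequest().
 */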

static void netfs_free_request_rcu(struct rcu_head *rcu)
{
	struct netfs_io_request *rreq = container_of(rcu, struct netfs_io_request, rcu);

	mempool_free(rreq, rreq->netfs_ops->request_pool ?: &netfs_request_pool);
	netfs_stat_d(&netfs_n_rh_rreq);
}

static void netfs_free_request(struct work_struct *work)
{
	struct netfs_io_request *rreq =
		container_of(work, struct netfs_io_request, work);
	struct netfs_inode *ictx = netfs_inode(rreq->inode);
	unsigned int i;

	trace_netfs_rreq(rreq, netfs_rreq_trace_free);
	netfs_proc_del_rreq(rreq);
	netfs_clear_subrequests(rreq, false);
	if (rreq->netfs_ops->free_request)
		rreq->netfs_ops->free_request(rreq);
	if (rreq->cache_resources.ops)
		rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
	if (rreq->direct_bv) {
		for (i = 0; i < rreq->direct_bv_count; i++) {
			if (rreq->direct_bv[i].bv_page) {
				if (rreq->direct_bv_unpin)
					unpin_user_page(rreq->direct_bv[i].bv_page);
			}
		}
		kvfree(rreq->direct_bv);
	}
	netfs_clear_buffer(rreq);

	if (atomic_dec_and_test(&ictx->io_count))
		wake_up_var(&ictx->io_count);
	call_rcu(&rreq->rcu, netfs_free_request_rcu);
}
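
/*
 * The memory is handed back to the mempool via call_rcu() rather than
 * immediately, presumably so that lockless readers (such as a traverser of
 * the /proc list that netfs_proc_add_rreq() put the request on) can finish
 * examining the structure before it is reused.
 */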

void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
		       enum netfs_rreq_ref_trace what)
{
	unsigned int debug_id;
	bool dead;
	int r;

	if (rreq) {
		debug_id = rreq->debug_id;
		dead = __refcount_dec_and_test(&rreq->ref, &r);
		trace_netfs_rreq_ref(debug_id, r - 1, what);
		if (dead) {
			if (was_async) {
				rreq->work.func = netfs_free_request;
				if (!queue_work(system_unbound_wq, &rreq->work))
					WARN_ON(1);
			} else {
				netfs_free_request(&rreq->work);
			}
		}
	}
}
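
/*
 * Pairing sketch (illustrative only): every netfs_get_request() must be
 * balanced by a netfs_put_request() carrying a matching trace tag, e.g.:
 *
 *	netfs_get_request(rreq, netfs_rreq_trace_get_subreq);
 *	... hand rreq to another context ...
 *	netfs_put_request(rreq, was_async, netfs_rreq_trace_put_subreq);
 *
 * Passing was_async = true defers the actual free to a workqueue,
 * presumably for callers running in contexts that must not sleep.
 */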

/*
 * Allocate and partially initialise an I/O subrequest structure.
 */
struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	mempool_t *mempool = rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool;
	struct kmem_cache *cache = mempool->pool_data;

	for (;;) {
		subreq = mempool_alloc(mempool, GFP_KERNEL);
		if (subreq)
			break;
		msleep(10);
	}

	memset(subreq, 0, kmem_cache_size(cache));
	INIT_WORK(&subreq->work, NULL);
	INIT_LIST_HEAD(&subreq->rreq_link);
	refcount_set(&subreq->ref, 2);
	subreq->rreq = rreq;
	subreq->debug_index = atomic_inc_return(&rreq->subreq_counter);
	netfs_get_request(rreq, netfs_rreq_trace_get_subreq);
	netfs_stat(&netfs_n_rh_sreq);
	return subreq;
}
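
/*
 * Note: a new subrequest starts with a reference count of two: one for the
 * list the caller links it onto (put again by netfs_clear_subrequests())
 * and one for the I/O itself.  A hypothetical issuer might therefore do:
 *
 *	subreq = netfs_alloc_subrequest(rreq);
 *	list_add_tail(&subreq->rreq_link, &rreq->subrequests);
 *	... submit the I/O; then, on completion ...
 *	netfs_put_subrequest(subreq, was_async, what);
 *
 * where "what" is whatever netfs_sreq_ref_trace tag fits the caller.
 */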

void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
			  enum netfs_sreq_ref_trace what)
{
	int r;

	__refcount_inc(&subreq->ref, &r);
	trace_netfs_sreq_ref(subreq->rreq->debug_id, subreq->debug_index, r + 1,
			     what);
}

static void netfs_free_subrequest(struct netfs_io_subrequest *subreq,
				  bool was_async)
{
	struct netfs_io_request *rreq = subreq->rreq;

	trace_netfs_sreq(subreq, netfs_sreq_trace_free);
	if (rreq->netfs_ops->free_subrequest)
		rreq->netfs_ops->free_subrequest(subreq);
	mempool_free(subreq, rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool);
	netfs_stat_d(&netfs_n_rh_sreq);
	netfs_put_request(rreq, was_async, netfs_rreq_trace_put_subreq);
}

void netfs_put_subrequest(struct netfs_io_subrequest *subreq, bool was_async,
			  enum netfs_sreq_ref_trace what)
{
	unsigned int debug_index = subreq->debug_index;
	unsigned int debug_id = subreq->rreq->debug_id;
	bool dead;
	int r;

	dead = __refcount_dec_and_test(&subreq->ref, &r);
	trace_netfs_sreq_ref(debug_id, debug_index, r - 1, what);
	if (dead)
		netfs_free_subrequest(subreq, was_async);
}
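
/*
 * Overall lifetime sketch: each subrequest pins its parent request (see
 * netfs_alloc_subrequest()) and each request pins its inode context via
 * ctx->io_count, so teardown runs strictly bottom-up:
 *
 *	netfs_put_subrequest()		final put of a subrequest
 *	  -> netfs_free_subrequest()	frees it, puts the parent request
 *	netfs_put_request()		final put of the request
 *	  -> netfs_free_request()	clears any remaining subrequests,
 *					drops io_count, wakes waiters
 *	  -> netfs_free_request_rcu()	returns the memory after a grace period
 */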