/*
 * Device operations for the pnfs client.
 *
 * Copyright (c) 2002
 * The Regents of the University of Michigan
 * All Rights Reserved
 *
 * Dean Hildebrand <dhildebz@umich.edu>
 * Garth Goodson <Garth.Goodson@netapp.com>
 *
 * Permission is granted to use, copy, create derivative works, and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the University of Michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. If
 * the above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * This software is provided as is, without representation or warranty
 * of any kind either express or implied, including without limitation
 * the implied warranties of merchantability, fitness for a particular
 * purpose, or noninfringement. The Regents of the University of
 * Michigan shall not be liable for any damages, including special,
 * indirect, incidental, or consequential damages, with respect to any
 * claim arising out of or in connection with the use of the software,
 * even if it has been or is hereafter advised of the possibility of
 * such damages.
 */

#include <linux/export.h>
#include <linux/nfs_fs.h>
#include "nfs4session.h"
#include "internal.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS

/*
 * Device ID RCU cache. A device ID is unique per server and layout type.
 */
#define NFS4_DEVICE_ID_HASH_BITS	5
#define NFS4_DEVICE_ID_HASH_SIZE	(1 << NFS4_DEVICE_ID_HASH_BITS)
#define NFS4_DEVICE_ID_HASH_MASK	(NFS4_DEVICE_ID_HASH_SIZE - 1)

static struct hlist_head nfs4_deviceid_cache[NFS4_DEVICE_ID_HASH_SIZE];
static DEFINE_SPINLOCK(nfs4_deviceid_lock);

#ifdef NFS_DEBUG
void
nfs4_print_deviceid(const struct nfs4_deviceid *id)
{
	u32 *p = (u32 *)id;

	dprintk("%s: device id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
}
EXPORT_SYMBOL_GPL(nfs4_print_deviceid);
#endif

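/*
 * Hash a device ID into a cache bucket: a simple multiply-by-37
 * accumulation over all NFS4_DEVICEID4_SIZE bytes, masked down to
 * one of the NFS4_DEVICE_ID_HASH_SIZE buckets.
 */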
static inline u32
nfs4_deviceid_hash(const struct nfs4_deviceid *id)
{
	unsigned char *cptr = (unsigned char *)id->data;
	unsigned int nbytes = NFS4_DEVICEID4_SIZE;
	u32 x = 0;

	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x & NFS4_DEVICE_ID_HASH_MASK;
}

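/*
 * RCU walk of one hash bucket. Returns a node matching the layout driver,
 * nfs_client and device ID, skipping entries whose reference count has
 * already dropped to zero. Caller must hold rcu_read_lock().
 */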
static struct nfs4_deviceid_node *
_lookup_deviceid(const struct pnfs_layoutdriver_type *ld,
		 const struct nfs_client *clp, const struct nfs4_deviceid *id,
		 long hash)
{
	struct nfs4_deviceid_node *d;

	hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
		if (d->ld == ld && d->nfs_client == clp &&
		    !memcmp(&d->deviceid, id, sizeof(*id))) {
			if (atomic_read(&d->ref))
				return d;
			else
				continue;
		}
	return NULL;
}

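/*
 * Issue a GETDEVICEINFO call to the server and have the layout driver
 * decode the reply into a new deviceid node.
 *
 * The reply buffer is sized from the session's maximum response size,
 * clamped to the layout driver's max_deviceinfo_size if one is set.
 * Returns the new node, or NULL on allocation or RPC failure.
 */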
static struct nfs4_deviceid_node *
nfs4_get_device_info(struct nfs_server *server,
		const struct nfs4_deviceid *dev_id,
		struct rpc_cred *cred, gfp_t gfp_flags)
{
	struct nfs4_deviceid_node *d = NULL;
	struct pnfs_device *pdev = NULL;
	struct page **pages = NULL;
	u32 max_resp_sz;
	int max_pages;
	int rc, i;

	/*
	 * Use the session max response size as the basis for setting
	 * GETDEVICEINFO's maxcount
	 */
	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	if (server->pnfs_curr_ld->max_deviceinfo_size &&
	    server->pnfs_curr_ld->max_deviceinfo_size < max_resp_sz)
		max_resp_sz = server->pnfs_curr_ld->max_deviceinfo_size;
	max_pages = nfs_page_array_len(0, max_resp_sz);
	dprintk("%s: server %p max_resp_sz %u max_pages %d\n",
		__func__, server, max_resp_sz, max_pages);

	pdev = kzalloc(sizeof(*pdev), gfp_flags);
	if (!pdev)
		return NULL;

	pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags);
	if (!pages)
		goto out_free_pdev;

	for (i = 0; i < max_pages; i++) {
		pages[i] = alloc_page(gfp_flags);
		if (!pages[i])
			goto out_free_pages;
	}

	memcpy(&pdev->dev_id, dev_id, sizeof(*dev_id));
	pdev->layout_type = server->pnfs_curr_ld->id;
	pdev->pages = pages;
	pdev->pgbase = 0;
	pdev->pglen = max_resp_sz;
	pdev->mincount = 0;
	pdev->maxcount = max_resp_sz - nfs41_maxgetdevinfo_overhead;

	rc = nfs4_proc_getdeviceinfo(server, pdev, cred);
	dprintk("%s getdevice info returns %d\n", __func__, rc);
	if (rc)
		goto out_free_pages;

	/*
	 * Found new device, need to decode it and then add it to the
	 * list of known devices for this mountpoint.
	 */
	d = server->pnfs_curr_ld->alloc_deviceid_node(server, pdev,
			gfp_flags);
	if (d && pdev->nocache)
		set_bit(NFS_DEVICEID_NOCACHE, &d->flags);

out_free_pages:
	while (--i >= 0)
		__free_page(pages[i]);
	kfree(pages);
out_free_pdev:
	kfree(pdev);
	dprintk("<-- %s d %p\n", __func__, d);
	return d;
}

/*
 * Look up a deviceid in the cache and take a reference on it if found
 *
 * @server nfs_server associated with the deviceid (supplies the layout
 *	   driver and nfs_client used as lookup keys)
 * @id deviceid to look up
 * @hash hash bucket computed from @id
 */
static struct nfs4_deviceid_node *
__nfs4_find_get_deviceid(struct nfs_server *server,
		const struct nfs4_deviceid *id, long hash)
{
	struct nfs4_deviceid_node *d;

	rcu_read_lock();
	d = _lookup_deviceid(server->pnfs_curr_ld, server->nfs_client, id,
			hash);
	if (d != NULL && !atomic_inc_not_zero(&d->ref))
		d = NULL;
	rcu_read_unlock();
	return d;
}

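/*
 * Find a deviceid node, fetching it from the server if it is not cached.
 *
 * First checks the cache; on a miss, issues GETDEVICEINFO and then
 * re-checks under nfs4_deviceid_lock so that a concurrent insertion of
 * the same deviceid wins and the freshly decoded node is freed.
 */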
struct nfs4_deviceid_node *
nfs4_find_get_deviceid(struct nfs_server *server,
		const struct nfs4_deviceid *id, struct rpc_cred *cred,
		gfp_t gfp_mask)
{
	long hash = nfs4_deviceid_hash(id);
	struct nfs4_deviceid_node *d, *new;

	d = __nfs4_find_get_deviceid(server, id, hash);
	if (d)
		return d;

	new = nfs4_get_device_info(server, id, cred, gfp_mask);
	if (!new)
		return new;

	spin_lock(&nfs4_deviceid_lock);
	d = __nfs4_find_get_deviceid(server, id, hash);
	if (d) {
		spin_unlock(&nfs4_deviceid_lock);
		server->pnfs_curr_ld->free_deviceid_node(new);
		return d;
	}
	hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]);
	atomic_inc(&new->ref);
	spin_unlock(&nfs4_deviceid_lock);

	return new;
}
EXPORT_SYMBOL_GPL(nfs4_find_get_deviceid);

/*
 * Remove a deviceid from the cache
 *
 * @ld layout driver associated with deviceid
 * @clp nfs_client associated with deviceid
 * @id the deviceid to unhash
 *
 * The node is unhashed and the cache's reference on it is dropped; the
 * node itself is freed once its reference count reaches zero.
 */
void
nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *ld,
		const struct nfs_client *clp, const struct nfs4_deviceid *id)
{
	struct nfs4_deviceid_node *d;

	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	d = _lookup_deviceid(ld, clp, id, nfs4_deviceid_hash(id));
	rcu_read_unlock();
	if (!d) {
		spin_unlock(&nfs4_deviceid_lock);
		return;
	}
	hlist_del_init_rcu(&d->node);
	clear_bit(NFS_DEVICEID_NOCACHE, &d->flags);
	spin_unlock(&nfs4_deviceid_lock);

	/* balance the initial ref set in nfs4_init_deviceid_node */
	nfs4_put_deviceid_node(d);
}
EXPORT_SYMBOL_GPL(nfs4_delete_deviceid);

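/*
 * Initialize a freshly allocated deviceid node: record the layout driver
 * and nfs_client it belongs to, copy in the deviceid, and set the initial
 * reference count to 1.
 */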
void
nfs4_init_deviceid_node(struct nfs4_deviceid_node *d, struct nfs_server *server,
		const struct nfs4_deviceid *id)
{
	INIT_HLIST_NODE(&d->node);
	INIT_HLIST_NODE(&d->tmpnode);
	d->ld = server->pnfs_curr_ld;
	d->nfs_client = server->nfs_client;
	d->flags = 0;
	d->deviceid = *id;
	atomic_set(&d->ref, 1);
}
EXPORT_SYMBOL_GPL(nfs4_init_deviceid_node);

/*
 * Dereference a deviceid node and delete it when its reference count drops
 * to zero.
 *
 * @d deviceid node to put
 *
 * return true iff the node was deleted
 * Note that the test for d->ref == 0 is sufficient to establish that the
 * node is no longer hashed in the global device id cache.
 */
bool
nfs4_put_deviceid_node(struct nfs4_deviceid_node *d)
{
	if (test_bit(NFS_DEVICEID_NOCACHE, &d->flags)) {
		if (atomic_add_unless(&d->ref, -1, 2))
			return false;
		nfs4_delete_deviceid(d->ld, d->nfs_client, &d->deviceid);
	}
	if (!atomic_dec_and_test(&d->ref))
		return false;
	d->ld->free_deviceid_node(d);
	return true;
}
EXPORT_SYMBOL_GPL(nfs4_put_deviceid_node);

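/*
 * Mark a deviceid as temporarily unavailable and record when that happened,
 * so that nfs4_test_deviceid_unavailable() can expire the state after
 * PNFS_DEVICE_RETRY_TIMEOUT.
 */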
void
nfs4_mark_deviceid_unavailable(struct nfs4_deviceid_node *node)
{
	node->timestamp_unavailable = jiffies;
	set_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
}
EXPORT_SYMBOL_GPL(nfs4_mark_deviceid_unavailable);

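/*
 * Return true if the deviceid was marked unavailable within the last
 * PNFS_DEVICE_RETRY_TIMEOUT jiffies; otherwise clear the flag and report
 * the device as usable again.
 */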
bool
nfs4_test_deviceid_unavailable(struct nfs4_deviceid_node *node)
{
	if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags)) {
		unsigned long start, end;

		end = jiffies;
		start = end - PNFS_DEVICE_RETRY_TIMEOUT;
		if (time_in_range(node->timestamp_unavailable, start, end))
			return true;
		clear_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
	}
	return false;
}
EXPORT_SYMBOL_GPL(nfs4_test_deviceid_unavailable);

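/*
 * Unhash every deviceid in one hash bucket that belongs to @clp, collecting
 * the nodes on a temporary list via their tmpnode member, then drop the
 * cache's reference on each of them after releasing the locks.
 */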
static void
_deviceid_purge_client(const struct nfs_client *clp, long hash)
{
	struct nfs4_deviceid_node *d;
	HLIST_HEAD(tmp);

	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
		if (d->nfs_client == clp && atomic_read(&d->ref)) {
			hlist_del_init_rcu(&d->node);
			hlist_add_head(&d->tmpnode, &tmp);
			clear_bit(NFS_DEVICEID_NOCACHE, &d->flags);
		}
	rcu_read_unlock();
	spin_unlock(&nfs4_deviceid_lock);

	if (hlist_empty(&tmp))
		return;

	while (!hlist_empty(&tmp)) {
		d = hlist_entry(tmp.first, struct nfs4_deviceid_node, tmpnode);
		hlist_del(&d->tmpnode);
		nfs4_put_deviceid_node(d);
	}
}

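/*
 * Purge all cached deviceids belonging to an nfs_client. Only relevant
 * when the client negotiated pNFS (EXCHGID4_FLAG_USE_PNFS_MDS).
 */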
void
nfs4_deviceid_purge_client(const struct nfs_client *clp)
{
	long h;

	if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_USE_PNFS_MDS))
		return;
	for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++)
		_deviceid_purge_client(clp, h);
}

/*
 * Stop use of all deviceids associated with an nfs_client
 */
void
nfs4_deviceid_mark_client_invalid(struct nfs_client *clp)
{
	struct nfs4_deviceid_node *d;
	int i;

	rcu_read_lock();
	for (i = 0; i < NFS4_DEVICE_ID_HASH_SIZE; i++) {
		hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[i], node)
			if (d->nfs_client == clp)
				set_bit(NFS_DEVICEID_INVALID, &d->flags);
	}
	rcu_read_unlock();
}