/*
 * Device operations for the pnfs client.
 *
 * Copyright (c) 2002
 * The Regents of the University of Michigan
 * All Rights Reserved
 *
 * Dean Hildebrand <dhildebz@umich.edu>
 * Garth Goodson <Garth.Goodson@netapp.com>
 *
 * Permission is granted to use, copy, create derivative works, and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the University of Michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. If
 * the above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * This software is provided as is, without representation or warranty
 * of any kind either express or implied, including without limitation
 * the implied warranties of merchantability, fitness for a particular
 * purpose, or noninfringement. The Regents of the University of
 * Michigan shall not be liable for any damages, including special,
 * indirect, incidental, or consequential damages, with respect to any
 * claim arising out of or in connection with the use of the software,
 * even if it has been or is hereafter advised of the possibility of
 * such damages.
 */

#include <linux/export.h>
#include <linux/nfs_fs.h>
#include "nfs4session.h"
#include "internal.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS

/*
 * Device ID RCU cache. A device ID is unique per server and layout type.
 */
#define NFS4_DEVICE_ID_HASH_BITS	5
#define NFS4_DEVICE_ID_HASH_SIZE	(1 << NFS4_DEVICE_ID_HASH_BITS)
#define NFS4_DEVICE_ID_HASH_MASK	(NFS4_DEVICE_ID_HASH_SIZE - 1)

static struct hlist_head nfs4_deviceid_cache[NFS4_DEVICE_ID_HASH_SIZE];
static DEFINE_SPINLOCK(nfs4_deviceid_lock);
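
/*
 * A rough summary of the locking as implemented below: readers walk the
 * per-bucket hash chains under rcu_read_lock() and take a reference with
 * atomic_inc_not_zero(), so nodes being torn down are skipped; insertion
 * into and removal from the hash chains are serialized by
 * nfs4_deviceid_lock.
 */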

#ifdef NFS_DEBUG
void
nfs4_print_deviceid(const struct nfs4_deviceid *id)
{
	u32 *p = (u32 *)id;

	dprintk("%s: device id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
}
EXPORT_SYMBOL_GPL(nfs4_print_deviceid);
#endif

static inline u32
nfs4_deviceid_hash(const struct nfs4_deviceid *id)
{
	unsigned char *cptr = (unsigned char *)id->data;
	unsigned int nbytes = NFS4_DEVICEID4_SIZE;
	u32 x = 0;

	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x & NFS4_DEVICE_ID_HASH_MASK;
}
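
/*
 * A small worked example of the hash above: the accumulator is a
 * multiply-by-37 rolling sum over all NFS4_DEVICEID4_SIZE bytes, masked
 * down to the low NFS4_DEVICE_ID_HASH_BITS bits.  An all-zero device ID
 * therefore lands in bucket 0, and a device ID that is all zeroes except
 * for a final 0x01 byte lands in bucket 1; only 32 buckets exist, so
 * collisions are expected and handled by the hlist chains.
 */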

static struct nfs4_deviceid_node *
_lookup_deviceid(const struct pnfs_layoutdriver_type *ld,
		 const struct nfs_client *clp, const struct nfs4_deviceid *id,
		 long hash)
{
	struct nfs4_deviceid_node *d;

	hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
		if (d->ld == ld && d->nfs_client == clp &&
		    !memcmp(&d->deviceid, id, sizeof(*id))) {
			if (atomic_read(&d->ref))
				return d;
			else
				continue;
		}
	return NULL;
}

static struct nfs4_deviceid_node *
nfs4_get_device_info(struct nfs_server *server,
		const struct nfs4_deviceid *dev_id,
		const struct cred *cred, gfp_t gfp_flags)
{
	struct nfs4_deviceid_node *d = NULL;
	struct pnfs_device *pdev = NULL;
	struct page **pages = NULL;
	u32 max_resp_sz;
	int max_pages;
	int rc, i;

	/*
	 * Use the session max response size as the basis for setting
	 * GETDEVICEINFO's maxcount
	 */
	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	if (server->pnfs_curr_ld->max_deviceinfo_size &&
	    server->pnfs_curr_ld->max_deviceinfo_size < max_resp_sz)
		max_resp_sz = server->pnfs_curr_ld->max_deviceinfo_size;
	max_pages = nfs_page_array_len(0, max_resp_sz);
	dprintk("%s: server %p max_resp_sz %u max_pages %d\n",
		__func__, server, max_resp_sz, max_pages);

	pdev = kzalloc(sizeof(*pdev), gfp_flags);
	if (!pdev)
		return NULL;

	pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags);
	if (!pages)
		goto out_free_pdev;

	for (i = 0; i < max_pages; i++) {
		pages[i] = alloc_page(gfp_flags);
		if (!pages[i])
			goto out_free_pages;
	}

	memcpy(&pdev->dev_id, dev_id, sizeof(*dev_id));
	pdev->layout_type = server->pnfs_curr_ld->id;
	pdev->pages = pages;
	pdev->pgbase = 0;
	pdev->pglen = max_resp_sz;
	pdev->mincount = 0;
	pdev->maxcount = max_resp_sz - nfs41_maxgetdevinfo_overhead;

	rc = nfs4_proc_getdeviceinfo(server, pdev, cred);
	dprintk("%s getdevice info returns %d\n", __func__, rc);
	if (rc)
		goto out_free_pages;

	/*
	 * Found new device, need to decode it and then add it to the
	 * list of known devices for this mountpoint.
	 */
	d = server->pnfs_curr_ld->alloc_deviceid_node(server, pdev,
			gfp_flags);
	if (d && pdev->nocache)
		set_bit(NFS_DEVICEID_NOCACHE, &d->flags);

out_free_pages:
	/*
	 * Only free pages that were actually allocated; alloc_page() may
	 * have failed part way through, leaving later slots NULL.
	 */
	for (i = 0; i < max_pages; i++) {
		if (pages[i])
			__free_page(pages[i]);
	}
	kfree(pages);
out_free_pdev:
	kfree(pdev);
	dprintk("<-- %s d %p\n", __func__, d);
	return d;
}
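
/*
 * For a sense of the buffer sizing above (an illustrative calculation, not
 * taken from the original source): with a session max_resp_sz of 64 KiB,
 * 4 KiB pages and no layout-driver max_deviceinfo_size cap,
 * nfs_page_array_len(0, max_resp_sz) yields max_pages = 16, and
 * pdev->maxcount is 64 KiB minus nfs41_maxgetdevinfo_overhead, the fixed
 * reply overhead reserved for decoding the GETDEVICEINFO response.
 */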

/*
 * Lookup a deviceid in cache and get a reference count on it if found
 *
 * @server nfs_server associated with deviceid
 * @id deviceid to look up
 */
static struct nfs4_deviceid_node *
__nfs4_find_get_deviceid(struct nfs_server *server,
		const struct nfs4_deviceid *id, long hash)
{
	struct nfs4_deviceid_node *d;

	rcu_read_lock();
	d = _lookup_deviceid(server->pnfs_curr_ld, server->nfs_client, id,
			hash);
	if (d != NULL && !atomic_inc_not_zero(&d->ref))
		d = NULL;
	rcu_read_unlock();
	return d;
}

struct nfs4_deviceid_node *
nfs4_find_get_deviceid(struct nfs_server *server,
		const struct nfs4_deviceid *id, const struct cred *cred,
		gfp_t gfp_mask)
{
	long hash = nfs4_deviceid_hash(id);
	struct nfs4_deviceid_node *d, *new;

	d = __nfs4_find_get_deviceid(server, id, hash);
	if (d)
		return d;

	new = nfs4_get_device_info(server, id, cred, gfp_mask);
	if (!new)
		return new;

	spin_lock(&nfs4_deviceid_lock);
	d = __nfs4_find_get_deviceid(server, id, hash);
	if (d) {
		spin_unlock(&nfs4_deviceid_lock);
		server->pnfs_curr_ld->free_deviceid_node(new);
		return d;
	}
	hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]);
	atomic_inc(&new->ref);
	spin_unlock(&nfs4_deviceid_lock);

	return new;
}
EXPORT_SYMBOL_GPL(nfs4_find_get_deviceid);
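
/*
 * A minimal usage sketch for a layout driver (hypothetical caller; the
 * variable names and error handling are illustrative and not taken from
 * any particular driver):
 *
 *	struct nfs4_deviceid_node *node;
 *
 *	node = nfs4_find_get_deviceid(server, &deviceid, cred, GFP_KERNEL);
 *	if (!node)
 *		return -ENODEV;
 *	if (nfs4_test_deviceid_unavailable(node)) {
 *		nfs4_put_deviceid_node(node);
 *		return -ENODEV;
 *	}
 *	... use the device, then drop the reference ...
 *	nfs4_put_deviceid_node(node);
 *
 * The lookup either finds a cached node (taking a reference) or issues
 * GETDEVICEINFO and inserts the newly decoded node into the cache.
 */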

/*
 * Remove a deviceid from cache
 *
 * @ld layout driver that owns the deviceid
 * @clp nfs_client associated with deviceid
 * @id the deviceid to unhash
 */
void
nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *ld,
		const struct nfs_client *clp, const struct nfs4_deviceid *id)
{
	struct nfs4_deviceid_node *d;

	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	d = _lookup_deviceid(ld, clp, id, nfs4_deviceid_hash(id));
	rcu_read_unlock();
	if (!d) {
		spin_unlock(&nfs4_deviceid_lock);
		return;
	}
	hlist_del_init_rcu(&d->node);
	clear_bit(NFS_DEVICEID_NOCACHE, &d->flags);
	spin_unlock(&nfs4_deviceid_lock);

	/* balance the initial ref taken when the node was inserted into the cache */
	nfs4_put_deviceid_node(d);
}
EXPORT_SYMBOL_GPL(nfs4_delete_deviceid);

void
nfs4_init_deviceid_node(struct nfs4_deviceid_node *d, struct nfs_server *server,
			const struct nfs4_deviceid *id)
{
	INIT_HLIST_NODE(&d->node);
	INIT_HLIST_NODE(&d->tmpnode);
	d->ld = server->pnfs_curr_ld;
	d->nfs_client = server->nfs_client;
	d->flags = 0;
	d->deviceid = *id;
	atomic_set(&d->ref, 1);
}
EXPORT_SYMBOL_GPL(nfs4_init_deviceid_node);

/*
 * Dereference a deviceid node and delete it when its reference count drops
 * to zero.
 *
 * @d deviceid node to put
 *
 * return true iff the node was deleted
 * Note that d->ref reaching zero is sufficient to establish that the node
 * is no longer hashed in the global device id cache, so it can be freed.
 */
bool
nfs4_put_deviceid_node(struct nfs4_deviceid_node *d)
{
	if (test_bit(NFS_DEVICEID_NOCACHE, &d->flags)) {
		if (atomic_add_unless(&d->ref, -1, 2))
			return false;
		nfs4_delete_deviceid(d->ld, d->nfs_client, &d->deviceid);
	}
	if (!atomic_dec_and_test(&d->ref))
		return false;
	d->ld->free_deviceid_node(d);
	return true;
}
EXPORT_SYMBOL_GPL(nfs4_put_deviceid_node);
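
/*
 * A worked example of the NFS_DEVICEID_NOCACHE put path above (reasoning
 * about the code, not an additional guarantee): an uncached node sits at
 * ref == 2 once inserted (1 from nfs4_init_deviceid_node() held by the
 * looking-up caller, +1 taken for the hash table).  While extra users hold
 * it (ref > 2), atomic_add_unless() simply drops one reference and the node
 * stays hashed.  When the last user puts it (ref == 2), atomic_add_unless()
 * refuses to decrement, nfs4_delete_deviceid() unhashes the node and drops
 * the cache's reference, and the final atomic_dec_and_test() here frees it.
 */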

void
nfs4_mark_deviceid_available(struct nfs4_deviceid_node *node)
{
	if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags)) {
		clear_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
		smp_mb__after_atomic();
	}
}
EXPORT_SYMBOL_GPL(nfs4_mark_deviceid_available);

void
nfs4_mark_deviceid_unavailable(struct nfs4_deviceid_node *node)
{
	node->timestamp_unavailable = jiffies;
	smp_mb__before_atomic();
	set_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
	smp_mb__after_atomic();
}
EXPORT_SYMBOL_GPL(nfs4_mark_deviceid_unavailable);

bool
nfs4_test_deviceid_unavailable(struct nfs4_deviceid_node *node)
{
	if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags)) {
		unsigned long start, end;

		end = jiffies;
		start = end - PNFS_DEVICE_RETRY_TIMEOUT;
		if (time_in_range(node->timestamp_unavailable, start, end))
			return true;
		clear_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
		smp_mb__after_atomic();
	}
	return false;
}
EXPORT_SYMBOL_GPL(nfs4_test_deviceid_unavailable);
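
/*
 * The "unavailable" state is sticky for PNFS_DEVICE_RETRY_TIMEOUT (defined
 * in a pNFS header; historically 120 * HZ, i.e. two minutes): within that
 * window the test above keeps returning true, and once the window has
 * passed the flag is cleared so the device will be retried.
 */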

static void
_deviceid_purge_client(const struct nfs_client *clp, long hash)
{
	struct nfs4_deviceid_node *d;
	HLIST_HEAD(tmp);

	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
		if (d->nfs_client == clp && atomic_read(&d->ref)) {
			hlist_del_init_rcu(&d->node);
			hlist_add_head(&d->tmpnode, &tmp);
			clear_bit(NFS_DEVICEID_NOCACHE, &d->flags);
		}
	rcu_read_unlock();
	spin_unlock(&nfs4_deviceid_lock);

	if (hlist_empty(&tmp))
		return;

	while (!hlist_empty(&tmp)) {
		d = hlist_entry(tmp.first, struct nfs4_deviceid_node, tmpnode);
		hlist_del(&d->tmpnode);
		nfs4_put_deviceid_node(d);
	}
}

void
nfs4_deviceid_purge_client(const struct nfs_client *clp)
{
	long h;

	if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_USE_PNFS_MDS))
		return;
	for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++)
		_deviceid_purge_client(clp, h);
}

/*
 * Stop use of all deviceids associated with an nfs_client
 */
void
nfs4_deviceid_mark_client_invalid(struct nfs_client *clp)
{
	struct nfs4_deviceid_node *d;
	int i;

	rcu_read_lock();
	for (i = 0; i < NFS4_DEVICE_ID_HASH_SIZE; i++) {
		hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[i], node)
			if (d->nfs_client == clp)
				set_bit(NFS_DEVICEID_INVALID, &d->flags);
	}
	rcu_read_unlock();
}