Loading...
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * (C) 2001 Clemson University and The University of Chicago
4 * (C) 2011 Omnibond Systems
5 *
6 * Changes by Acxiom Corporation to implement generic service_operation()
7 * function, Copyright Acxiom Corporation, 2005.
8 *
9 * See COPYING in top-level directory.
10 */
11
12/*
13 * In-kernel waitqueue operations.
14 */
15
16#include "protocol.h"
17#include "orangefs-kernel.h"
18#include "orangefs-bufmap.h"
19
20static int wait_for_matching_downcall(struct orangefs_kernel_op_s *op,
21 long timeout,
22 int flags)
23 __acquires(op->lock);
24static void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s *op)
25 __releases(op->lock);
26
27/*
28 * What we do in this function is to walk the list of operations that are
29 * present in the request queue and mark them as purged.
30 * NOTE: This is called from the device close after client-core has
31 * guaranteed that no new operations could appear on the list since the
32 * client-core is anyway going to exit.
33 */
void purge_waiting_ops(void)
{
	struct orangefs_kernel_op_s *op, *tmp;

	spin_lock(&orangefs_request_list_lock);
	/*
	 * _safe iteration because set_op_state_purged() may unlink the op
	 * from the request list.  NOTE(review): if set_op_state_purged()
	 * can also release the op (e.g. for cancel ops), the second
	 * gossip_debug below reads op->op_state after the fact, which
	 * would be a use-after-free -- confirm against its definition.
	 */
	list_for_each_entry_safe(op, tmp, &orangefs_request_list, list) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "pvfs2-client-core: purging op tag %llu %s\n",
			     llu(op->tag),
			     get_opname_string(op));
		/* mark purged; waiters see this and stop expecting service */
		set_op_state_purged(op);
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "%s: op:%s: op_state:%d: process:%s:\n",
			     __func__,
			     get_opname_string(op),
			     op->op_state,
			     current->comm);
	}
	spin_unlock(&orangefs_request_list_lock);
}
54
55/*
56 * submits a ORANGEFS operation and waits for it to complete
57 *
58 * Note op->downcall.status will contain the status of the operation (in
59 * errno format), whether provided by pvfs2-client or a result of failure to
60 * service the operation. If the caller wishes to distinguish, then
61 * op->state can be checked to see if it was serviced or not.
62 *
63 * Returns contents of op->downcall.status for convenience
64 */
65int service_operation(struct orangefs_kernel_op_s *op,
66 const char *op_name,
67 int flags)
68{
69 long timeout = MAX_SCHEDULE_TIMEOUT;
70 int ret = 0;
71
72 DEFINE_WAIT(wait_entry);
73
74 op->upcall.tgid = current->tgid;
75 op->upcall.pid = current->pid;
76
77retry_servicing:
78 op->downcall.status = 0;
79 gossip_debug(GOSSIP_WAIT_DEBUG,
80 "%s: %s op:%p: process:%s: pid:%d:\n",
81 __func__,
82 op_name,
83 op,
84 current->comm,
85 current->pid);
86
87 /*
88 * If ORANGEFS_OP_NO_MUTEX was set in flags, we need to avoid
89 * acquiring the request_mutex because we're servicing a
90 * high priority remount operation and the request_mutex is
91 * already taken.
92 */
93 if (!(flags & ORANGEFS_OP_NO_MUTEX)) {
94 if (flags & ORANGEFS_OP_INTERRUPTIBLE)
95 ret = mutex_lock_interruptible(&orangefs_request_mutex);
96 else
97 ret = mutex_lock_killable(&orangefs_request_mutex);
98 /*
99 * check to see if we were interrupted while waiting for
100 * mutex
101 */
102 if (ret < 0) {
103 op->downcall.status = ret;
104 gossip_debug(GOSSIP_WAIT_DEBUG,
105 "%s: service_operation interrupted.\n",
106 __func__);
107 return ret;
108 }
109 }
110
111 /* queue up the operation */
112 spin_lock(&orangefs_request_list_lock);
113 spin_lock(&op->lock);
114 set_op_state_waiting(op);
115 gossip_debug(GOSSIP_DEV_DEBUG,
116 "%s: op:%s: op_state:%d: process:%s:\n",
117 __func__,
118 get_opname_string(op),
119 op->op_state,
120 current->comm);
121 /* add high priority remount op to the front of the line. */
122 if (flags & ORANGEFS_OP_PRIORITY)
123 list_add(&op->list, &orangefs_request_list);
124 else
125 list_add_tail(&op->list, &orangefs_request_list);
126 spin_unlock(&op->lock);
127 wake_up_interruptible(&orangefs_request_list_waitq);
128 if (!__is_daemon_in_service()) {
129 gossip_debug(GOSSIP_WAIT_DEBUG,
130 "%s:client core is NOT in service.\n",
131 __func__);
132 /*
133 * Don't wait for the userspace component to return if
134 * the filesystem is being umounted anyway.
135 */
136 if (op->upcall.type == ORANGEFS_VFS_OP_FS_UMOUNT)
137 timeout = 0;
138 else
139 timeout = op_timeout_secs * HZ;
140 }
141 spin_unlock(&orangefs_request_list_lock);
142
143 if (!(flags & ORANGEFS_OP_NO_MUTEX))
144 mutex_unlock(&orangefs_request_mutex);
145
146 ret = wait_for_matching_downcall(op, timeout, flags);
147 gossip_debug(GOSSIP_WAIT_DEBUG,
148 "%s: wait_for_matching_downcall returned %d for %p\n",
149 __func__,
150 ret,
151 op);
152
153 /* got matching downcall; make sure status is in errno format */
154 if (!ret) {
155 spin_unlock(&op->lock);
156 op->downcall.status =
157 orangefs_normalize_to_errno(op->downcall.status);
158 ret = op->downcall.status;
159 goto out;
160 }
161
162 /* failed to get matching downcall */
163 if (ret == -ETIMEDOUT) {
164 gossip_err("%s: %s -- wait timed out; aborting attempt.\n",
165 __func__,
166 op_name);
167 }
168
169 /*
170 * remove a waiting op from the request list or
171 * remove an in-progress op from the in-progress list.
172 */
173 orangefs_clean_up_interrupted_operation(op);
174
175 op->downcall.status = ret;
176 /* retry if operation has not been serviced and if requested */
177 if (ret == -EAGAIN) {
178 op->attempts++;
179 timeout = op_timeout_secs * HZ;
180 gossip_debug(GOSSIP_WAIT_DEBUG,
181 "orangefs: tag %llu (%s)"
182 " -- operation to be retried (%d attempt)\n",
183 llu(op->tag),
184 op_name,
185 op->attempts);
186
187 /*
188 * io ops (ops that use the shared memory buffer) have
189 * to be returned to their caller for a retry. Other ops
190 * can just be recycled here.
191 */
192 if (!op->uses_shared_memory)
193 goto retry_servicing;
194 }
195
196out:
197 gossip_debug(GOSSIP_WAIT_DEBUG,
198 "%s: %s returning: %d for %p.\n",
199 __func__,
200 op_name,
201 ret,
202 op);
203 return ret;
204}
205
206/* This can get called on an I/O op if it had a bad service_operation. */
bool orangefs_cancel_op_in_progress(struct orangefs_kernel_op_s *op)
{
	u64 tag = op->tag;
	if (!op_state_in_progress(op))
		return false;

	/*
	 * Recycle the op in place as a cancel upcall: remember which
	 * shared-memory slot must eventually be freed, then rebuild the
	 * upcall/downcall.  The tag must be saved before the memset and
	 * is the tag the cancel targets; the op itself gets a new tag.
	 */
	op->slot_to_free = op->upcall.req.io.buf_index;
	memset(&op->upcall, 0, sizeof(op->upcall));
	memset(&op->downcall, 0, sizeof(op->downcall));
	op->upcall.type = ORANGEFS_VFS_OP_CANCEL;
	op->upcall.req.cancel.op_tag = tag;
	op->downcall.type = ORANGEFS_VFS_OP_INVALID;
	op->downcall.status = -1;
	orangefs_new_tag(op);

	spin_lock(&orangefs_request_list_lock);
	/* orangefs_request_list_lock is enough of a barrier here */
	if (!__is_daemon_in_service()) {
		/* no client-core alive to deliver the cancel to */
		spin_unlock(&orangefs_request_list_lock);
		return false;
	}
	spin_lock(&op->lock);
	set_op_state_waiting(op);
	gossip_debug(GOSSIP_DEV_DEBUG,
		     "%s: op:%s: op_state:%d: process:%s:\n",
		     __func__,
		     get_opname_string(op),
		     op->op_state,
		     current->comm);
	/* cancels jump to the front of the request queue */
	list_add(&op->list, &orangefs_request_list);
	spin_unlock(&op->lock);
	spin_unlock(&orangefs_request_list_lock);

	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "Attempting ORANGEFS operation cancellation of tag %llu\n",
		     llu(tag));
	return true;
}
245
246/*
247 * Change an op to the "given up" state and remove it from its list.
248 */
static void
	orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s *op)
	__releases(op->lock)
{
	/*
	 * handle interrupted cases depending on what state we were in when
	 * the interruption is detected.
	 *
	 * Called with op->lock held; op->lock is dropped before taking any
	 * list lock (lock ordering), and is NOT held on return.
	 */

	/*
	 * List manipulation code elsewhere will ignore ops that
	 * have been given up upon.
	 */
	op->op_state |= OP_VFS_STATE_GIVEN_UP;

	if (list_empty(&op->list)) {
		/*
		 * caught copying to/from daemon: op is on neither list, so
		 * the device code owns it right now; wait for it to signal
		 * completion before touching the op again.
		 */
		BUG_ON(op_state_serviced(op));
		spin_unlock(&op->lock);
		wait_for_completion(&op->waitq);
	} else if (op_state_waiting(op)) {
		/*
		 * upcall hasn't been read; remove op from upcall request
		 * list.
		 */
		spin_unlock(&op->lock);
		spin_lock(&orangefs_request_list_lock);
		list_del_init(&op->list);
		spin_unlock(&orangefs_request_list_lock);
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "Interrupted: Removed op %p from request_list\n",
			     op);
	} else if (op_state_in_progress(op)) {
		/* op must be removed from the in progress htable */
		spin_unlock(&op->lock);
		spin_lock(&orangefs_htable_ops_in_progress_lock);
		list_del_init(&op->list);
		spin_unlock(&orangefs_htable_ops_in_progress_lock);
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "Interrupted: Removed op %p"
			     " from htable_ops_in_progress\n",
			     op);
	} else {
		/* unexpected state: log it, but still release the lock */
		spin_unlock(&op->lock);
		gossip_err("interrupted operation is in a weird state 0x%x\n",
			   op->op_state);
	}
	/* reset so the op can be re-queued and waited on again */
	reinit_completion(&op->waitq);
}
300
301/*
302 * Sleeps on waitqueue waiting for matching downcall.
303 * If client-core finishes servicing, then we are good to go.
304 * else if client-core exits, we get woken up here, and retry with a timeout
305 *
306 * When this call returns to the caller, the specified op will no
307 * longer be in either the in_progress hash table or on the request list.
308 *
309 * Returns 0 on success and -errno on failure
310 * Errors are:
311 * EAGAIN in case we want the caller to requeue and try again..
312 * EINTR/EIO/ETIMEDOUT indicating we are done trying to service this
313 * operation since client-core seems to be exiting too often
314 * or if we were interrupted.
315 *
316 * Returns with op->lock taken.
317 */
318static int wait_for_matching_downcall(struct orangefs_kernel_op_s *op,
319 long timeout,
320 int flags)
321 __acquires(op->lock)
322{
323 long n;
324 int writeback = flags & ORANGEFS_OP_WRITEBACK,
325 interruptible = flags & ORANGEFS_OP_INTERRUPTIBLE;
326
327 /*
328 * There's a "schedule_timeout" inside of these wait
329 * primitives, during which the op is out of the hands of the
330 * user process that needs something done and is being
331 * manipulated by the client-core process.
332 */
333 if (writeback)
334 n = wait_for_completion_io_timeout(&op->waitq, timeout);
335 else if (!writeback && interruptible)
336 n = wait_for_completion_interruptible_timeout(&op->waitq,
337 timeout);
338 else /* !writeback && !interruptible but compiler complains */
339 n = wait_for_completion_killable_timeout(&op->waitq, timeout);
340
341 spin_lock(&op->lock);
342
343 if (op_state_serviced(op))
344 return 0;
345
346 if (unlikely(n < 0)) {
347 gossip_debug(GOSSIP_WAIT_DEBUG,
348 "%s: operation interrupted, tag %llu, %p\n",
349 __func__,
350 llu(op->tag),
351 op);
352 return -EINTR;
353 }
354 if (op_state_purged(op)) {
355 gossip_debug(GOSSIP_WAIT_DEBUG,
356 "%s: operation purged, tag %llu, %p, %d\n",
357 __func__,
358 llu(op->tag),
359 op,
360 op->attempts);
361 return (op->attempts < ORANGEFS_PURGE_RETRY_COUNT) ?
362 -EAGAIN :
363 -EIO;
364 }
365 /* must have timed out, then... */
366 gossip_debug(GOSSIP_WAIT_DEBUG,
367 "%s: operation timed out, tag %llu, %p, %d)\n",
368 __func__,
369 llu(op->tag),
370 op,
371 op->attempts);
372 return -ETIMEDOUT;
373}
1/*
2 * (C) 2001 Clemson University and The University of Chicago
3 * (C) 2011 Omnibond Systems
4 *
5 * Changes by Acxiom Corporation to implement generic service_operation()
6 * function, Copyright Acxiom Corporation, 2005.
7 *
8 * See COPYING in top-level directory.
9 */
10
11/*
12 * In-kernel waitqueue operations.
13 */
14
15#include "protocol.h"
16#include "orangefs-kernel.h"
17#include "orangefs-bufmap.h"
18
19static int wait_for_matching_downcall(struct orangefs_kernel_op_s *, long, bool);
20static void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s *);
21
22/*
23 * What we do in this function is to walk the list of operations that are
24 * present in the request queue and mark them as purged.
25 * NOTE: This is called from the device close after client-core has
26 * guaranteed that no new operations could appear on the list since the
27 * client-core is anyway going to exit.
28 */
29void purge_waiting_ops(void)
30{
31 struct orangefs_kernel_op_s *op;
32
33 spin_lock(&orangefs_request_list_lock);
34 list_for_each_entry(op, &orangefs_request_list, list) {
35 gossip_debug(GOSSIP_WAIT_DEBUG,
36 "pvfs2-client-core: purging op tag %llu %s\n",
37 llu(op->tag),
38 get_opname_string(op));
39 set_op_state_purged(op);
40 gossip_debug(GOSSIP_DEV_DEBUG,
41 "%s: op:%s: op_state:%d: process:%s:\n",
42 __func__,
43 get_opname_string(op),
44 op->op_state,
45 current->comm);
46 }
47 spin_unlock(&orangefs_request_list_lock);
48}
49
50/*
51 * submits a ORANGEFS operation and waits for it to complete
52 *
53 * Note op->downcall.status will contain the status of the operation (in
54 * errno format), whether provided by pvfs2-client or a result of failure to
55 * service the operation. If the caller wishes to distinguish, then
56 * op->state can be checked to see if it was serviced or not.
57 *
58 * Returns contents of op->downcall.status for convenience
59 */
60int service_operation(struct orangefs_kernel_op_s *op,
61 const char *op_name,
62 int flags)
63{
64 long timeout = MAX_SCHEDULE_TIMEOUT;
65 int ret = 0;
66
67 DEFINE_WAIT(wait_entry);
68
69 op->upcall.tgid = current->tgid;
70 op->upcall.pid = current->pid;
71
72retry_servicing:
73 op->downcall.status = 0;
74 gossip_debug(GOSSIP_WAIT_DEBUG,
75 "%s: %s op:%p: process:%s: pid:%d:\n",
76 __func__,
77 op_name,
78 op,
79 current->comm,
80 current->pid);
81
82 /*
83 * If ORANGEFS_OP_NO_MUTEX was set in flags, we need to avoid
84 * acquiring the request_mutex because we're servicing a
85 * high priority remount operation and the request_mutex is
86 * already taken.
87 */
88 if (!(flags & ORANGEFS_OP_NO_MUTEX)) {
89 if (flags & ORANGEFS_OP_INTERRUPTIBLE)
90 ret = mutex_lock_interruptible(&request_mutex);
91 else
92 ret = mutex_lock_killable(&request_mutex);
93 /*
94 * check to see if we were interrupted while waiting for
95 * mutex
96 */
97 if (ret < 0) {
98 op->downcall.status = ret;
99 gossip_debug(GOSSIP_WAIT_DEBUG,
100 "%s: service_operation interrupted.\n",
101 __func__);
102 return ret;
103 }
104 }
105
106 /* queue up the operation */
107 spin_lock(&orangefs_request_list_lock);
108 spin_lock(&op->lock);
109 set_op_state_waiting(op);
110 gossip_debug(GOSSIP_DEV_DEBUG,
111 "%s: op:%s: op_state:%d: process:%s:\n",
112 __func__,
113 get_opname_string(op),
114 op->op_state,
115 current->comm);
116 /* add high priority remount op to the front of the line. */
117 if (flags & ORANGEFS_OP_PRIORITY)
118 list_add(&op->list, &orangefs_request_list);
119 else
120 list_add_tail(&op->list, &orangefs_request_list);
121 spin_unlock(&op->lock);
122 wake_up_interruptible(&orangefs_request_list_waitq);
123 if (!__is_daemon_in_service()) {
124 gossip_debug(GOSSIP_WAIT_DEBUG,
125 "%s:client core is NOT in service.\n",
126 __func__);
127 timeout = op_timeout_secs * HZ;
128 }
129 spin_unlock(&orangefs_request_list_lock);
130
131 if (!(flags & ORANGEFS_OP_NO_MUTEX))
132 mutex_unlock(&request_mutex);
133
134 ret = wait_for_matching_downcall(op, timeout,
135 flags & ORANGEFS_OP_INTERRUPTIBLE);
136
137 gossip_debug(GOSSIP_WAIT_DEBUG,
138 "%s: wait_for_matching_downcall returned %d for %p\n",
139 __func__,
140 ret,
141 op);
142
143 /* got matching downcall; make sure status is in errno format */
144 if (!ret) {
145 spin_unlock(&op->lock);
146 op->downcall.status =
147 orangefs_normalize_to_errno(op->downcall.status);
148 ret = op->downcall.status;
149 goto out;
150 }
151
152 /* failed to get matching downcall */
153 if (ret == -ETIMEDOUT) {
154 gossip_err("%s: %s -- wait timed out; aborting attempt.\n",
155 __func__,
156 op_name);
157 }
158
159 /*
160 * remove a waiting op from the request list or
161 * remove an in-progress op from the in-progress list.
162 */
163 orangefs_clean_up_interrupted_operation(op);
164
165 op->downcall.status = ret;
166 /* retry if operation has not been serviced and if requested */
167 if (ret == -EAGAIN) {
168 op->attempts++;
169 timeout = op_timeout_secs * HZ;
170 gossip_debug(GOSSIP_WAIT_DEBUG,
171 "orangefs: tag %llu (%s)"
172 " -- operation to be retried (%d attempt)\n",
173 llu(op->tag),
174 op_name,
175 op->attempts);
176
177 /*
178 * io ops (ops that use the shared memory buffer) have
179 * to be returned to their caller for a retry. Other ops
180 * can just be recycled here.
181 */
182 if (!op->uses_shared_memory)
183 goto retry_servicing;
184 }
185
186out:
187 gossip_debug(GOSSIP_WAIT_DEBUG,
188 "%s: %s returning: %d for %p.\n",
189 __func__,
190 op_name,
191 ret,
192 op);
193 return ret;
194}
195
196/* This can get called on an I/O op if it had a bad service_operation. */
bool orangefs_cancel_op_in_progress(struct orangefs_kernel_op_s *op)
{
	u64 tag = op->tag;
	if (!op_state_in_progress(op))
		return false;

	/*
	 * Recycle the op in place as a cancel upcall: remember which
	 * shared-memory slot must eventually be freed, then rebuild the
	 * upcall/downcall.  The tag must be saved before the memset and
	 * is the tag the cancel targets; the op itself gets a new tag.
	 */
	op->slot_to_free = op->upcall.req.io.buf_index;
	memset(&op->upcall, 0, sizeof(op->upcall));
	memset(&op->downcall, 0, sizeof(op->downcall));
	op->upcall.type = ORANGEFS_VFS_OP_CANCEL;
	op->upcall.req.cancel.op_tag = tag;
	op->downcall.type = ORANGEFS_VFS_OP_INVALID;
	op->downcall.status = -1;
	orangefs_new_tag(op);

	spin_lock(&orangefs_request_list_lock);
	/* orangefs_request_list_lock is enough of a barrier here */
	if (!__is_daemon_in_service()) {
		/* no client-core alive to deliver the cancel to */
		spin_unlock(&orangefs_request_list_lock);
		return false;
	}
	spin_lock(&op->lock);
	set_op_state_waiting(op);
	gossip_debug(GOSSIP_DEV_DEBUG,
		     "%s: op:%s: op_state:%d: process:%s:\n",
		     __func__,
		     get_opname_string(op),
		     op->op_state,
		     current->comm);
	/* cancels jump to the front of the request queue */
	list_add(&op->list, &orangefs_request_list);
	spin_unlock(&op->lock);
	spin_unlock(&orangefs_request_list_lock);

	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "Attempting ORANGEFS operation cancellation of tag %llu\n",
		     llu(tag));
	return true;
}
235
236/*
237 * Change an op to the "given up" state and remove it from its list.
238 */
static void
	orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s *op)
{
	/*
	 * handle interrupted cases depending on what state we were in when
	 * the interruption is detected.
	 *
	 * Called with op->lock held; op->lock is dropped before taking any
	 * list lock (lock ordering), and is NOT held on return.
	 */

	/*
	 * List manipulation code elsewhere will ignore ops that
	 * have been given up upon.
	 */
	op->op_state |= OP_VFS_STATE_GIVEN_UP;

	if (list_empty(&op->list)) {
		/*
		 * caught copying to/from daemon: op is on neither list, so
		 * the device code owns it right now; wait for it to signal
		 * completion before touching the op again.
		 */
		BUG_ON(op_state_serviced(op));
		spin_unlock(&op->lock);
		wait_for_completion(&op->waitq);
	} else if (op_state_waiting(op)) {
		/*
		 * upcall hasn't been read; remove op from upcall request
		 * list.
		 */
		spin_unlock(&op->lock);
		spin_lock(&orangefs_request_list_lock);
		list_del_init(&op->list);
		spin_unlock(&orangefs_request_list_lock);
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "Interrupted: Removed op %p from request_list\n",
			     op);
	} else if (op_state_in_progress(op)) {
		/* op must be removed from the in progress htable */
		spin_unlock(&op->lock);
		spin_lock(&htable_ops_in_progress_lock);
		list_del_init(&op->list);
		spin_unlock(&htable_ops_in_progress_lock);
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "Interrupted: Removed op %p"
			     " from htable_ops_in_progress\n",
			     op);
	} else {
		/* unexpected state: log it, but still release the lock */
		spin_unlock(&op->lock);
		gossip_err("interrupted operation is in a weird state 0x%x\n",
			   op->op_state);
	}
	/* reset so the op can be re-queued and waited on again */
	reinit_completion(&op->waitq);
}
289
290/*
291 * Sleeps on waitqueue waiting for matching downcall.
292 * If client-core finishes servicing, then we are good to go.
293 * else if client-core exits, we get woken up here, and retry with a timeout
294 *
295 * When this call returns to the caller, the specified op will no
296 * longer be in either the in_progress hash table or on the request list.
297 *
298 * Returns 0 on success and -errno on failure
299 * Errors are:
300 * EAGAIN in case we want the caller to requeue and try again..
301 * EINTR/EIO/ETIMEDOUT indicating we are done trying to service this
302 * operation since client-core seems to be exiting too often
303 * or if we were interrupted.
304 *
305 * Returns with op->lock taken.
306 */
307static int wait_for_matching_downcall(struct orangefs_kernel_op_s *op,
308 long timeout,
309 bool interruptible)
310{
311 long n;
312
313 /*
314 * There's a "schedule_timeout" inside of these wait
315 * primitives, during which the op is out of the hands of the
316 * user process that needs something done and is being
317 * manipulated by the client-core process.
318 */
319 if (interruptible)
320 n = wait_for_completion_interruptible_timeout(&op->waitq,
321 timeout);
322 else
323 n = wait_for_completion_killable_timeout(&op->waitq, timeout);
324
325 spin_lock(&op->lock);
326
327 if (op_state_serviced(op))
328 return 0;
329
330 if (unlikely(n < 0)) {
331 gossip_debug(GOSSIP_WAIT_DEBUG,
332 "%s: operation interrupted, tag %llu, %p\n",
333 __func__,
334 llu(op->tag),
335 op);
336 return -EINTR;
337 }
338 if (op_state_purged(op)) {
339 gossip_debug(GOSSIP_WAIT_DEBUG,
340 "%s: operation purged, tag %llu, %p, %d\n",
341 __func__,
342 llu(op->tag),
343 op,
344 op->attempts);
345 return (op->attempts < ORANGEFS_PURGE_RETRY_COUNT) ?
346 -EAGAIN :
347 -EIO;
348 }
349 /* must have timed out, then... */
350 gossip_debug(GOSSIP_WAIT_DEBUG,
351 "%s: operation timed out, tag %llu, %p, %d)\n",
352 __func__,
353 llu(op->tag),
354 op,
355 op->attempts);
356 return -ETIMEDOUT;
357}