v3.1
/*
   drbd_req.h

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2006-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2006-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
   Copyright (C) 2006-2008, Philipp Reisner <philipp.reisner@linbit.com>.

   DRBD is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   DRBD is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef _DRBD_REQ_H
#define _DRBD_REQ_H

#include <linux/module.h>

#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_wrappers.h"

/* The request callbacks will be called in irq context by the IDE drivers,
   and in Softirqs/Tasklets/BH context by the SCSI drivers,
   and by the receiver and worker in kernel-thread context.
   Try to get the locking right :) */

/*
 * Objects of type struct drbd_request only exist on an R_PRIMARY node, and are
 * associated with IO requests originating from the block layer above us.
 *
 * There are quite a few things that may happen to a drbd request
 * during its lifetime.
 *
 *  It will be created.
 *  It will be marked with the intention to be
 *    submitted to local disk and/or
 *    sent via the network.
 *
 *  It has to be placed on the transfer log and other housekeeping lists,
 *  in case we have a network connection.
 *
 *  It may be identified as a concurrent (write) request
 *    and be handled accordingly.
 *
 *  It may be handed over to the local disk subsystem.
 *  It may be completed by the local disk subsystem,
 *    either successfully or with io-error.
 *  In case it is a READ request, and it failed locally,
 *    it may be retried remotely.
 *
 *  It may be queued for sending.
 *  It may be handed over to the network stack,
 *    which may fail.
 *  It may be acknowledged by the "peer" according to the wire_protocol in use.
 *    This may be a negative ack.
 *  It may receive a faked ack when the network connection is lost and the
 *  transfer log is cleaned up.
 *  Sending may be canceled due to network connection loss.
 *  When it finally has outlived its time,
 *    corresponding dirty bits in the resync-bitmap may be cleared or set,
 *    it will be destroyed,
 *    and completion will be signalled to the originator,
 *      with or without "success".
 */

enum drbd_req_event {
	created,
	to_be_send,
	to_be_submitted,

	/* XXX yes, now I am inconsistent...
	 * these are not "events" but "actions"
	 * oh, well... */
	queue_for_net_write,
	queue_for_net_read,
	queue_for_send_oos,

	send_canceled,
	send_failed,
	handed_over_to_network,
	oos_handed_to_network,
	connection_lost_while_pending,
	read_retry_remote_canceled,
	recv_acked_by_peer,
	write_acked_by_peer,
	write_acked_by_peer_and_sis, /* and set_in_sync */
	conflict_discarded_by_peer,
	neg_acked,
	barrier_acked, /* in protocol A and B */
	data_received, /* (remote read) */

	read_completed_with_error,
	read_ahead_completed_with_error,
	write_completed_with_error,
	completed_ok,
	resend,
	fail_frozen_disk_io,
	restart_frozen_disk_io,
	nothing, /* for tracing only */
};

/* encoding of request states for now.  we don't actually need that many bits.
 * we don't need to do atomic bit operations either, since most of the time we
 * need to look at the connection state and/or manipulate some lists at the
 * same time, so we should hold the request lock anyway.
 */
enum drbd_req_state_bits {
	/* 210
	 * 000: no local possible
	 * 001: to be submitted
	 *    UNUSED, we could map: 011: submitted, completion still pending
	 * 110: completed ok
	 * 010: completed with error
	 */
	__RQ_LOCAL_PENDING,
	__RQ_LOCAL_COMPLETED,
	__RQ_LOCAL_OK,

	/* 76543
	 * 00000: no network possible
	 * 00001: to be sent
	 * 00011: to be sent, on worker queue
	 * 00101: sent, expecting recv_ack (B) or write_ack (C)
	 * 11101: sent,
	 *        recv_ack (B) or implicit "ack" (A),
	 *        still waiting for the barrier ack.
	 *        master_bio may already be completed and invalidated.
	 * 11100: write_acked (C),
	 *        data_received (for remote read, any protocol)
	 *        or finally the barrier ack has arrived (B,A)...
	 *        request can be freed
	 * 01100: neg-acked (write, protocol C)
	 *        or neg-d-acked (read, any protocol)
	 *        or killed from the transfer log
	 *        during cleanup after connection loss
	 *        request can be freed
	 * 01000: canceled or send failed...
	 *        request can be freed
	 */

	/* if "SENT" is not set yet, this can still fail or be canceled.
	 * if "SENT" is set already, we still wait for an Ack packet.
	 * when cleared, the master_bio may be completed.
	 * in (B,A) the request object may still linger on the transaction log
	 * until the corresponding barrier ack comes in */
	__RQ_NET_PENDING,

	/* If it is QUEUED, and it is a WRITE, it is also registered in the
	 * transfer log. Currently we need this flag to avoid conflicts between
	 * worker canceling the request and tl_clear_barrier killing it from
	 * transfer log.  We should restructure the code so that this conflict
	 * no longer occurs. */
	__RQ_NET_QUEUED,

	/* well, actually only "handed over to the network stack".
	 *
	 * TODO can potentially be dropped because of the similar meaning
	 * of RQ_NET_SENT and ~RQ_NET_QUEUED.
	 * however it is not exactly the same. before we drop it
	 * we must ensure that we can tell a request with network part
	 * from a request without, regardless of what happens to it. */
	__RQ_NET_SENT,

	/* when set, the request may be freed (if RQ_NET_QUEUED is clear).
	 * basically this means the corresponding P_BARRIER_ACK was received */
	__RQ_NET_DONE,

	/* whether or not we know (C) or pretend (B,A) that the write
	 * was successfully written on the peer.
	 */
	__RQ_NET_OK,

	/* peer called drbd_set_in_sync() for this write */
	__RQ_NET_SIS,

	/* keep this last, it's for the RQ_NET_MASK */
	__RQ_NET_MAX,

	/* Set when this is a write, clear for a read */
	__RQ_WRITE,

	/* Should call drbd_al_complete_io() for this request... */
	__RQ_IN_ACT_LOG,
};

#define RQ_LOCAL_PENDING   (1UL << __RQ_LOCAL_PENDING)
#define RQ_LOCAL_COMPLETED (1UL << __RQ_LOCAL_COMPLETED)
#define RQ_LOCAL_OK        (1UL << __RQ_LOCAL_OK)

#define RQ_LOCAL_MASK      ((RQ_LOCAL_OK << 1)-1) /* 0x07 */

#define RQ_NET_PENDING     (1UL << __RQ_NET_PENDING)
#define RQ_NET_QUEUED      (1UL << __RQ_NET_QUEUED)
#define RQ_NET_SENT        (1UL << __RQ_NET_SENT)
#define RQ_NET_DONE        (1UL << __RQ_NET_DONE)
#define RQ_NET_OK          (1UL << __RQ_NET_OK)
#define RQ_NET_SIS         (1UL << __RQ_NET_SIS)

/* 0x1f8 */
#define RQ_NET_MASK        (((1UL << __RQ_NET_MAX)-1) & ~RQ_LOCAL_MASK)

#define RQ_WRITE           (1UL << __RQ_WRITE)
#define RQ_IN_ACT_LOG      (1UL << __RQ_IN_ACT_LOG)
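
/*
 * Worked example (editor's illustration, not part of the original header):
 * with the bit numbers declared above, __RQ_LOCAL_OK == 2 and
 * __RQ_NET_MAX == 9, so
 *
 *	RQ_LOCAL_MASK == ((1UL << 2) << 1) - 1             == 0x007
 *	RQ_NET_MASK   == ((1UL << 9) - 1) & ~RQ_LOCAL_MASK == 0x1f8
 *
 * which matches the 0x07 and 0x1f8 annotations above.  A hypothetical
 * compile-time check (the function name is made up for illustration):
 */
#if 0
static inline void rq_state_mask_selftest(void)
{
	BUILD_BUG_ON(RQ_LOCAL_MASK != 0x07);
	BUILD_BUG_ON(RQ_NET_MASK != 0x1f8);
}
#endif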

/* For waking up the frozen transfer log, mod_req() has to return whether the
   request should be counted in the epoch object. */
#define MR_WRITE_SHIFT 0
#define MR_WRITE       (1 << MR_WRITE_SHIFT)
#define MR_READ_SHIFT  1
#define MR_READ        (1 << MR_READ_SHIFT)

/* epoch entries */
static inline
struct hlist_head *ee_hash_slot(struct drbd_conf *mdev, sector_t sector)
{
	BUG_ON(mdev->ee_hash_s == 0);
	return mdev->ee_hash +
		((unsigned int)(sector>>HT_SHIFT) % mdev->ee_hash_s);
}

/* transfer log (drbd_request objects) */
static inline
struct hlist_head *tl_hash_slot(struct drbd_conf *mdev, sector_t sector)
{
	BUG_ON(mdev->tl_hash_s == 0);
	return mdev->tl_hash +
		((unsigned int)(sector>>HT_SHIFT) % mdev->tl_hash_s);
}

/* application reads (drbd_request objects) */
static struct hlist_head *ar_hash_slot(struct drbd_conf *mdev, sector_t sector)
{
	return mdev->app_reads_hash
		+ ((unsigned int)(sector) % APP_R_HSIZE);
}
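
/*
 * Editor's note (illustration, not part of the original header):
 * ee_hash_slot() and tl_hash_slot() bucket by the coarse window
 * (sector >> HT_SHIFT) modulo the hash size, so all requests whose sectors
 * fall into the same 2^HT_SHIFT-sector window share a slot (HT_SHIFT is
 * presumably defined in drbd_int.h; no particular value is assumed here).
 * ar_hash_slot() instead hashes the raw sector modulo APP_R_HSIZE, since
 * application reads are looked up by exact sector in _ar_id_to_req() below.
 */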

/* when we receive the answer for a read request,
 * verify that we actually know about it */
static inline struct drbd_request *_ar_id_to_req(struct drbd_conf *mdev,
	u64 id, sector_t sector)
{
	struct hlist_head *slot = ar_hash_slot(mdev, sector);
	struct hlist_node *n;
	struct drbd_request *req;

	hlist_for_each_entry(req, n, slot, collision) {
		if ((unsigned long)req == (unsigned long)id) {
			D_ASSERT(req->sector == sector);
			return req;
		}
	}
	return NULL;
}

static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bio *bio_src)
{
	struct bio *bio;
	bio = bio_clone(bio_src, GFP_NOIO); /* XXX cannot fail?? */

	req->private_bio = bio;

	bio->bi_private  = req;
	bio->bi_end_io   = drbd_endio_pri;
	bio->bi_next     = NULL;
}

static inline struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
	struct bio *bio_src)
{
	struct drbd_request *req =
		mempool_alloc(drbd_request_mempool, GFP_NOIO);
	if (likely(req)) {
		drbd_req_make_private_bio(req, bio_src);

		req->rq_state    = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
		req->mdev        = mdev;
		req->master_bio  = bio_src;
		req->epoch       = 0;
		req->sector      = bio_src->bi_sector;
		req->size        = bio_src->bi_size;
		INIT_HLIST_NODE(&req->collision);
		INIT_LIST_HEAD(&req->tl_requests);
		INIT_LIST_HEAD(&req->w.list);
	}
	return req;
}

static inline void drbd_req_free(struct drbd_request *req)
{
	mempool_free(req, drbd_request_mempool);
}

static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
{
	return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
}
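
/*
 * Editor's illustration (not part of the original header): the lengths l1/l2
 * are in bytes and sectors are 512-byte units, hence the >>9 conversion.
 * E.g. overlaps(0, 4096, 8, 512) == 0, because the first range covers
 * sectors 0..7 and the second starts at sector 8; with s2 == 7 instead,
 * the ranges would overlap and the function returns 1.
 */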

/* Short lived temporary struct on the stack.
 * We could squirrel the error to be returned into
 * bio->bi_size, or similar. But that would be too ugly. */
struct bio_and_error {
	struct bio *bio;
	int error;
};

extern void _req_may_be_done(struct drbd_request *req,
		struct bio_and_error *m);
extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
		struct bio_and_error *m);
extern void complete_master_bio(struct drbd_conf *mdev,
		struct bio_and_error *m);
extern void request_timer_fn(unsigned long data);
extern void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what);

/* use this if you don't want to deal with calling complete_master_bio()
 * outside the spinlock, e.g. when walking some list on cleanup. */
static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what)
{
	struct drbd_conf *mdev = req->mdev;
	struct bio_and_error m;
	int rv;

	/* __req_mod possibly frees req, do not touch req after that! */
	rv = __req_mod(req, what, &m);
	if (m.bio)
		complete_master_bio(mdev, &m);

	return rv;
}

/* completion of master bio is outside of our spinlock.
 * We still may or may not be inside some irqs disabled section
 * of the lower level driver completion callback, so we need to
 * spin_lock_irqsave here. */
static inline int req_mod(struct drbd_request *req,
		enum drbd_req_event what)
{
	unsigned long flags;
	struct drbd_conf *mdev = req->mdev;
	struct bio_and_error m;
	int rv;

	spin_lock_irqsave(&mdev->req_lock, flags);
	rv = __req_mod(req, what, &m);
	spin_unlock_irqrestore(&mdev->req_lock, flags);

	if (m.bio)
		complete_master_bio(mdev, &m);

	return rv;
}
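
/*
 * Usage sketch (editor's illustration; the call site below is hypothetical,
 * only the function and event names come from this header): a caller that
 * already holds mdev->req_lock uses __req_mod() or _req_mod(), while e.g. a
 * completion path that runs without the lock, possibly with irqs disabled,
 * would use the locking variant:
 *
 *	req_mod(req, write_completed_with_error);
 *
 * req_mod() then takes req_lock around __req_mod() and completes the master
 * bio, if any, only after dropping the lock again.
 */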

static inline bool drbd_should_do_remote(union drbd_state s)
{
	return s.pdsk == D_UP_TO_DATE ||
		(s.pdsk >= D_INCONSISTENT &&
		 s.conn >= C_WF_BITMAP_T &&
		 s.conn < C_AHEAD);
	/* Before proto 96 that was >= CONNECTED instead of >= C_WF_BITMAP_T.
	   That is equivalent since before 96 IO was frozen in the C_WF_BITMAP*
	   states. */
}
static inline bool drbd_should_send_oos(union drbd_state s)
{
	return s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S;
	/* pdsk = D_INCONSISTENT as a consequence. Protocol 96 check not necessary
	   since we enter state C_AHEAD only if proto >= 96 */
}
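
/*
 * Usage sketch (editor's illustration; the surrounding logic is hypothetical,
 * only the two predicates come from this header): a write submit path would
 * sample the device state once and evaluate both:
 *
 *	union drbd_state s = mdev->state;
 *	int remote   = drbd_should_do_remote(s);
 *	int send_oos = drbd_should_send_oos(s);
 *
 * i.e. either replicate the write to the peer, or, while in C_AHEAD, only
 * mark the blocks out of sync on the peer (queue_for_send_oos).
 */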

#endif
v6.9.4
/* SPDX-License-Identifier: GPL-2.0-only */
/*
   drbd_req.h

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2006-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2006-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
   Copyright (C) 2006-2008, Philipp Reisner <philipp.reisner@linbit.com>.

 */

#ifndef _DRBD_REQ_H
#define _DRBD_REQ_H

#include <linux/module.h>

#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"

/* The request callbacks will be called in irq context by the IDE drivers,
   and in Softirqs/Tasklets/BH context by the SCSI drivers,
   and by the receiver and worker in kernel-thread context.
   Try to get the locking right :) */

/*
 * Objects of type struct drbd_request only exist on an R_PRIMARY node, and are
 * associated with IO requests originating from the block layer above us.
 *
 * There are quite a few things that may happen to a drbd request
 * during its lifetime.
 *
 *  It will be created.
 *  It will be marked with the intention to be
 *    submitted to local disk and/or
 *    sent via the network.
 *
 *  It has to be placed on the transfer log and other housekeeping lists,
 *  in case we have a network connection.
 *
 *  It may be identified as a concurrent (write) request
 *    and be handled accordingly.
 *
 *  It may be handed over to the local disk subsystem.
 *  It may be completed by the local disk subsystem,
 *    either successfully or with io-error.
 *  In case it is a READ request, and it failed locally,
 *    it may be retried remotely.
 *
 *  It may be queued for sending.
 *  It may be handed over to the network stack,
 *    which may fail.
 *  It may be acknowledged by the "peer" according to the wire_protocol in use.
 *    This may be a negative ack.
 *  It may receive a faked ack when the network connection is lost and the
 *  transfer log is cleaned up.
 *  Sending may be canceled due to network connection loss.
 *  When it finally has outlived its time,
 *    corresponding dirty bits in the resync-bitmap may be cleared or set,
 *    it will be destroyed,
 *    and completion will be signalled to the originator,
 *      with or without "success".
 */

enum drbd_req_event {
	CREATED,
	TO_BE_SENT,
	TO_BE_SUBMITTED,

	/* XXX yes, now I am inconsistent...
	 * these are not "events" but "actions"
	 * oh, well... */
	QUEUE_FOR_NET_WRITE,
	QUEUE_FOR_NET_READ,
	QUEUE_FOR_SEND_OOS,

	/* An empty flush is queued as P_BARRIER,
	 * which will cause it to complete "successfully",
	 * even if the local disk flush failed.
	 *
	 * Just like "real" requests, empty flushes (blkdev_issue_flush()) will
	 * only see an error if neither local nor remote data is reachable. */
	QUEUE_AS_DRBD_BARRIER,

	SEND_CANCELED,
	SEND_FAILED,
	HANDED_OVER_TO_NETWORK,
	OOS_HANDED_TO_NETWORK,
	CONNECTION_LOST_WHILE_PENDING,
	READ_RETRY_REMOTE_CANCELED,
	RECV_ACKED_BY_PEER,
	WRITE_ACKED_BY_PEER,
	WRITE_ACKED_BY_PEER_AND_SIS, /* and set_in_sync */
	CONFLICT_RESOLVED,
	POSTPONE_WRITE,
	NEG_ACKED,
	BARRIER_ACKED, /* in protocol A and B */
	DATA_RECEIVED, /* (remote read) */

	COMPLETED_OK,
	READ_COMPLETED_WITH_ERROR,
	READ_AHEAD_COMPLETED_WITH_ERROR,
	WRITE_COMPLETED_WITH_ERROR,
	DISCARD_COMPLETED_NOTSUPP,
	DISCARD_COMPLETED_WITH_ERROR,

	ABORT_DISK_IO,
	RESEND,
	FAIL_FROZEN_DISK_IO,
	RESTART_FROZEN_DISK_IO,
	NOTHING,
};

/* encoding of request states for now.  we don't actually need that many bits.
 * we don't need to do atomic bit operations either, since most of the time we
 * need to look at the connection state and/or manipulate some lists at the
 * same time, so we should hold the request lock anyway.
 */
enum drbd_req_state_bits {
	/* 3210
	 * 0000: no local possible
	 * 0001: to be submitted
	 *    UNUSED, we could map: 011: submitted, completion still pending
	 * 0110: completed ok
	 * 0010: completed with error
	 * 1001: Aborted (before completion)
	 * 1x10: Aborted and completed -> free
	 */
	__RQ_LOCAL_PENDING,
	__RQ_LOCAL_COMPLETED,
	__RQ_LOCAL_OK,
	__RQ_LOCAL_ABORTED,

	/* 87654
	 * 00000: no network possible
	 * 00001: to be sent
	 * 00011: to be sent, on worker queue
	 * 00101: sent, expecting recv_ack (B) or write_ack (C)
	 * 11101: sent,
	 *        recv_ack (B) or implicit "ack" (A),
	 *        still waiting for the barrier ack.
	 *        master_bio may already be completed and invalidated.
	 * 11100: write acked (C),
	 *        data received (for remote read, any protocol)
	 *        or finally the barrier ack has arrived (B,A)...
	 *        request can be freed
	 * 01100: neg-acked (write, protocol C)
	 *        or neg-d-acked (read, any protocol)
	 *        or killed from the transfer log
	 *        during cleanup after connection loss
	 *        request can be freed
	 * 01000: canceled or send failed...
	 *        request can be freed
	 */

	/* if "SENT" is not set yet, this can still fail or be canceled.
	 * if "SENT" is set already, we still wait for an Ack packet.
	 * when cleared, the master_bio may be completed.
	 * in (B,A) the request object may still linger on the transaction log
	 * until the corresponding barrier ack comes in */
	__RQ_NET_PENDING,

	/* If it is QUEUED, and it is a WRITE, it is also registered in the
	 * transfer log. Currently we need this flag to avoid conflicts between
	 * worker canceling the request and tl_clear_barrier killing it from
	 * transfer log.  We should restructure the code so that this conflict
	 * no longer occurs. */
	__RQ_NET_QUEUED,

	/* well, actually only "handed over to the network stack".
	 *
	 * TODO can potentially be dropped because of the similar meaning
	 * of RQ_NET_SENT and ~RQ_NET_QUEUED.
	 * however it is not exactly the same. before we drop it
	 * we must ensure that we can tell a request with network part
	 * from a request without, regardless of what happens to it. */
	__RQ_NET_SENT,

	/* when set, the request may be freed (if RQ_NET_QUEUED is clear).
	 * basically this means the corresponding P_BARRIER_ACK was received */
	__RQ_NET_DONE,

	/* whether or not we know (C) or pretend (B,A) that the write
	 * was successfully written on the peer.
	 */
	__RQ_NET_OK,

	/* peer called drbd_set_in_sync() for this write */
	__RQ_NET_SIS,

	/* keep this last, it's for the RQ_NET_MASK */
	__RQ_NET_MAX,

	/* Set when this is a write, clear for a read */
	__RQ_WRITE,
	__RQ_WSAME,
	__RQ_UNMAP,
	__RQ_ZEROES,

	/* Should call drbd_al_complete_io() for this request... */
	__RQ_IN_ACT_LOG,

	/* This was the most recent request during some blk_finish_plug()
	 * or its implicit from-schedule equivalent.
	 * We may use it as a hint to send a P_UNPLUG_REMOTE */
	__RQ_UNPLUG,

	/* The peer has sent a retry ACK */
	__RQ_POSTPONED,

	/* would have been completed,
	 * but was not, because of drbd_suspended() */
	__RQ_COMPLETION_SUSP,

	/* We expect a receive ACK (wire proto B) */
	__RQ_EXP_RECEIVE_ACK,

	/* We expect a write ACK (wire proto C) */
	__RQ_EXP_WRITE_ACK,

	/* waiting for a barrier ack, did an extra kref_get */
	__RQ_EXP_BARR_ACK,
};

#define RQ_LOCAL_PENDING   (1UL << __RQ_LOCAL_PENDING)
#define RQ_LOCAL_COMPLETED (1UL << __RQ_LOCAL_COMPLETED)
#define RQ_LOCAL_OK        (1UL << __RQ_LOCAL_OK)
#define RQ_LOCAL_ABORTED   (1UL << __RQ_LOCAL_ABORTED)

#define RQ_LOCAL_MASK      ((RQ_LOCAL_ABORTED << 1)-1)

#define RQ_NET_PENDING     (1UL << __RQ_NET_PENDING)
#define RQ_NET_QUEUED      (1UL << __RQ_NET_QUEUED)
#define RQ_NET_SENT        (1UL << __RQ_NET_SENT)
#define RQ_NET_DONE        (1UL << __RQ_NET_DONE)
#define RQ_NET_OK          (1UL << __RQ_NET_OK)
#define RQ_NET_SIS         (1UL << __RQ_NET_SIS)

#define RQ_NET_MASK        (((1UL << __RQ_NET_MAX)-1) & ~RQ_LOCAL_MASK)

#define RQ_WRITE           (1UL << __RQ_WRITE)
#define RQ_WSAME           (1UL << __RQ_WSAME)
#define RQ_UNMAP           (1UL << __RQ_UNMAP)
#define RQ_ZEROES          (1UL << __RQ_ZEROES)
#define RQ_IN_ACT_LOG      (1UL << __RQ_IN_ACT_LOG)
#define RQ_UNPLUG          (1UL << __RQ_UNPLUG)
#define RQ_POSTPONED	   (1UL << __RQ_POSTPONED)
#define RQ_COMPLETION_SUSP (1UL << __RQ_COMPLETION_SUSP)
#define RQ_EXP_RECEIVE_ACK (1UL << __RQ_EXP_RECEIVE_ACK)
#define RQ_EXP_WRITE_ACK   (1UL << __RQ_EXP_WRITE_ACK)
#define RQ_EXP_BARR_ACK    (1UL << __RQ_EXP_BARR_ACK)
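
/*
 * Worked example (editor's illustration, not part of the original header):
 * in this layout __RQ_LOCAL_ABORTED == 3 and __RQ_NET_MAX == 10, so
 *
 *	RQ_LOCAL_MASK == ((1UL << 3) << 1) - 1              == 0x00f
 *	RQ_NET_MASK   == ((1UL << 10) - 1) & ~RQ_LOCAL_MASK == 0x3f0
 *
 * i.e. the additional RQ_LOCAL_ABORTED bit widens the local mask and shifts
 * the network mask up by one bit compared to the older layout above.
 */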

/* For waking up the frozen transfer log, mod_req() has to return whether the
   request should be counted in the epoch object. */
#define MR_WRITE       1
#define MR_READ        2

/* Short lived temporary struct on the stack.
 * We could squirrel the error to be returned into
 * bio->bi_iter.bi_size, or similar. But that would be too ugly. */
struct bio_and_error {
	struct bio *bio;
	int error;
};

extern void start_new_tl_epoch(struct drbd_connection *connection);
extern void drbd_req_destroy(struct kref *kref);
extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
		struct drbd_peer_device *peer_device,
		struct bio_and_error *m);
extern void complete_master_bio(struct drbd_device *device,
		struct bio_and_error *m);
extern void request_timer_fn(struct timer_list *t);
extern void tl_restart(struct drbd_connection *connection, enum drbd_req_event what);
extern void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what);
extern void tl_abort_disk_io(struct drbd_device *device);

/* this is in drbd_main.c */
extern void drbd_restart_request(struct drbd_request *req);

/* use this if you don't want to deal with calling complete_master_bio()
 * outside the spinlock, e.g. when walking some list on cleanup. */
static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what,
		struct drbd_peer_device *peer_device)
{
	struct drbd_device *device = req->device;
	struct bio_and_error m;
	int rv;

	/* __req_mod possibly frees req, do not touch req after that! */
	rv = __req_mod(req, what, peer_device, &m);
	if (m.bio)
		complete_master_bio(device, &m);

	return rv;
}

/* completion of master bio is outside of our spinlock.
 * We still may or may not be inside some irqs disabled section
 * of the lower level driver completion callback, so we need to
 * spin_lock_irqsave here. */
static inline int req_mod(struct drbd_request *req,
		enum drbd_req_event what,
		struct drbd_peer_device *peer_device)
{
	unsigned long flags;
	struct drbd_device *device = req->device;
	struct bio_and_error m;
	int rv;

	spin_lock_irqsave(&device->resource->req_lock, flags);
	rv = __req_mod(req, what, peer_device, &m);
	spin_unlock_irqrestore(&device->resource->req_lock, flags);

	if (m.bio)
		complete_master_bio(device, &m);

	return rv;
}

extern bool drbd_should_do_remote(union drbd_dev_state);

#endif