v3.1

/******************************************************************************
 * ring.h
 *
 * Shared producer-consumer ring macros.
 *
 * Tim Deegan and Andrew Warfield November 2004.
 */

#ifndef __XEN_PUBLIC_IO_RING_H__
#define __XEN_PUBLIC_IO_RING_H__

typedef unsigned int RING_IDX;

/* Round a 32-bit unsigned constant down to the nearest power of two. */
#define __RD2(_x)  (((_x) & 0x00000002) ? 0x2		       : ((_x) & 0x1))
#define __RD4(_x)  (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2    : __RD2(_x))
#define __RD8(_x)  (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4    : __RD4(_x))
#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8    : __RD8(_x))
#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x))

/*
 * Calculate size of a shared ring, given the total available space for the
 * ring and indexes (_sz), and the name tag of the request/response structure.
 * A ring contains as many entries as will fit, rounded down to the nearest
 * power of two (so we can mask with (size-1) to loop around).
 */
#define __CONST_RING_SIZE(_s, _sz)				\
	(__RD32(((_sz) - offsetof(struct _s##_sring, ring)) /	\
		sizeof(((struct _s##_sring *)0)->ring[0])))

/*
 * The same for passing in an actual pointer instead of a name tag.
 */
#define __RING_SIZE(_s, _sz)						\
	(__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))

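/*
 * Worked example (editor's illustration, not part of the original header;
 * the entry size is hypothetical): with a 4096-byte shared page and a
 * 112-byte union entry, the space left after the 64-byte index header is
 * 4032 bytes, which holds 36 entries; __RD32(36) rounds down to the
 * nearest power of two, so the ring gets 32 entries and indexes can be
 * masked with (32 - 1).
 */
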
/*
 * Macros to make the correct C datatypes for a new kind of ring.
 *
 * To make a new ring datatype, you need to have two message structures,
 * let's say struct request and struct response, already defined.
 *
 * In a header where you want the ring datatype declared, you then do:
 *
 *     DEFINE_RING_TYPES(mytag, struct request, struct response);
 *
 * These expand out to give you a set of types, as you can see below.
 * The most important of these are:
 *
 *     struct mytag_sring      - The shared ring.
 *     struct mytag_front_ring - The 'front' half of the ring.
 *     struct mytag_back_ring  - The 'back' half of the ring.
 *
 * To initialize a ring in your code you need to know the location and size
 * of the shared memory area (PAGE_SIZE, for instance). To initialize
 * the front half:
 *
 *     struct mytag_front_ring front_ring;
 *     SHARED_RING_INIT((struct mytag_sring *)shared_page);
 *     FRONT_RING_INIT(&front_ring, (struct mytag_sring *)shared_page,
 *		       PAGE_SIZE);
 *
 * Initializing the back follows similarly (note that only the front
 * initializes the shared ring):
 *
 *     struct mytag_back_ring back_ring;
 *     BACK_RING_INIT(&back_ring, (struct mytag_sring *)shared_page,
 *		      PAGE_SIZE);
 */

#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t)			\
									\
/* Shared ring entry */							\
union __name##_sring_entry {						\
    __req_t req;							\
    __rsp_t rsp;							\
};									\
									\
/* Shared ring page */							\
struct __name##_sring {							\
    RING_IDX req_prod, req_event;					\
    RING_IDX rsp_prod, rsp_event;					\
    uint8_t  pad[48];							\
    union __name##_sring_entry ring[1]; /* variable-length */		\
};									\
									\
/* "Front" end's private variables */					\
struct __name##_front_ring {						\
    RING_IDX req_prod_pvt;						\
    RING_IDX rsp_cons;							\
    unsigned int nr_ents;						\
    struct __name##_sring *sring;					\
};									\
									\
/* "Back" end's private variables */					\
struct __name##_back_ring {						\
    RING_IDX rsp_prod_pvt;						\
    RING_IDX req_cons;							\
    unsigned int nr_ents;						\
    struct __name##_sring *sring;					\
};

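/*
 * Illustrative instantiation (editor's sketch, not part of the original
 * header), using two hypothetical message structures:
 *
 *     struct mytag_request  { uint32_t id; uint64_t gref; };
 *     struct mytag_response { uint32_t id; int16_t status; };
 *     DEFINE_RING_TYPES(mytag, struct mytag_request, struct mytag_response);
 *
 * This one invocation emits union mytag_sring_entry, struct mytag_sring,
 * struct mytag_front_ring and struct mytag_back_ring.
 */
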
/*
 * Macros for manipulating rings.
 *
 * FRONT_RING_whatever works on the "front end" of a ring: here
 * requests are pushed on to the ring and responses taken off it.
 *
 * BACK_RING_whatever works on the "back end" of a ring: here
 * requests are taken off the ring and responses put on.
 *
 * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL.
 * This is OK in 1-for-1 request-response situations where the
 * requestor (front end) never has more than RING_SIZE()-1
 * outstanding requests.
 */

/* Initializing empty rings */
#define SHARED_RING_INIT(_s) do {					\
    (_s)->req_prod  = (_s)->rsp_prod  = 0;				\
    (_s)->req_event = (_s)->rsp_event = 1;				\
    memset((_s)->pad, 0, sizeof((_s)->pad));				\
} while (0)

#define FRONT_RING_INIT(_r, _s, __size) do {				\
    (_r)->req_prod_pvt = 0;						\
    (_r)->rsp_cons = 0;							\
    (_r)->nr_ents = __RING_SIZE(_s, __size);				\
    (_r)->sring = (_s);							\
} while (0)

#define BACK_RING_INIT(_r, _s, __size) do {				\
    (_r)->rsp_prod_pvt = 0;						\
    (_r)->req_cons = 0;							\
    (_r)->nr_ents = __RING_SIZE(_s, __size);				\
    (_r)->sring = (_s);							\
} while (0)

/* Initialize to existing shared indexes -- for recovery */
#define FRONT_RING_ATTACH(_r, _s, __size) do {				\
    (_r)->sring = (_s);							\
    (_r)->req_prod_pvt = (_s)->req_prod;				\
    (_r)->rsp_cons = (_s)->rsp_prod;					\
    (_r)->nr_ents = __RING_SIZE(_s, __size);				\
} while (0)

#define BACK_RING_ATTACH(_r, _s, __size) do {				\
    (_r)->sring = (_s);							\
    (_r)->rsp_prod_pvt = (_s)->rsp_prod;				\
    (_r)->req_cons = (_s)->req_prod;					\
    (_r)->nr_ents = __RING_SIZE(_s, __size);				\
} while (0)

/* How big is this ring? */
#define RING_SIZE(_r)							\
    ((_r)->nr_ents)

/* Number of free requests (for use on front side only). */
#define RING_FREE_REQUESTS(_r)						\
    (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))

/* Test if there is an empty slot available on the front ring.
 * (This is only meaningful from the front.)
 */
#define RING_FULL(_r)							\
    (RING_FREE_REQUESTS(_r) == 0)

/* Test if there are outstanding messages to be processed on a ring. */
#define RING_HAS_UNCONSUMED_RESPONSES(_r)				\
    ((_r)->sring->rsp_prod - (_r)->rsp_cons)

#define RING_HAS_UNCONSUMED_REQUESTS(_r)				\
    ({									\
	unsigned int req = (_r)->sring->req_prod - (_r)->req_cons;	\
	unsigned int rsp = RING_SIZE(_r) -				\
			   ((_r)->req_cons - (_r)->rsp_prod_pvt);	\
	req < rsp ? req : rsp;						\
    })

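/*
 * (Editor's note, not from the original header: taking the minimum of
 * 'req' and 'rsp' above appears to bound the result by the number of
 * slots the back end can still respond into, which also keeps the count
 * sane if the front end publishes a bogus req_prod.)
 */
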
/* Direct access to individual ring elements, by index. */
#define RING_GET_REQUEST(_r, _idx)					\
    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))

#define RING_GET_RESPONSE(_r, _idx)					\
    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))

/* Loop termination condition: Would the specified index overflow the ring? */
#define RING_REQUEST_CONS_OVERFLOW(_r, _cons)				\
    (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
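
/*
 * Illustrative only (editor's sketch, not from the original header):
 * back ends typically bound their consume loop with this macro, e.g.
 *
 *     RING_IDX rc = back_ring.req_cons;
 *     RING_IDX rp = back_ring.sring->req_prod;
 *
 *     while (rc != rp && !RING_REQUEST_CONS_OVERFLOW(&back_ring, rc)) {
 *         struct mytag_request *req = RING_GET_REQUEST(&back_ring, rc++);
 *         // hand off *req for processing
 *     }
 *     back_ring.req_cons = rc;
 */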

#define RING_PUSH_REQUESTS(_r) do {					\
    wmb(); /* back sees requests /before/ updated producer index */	\
    (_r)->sring->req_prod = (_r)->req_prod_pvt;				\
} while (0)

#define RING_PUSH_RESPONSES(_r) do {					\
    wmb(); /* front sees responses /before/ updated producer index */	\
    (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt;				\
} while (0)

/*
 * Notification hold-off (req_event and rsp_event):
 *
 * When queueing requests or responses on a shared ring, it may not always be
 * necessary to notify the remote end. For example, if requests are in flight
 * in a backend, the front may be able to queue further requests without
 * notifying the back (if the back checks for new requests when it queues
 * responses).
 *
 * When enqueuing requests or responses:
 *
 *  Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument
 *  is a boolean return value. True indicates that the receiver requires an
 *  asynchronous notification.
 *
 * After dequeuing requests or responses (before sleeping the connection):
 *
 *  Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES().
 *  The second argument is a boolean return value. True indicates that there
 *  are pending messages on the ring (i.e., the connection should not be put
 *  to sleep).
 *
 *  These macros will set the req_event/rsp_event field to trigger a
 *  notification on the very next message that is enqueued. If you want to
 *  create batches of work (i.e., only receive a notification after several
 *  messages have been enqueued) then you will need to create a customised
 *  version of the FINAL_CHECK macro in your own code, which sets the event
 *  field appropriately.
 */

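/*
 * Illustrative only (editor's sketch, not from the original header): a
 * front end pushing one request and kicking the event channel only when
 * the back end asked to be notified; 'irq', fill_request() and the
 * 'mytag' types are assumed context.
 *
 *     struct mytag_request *req;
 *     int notify;
 *
 *     req = RING_GET_REQUEST(&front_ring, front_ring.req_prod_pvt);
 *     fill_request(req);                      // hypothetical helper
 *     front_ring.req_prod_pvt++;
 *     RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&front_ring, notify);
 *     if (notify)
 *         notify_remote_via_irq(irq);         // Linux Xen event kick
 */
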
#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do {		\
    RING_IDX __old = (_r)->sring->req_prod;				\
    RING_IDX __new = (_r)->req_prod_pvt;				\
    wmb(); /* back sees requests /before/ updated producer index */	\
    (_r)->sring->req_prod = __new;					\
    mb(); /* back sees new requests /before/ we check req_event */	\
    (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) <		\
		 (RING_IDX)(__new - __old));				\
} while (0)

#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do {		\
    RING_IDX __old = (_r)->sring->rsp_prod;				\
    RING_IDX __new = (_r)->rsp_prod_pvt;				\
    wmb(); /* front sees responses /before/ updated producer index */	\
    (_r)->sring->rsp_prod = __new;					\
    mb(); /* front sees new responses /before/ we check rsp_event */	\
    (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) <		\
		 (RING_IDX)(__new - __old));				\
} while (0)

#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do {		\
    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);			\
    if (_work_to_do) break;						\
    (_r)->sring->req_event = (_r)->req_cons + 1;			\
    mb();								\
    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);			\
} while (0)

#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do {		\
    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);			\
    if (_work_to_do) break;						\
    (_r)->sring->rsp_event = (_r)->rsp_cons + 1;			\
    mb();								\
    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);			\
} while (0)
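
/*
 * Illustrative only (editor's sketch, not from the original header): a
 * back-end work loop that drains requests and re-arms req_event before
 * idling, closing the race where a request arrives between the last
 * check and sleeping. process_request() is a hypothetical handler.
 *
 *     int work_to_do;
 *
 *     do {
 *         while (RING_HAS_UNCONSUMED_REQUESTS(&back_ring)) {
 *             struct mytag_request *req =
 *                 RING_GET_REQUEST(&back_ring, back_ring.req_cons);
 *             process_request(req);
 *             back_ring.req_cons++;
 *         }
 *         RING_FINAL_CHECK_FOR_REQUESTS(&back_ring, work_to_do);
 *     } while (work_to_do);
 */
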
#endif /* __XEN_PUBLIC_IO_RING_H__ */

v5.9
/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
 * ring.h
 *
 * Shared producer-consumer ring macros.
 *
 * Tim Deegan and Andrew Warfield November 2004.
 */

#ifndef __XEN_PUBLIC_IO_RING_H__
#define __XEN_PUBLIC_IO_RING_H__

#include <xen/interface/grant_table.h>

typedef unsigned int RING_IDX;

/* Round a 32-bit unsigned constant down to the nearest power of two. */
#define __RD2(_x)  (((_x) & 0x00000002) ? 0x2		       : ((_x) & 0x1))
#define __RD4(_x)  (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2    : __RD2(_x))
#define __RD8(_x)  (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4    : __RD4(_x))
#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8    : __RD8(_x))
#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x))

/*
 * Calculate size of a shared ring, given the total available space for the
 * ring and indexes (_sz), and the name tag of the request/response structure.
 * A ring contains as many entries as will fit, rounded down to the nearest
 * power of two (so we can mask with (size-1) to loop around).
 */
#define __CONST_RING_SIZE(_s, _sz)				\
	(__RD32(((_sz) - offsetof(struct _s##_sring, ring)) /	\
		sizeof(((struct _s##_sring *)0)->ring[0])))

/*
 * The same for passing in an actual pointer instead of a name tag.
 */
#define __RING_SIZE(_s, _sz)						\
	(__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))

/*
 * Macros to make the correct C datatypes for a new kind of ring.
 *
 * To make a new ring datatype, you need to have two message structures,
 * let's say struct request and struct response, already defined.
 *
 * In a header where you want the ring datatype declared, you then do:
 *
 *     DEFINE_RING_TYPES(mytag, struct request, struct response);
 *
 * These expand out to give you a set of types, as you can see below.
 * The most important of these are:
 *
 *     struct mytag_sring      - The shared ring.
 *     struct mytag_front_ring - The 'front' half of the ring.
 *     struct mytag_back_ring  - The 'back' half of the ring.
 *
 * To initialize a ring in your code you need to know the location and size
 * of the shared memory area (PAGE_SIZE, for instance). To initialize
 * the front half:
 *
 *     struct mytag_front_ring front_ring;
 *     SHARED_RING_INIT((struct mytag_sring *)shared_page);
 *     FRONT_RING_INIT(&front_ring, (struct mytag_sring *)shared_page,
 *		       PAGE_SIZE);
 *
 * Initializing the back follows similarly (note that only the front
 * initializes the shared ring):
 *
 *     struct mytag_back_ring back_ring;
 *     BACK_RING_INIT(&back_ring, (struct mytag_sring *)shared_page,
 *		      PAGE_SIZE);
 */

#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t)			\
									\
/* Shared ring entry */							\
union __name##_sring_entry {						\
    __req_t req;							\
    __rsp_t rsp;							\
};									\
									\
/* Shared ring page */							\
struct __name##_sring {							\
    RING_IDX req_prod, req_event;					\
    RING_IDX rsp_prod, rsp_event;					\
    uint8_t  pad[48];							\
    union __name##_sring_entry ring[1]; /* variable-length */		\
};									\
									\
/* "Front" end's private variables */					\
struct __name##_front_ring {						\
    RING_IDX req_prod_pvt;						\
    RING_IDX rsp_cons;							\
    unsigned int nr_ents;						\
    struct __name##_sring *sring;					\
};									\
									\
/* "Back" end's private variables */					\
struct __name##_back_ring {						\
    RING_IDX rsp_prod_pvt;						\
    RING_IDX req_cons;							\
    unsigned int nr_ents;						\
    struct __name##_sring *sring;					\
};

/*
 * Macros for manipulating rings.
 *
 * FRONT_RING_whatever works on the "front end" of a ring: here
 * requests are pushed on to the ring and responses taken off it.
 *
 * BACK_RING_whatever works on the "back end" of a ring: here
 * requests are taken off the ring and responses put on.
 *
 * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL.
 * This is OK in 1-for-1 request-response situations where the
 * requestor (front end) never has more than RING_SIZE()-1
 * outstanding requests.
 */

/* Initializing empty rings */
#define SHARED_RING_INIT(_s) do {					\
    (_s)->req_prod  = (_s)->rsp_prod  = 0;				\
    (_s)->req_event = (_s)->rsp_event = 1;				\
    memset((_s)->pad, 0, sizeof((_s)->pad));				\
} while (0)

#define FRONT_RING_ATTACH(_r, _s, _i, __size) do {			\
    (_r)->req_prod_pvt = (_i);						\
    (_r)->rsp_cons = (_i);						\
    (_r)->nr_ents = __RING_SIZE(_s, __size);				\
    (_r)->sring = (_s);							\
} while (0)

#define FRONT_RING_INIT(_r, _s, __size) FRONT_RING_ATTACH(_r, _s, 0, __size)

#define BACK_RING_ATTACH(_r, _s, _i, __size) do {			\
    (_r)->rsp_prod_pvt = (_i);						\
    (_r)->req_cons = (_i);						\
    (_r)->nr_ents = __RING_SIZE(_s, __size);				\
    (_r)->sring = (_s);							\
} while (0)

#define BACK_RING_INIT(_r, _s, __size) BACK_RING_ATTACH(_r, _s, 0, __size)

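/*
 * Illustrative only (editor's sketch, not from the original header):
 * unlike the INIT forms, the ATTACH forms take a starting index, so a
 * recovering front end can rejoin a live ring, e.g. resuming at the
 * shared response producer (valid only if every earlier request has
 * already been answered and consumed):
 *
 *     FRONT_RING_ATTACH(&front_ring, sring, sring->rsp_prod, PAGE_SIZE);
 */
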
/* How big is this ring? */
#define RING_SIZE(_r)							\
    ((_r)->nr_ents)

/* Number of free requests (for use on front side only). */
#define RING_FREE_REQUESTS(_r)						\
    (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))

/* Test if there is an empty slot available on the front ring.
 * (This is only meaningful from the front.)
 */
#define RING_FULL(_r)							\
    (RING_FREE_REQUESTS(_r) == 0)

/* Test if there are outstanding messages to be processed on a ring. */
#define RING_HAS_UNCONSUMED_RESPONSES(_r)				\
    ((_r)->sring->rsp_prod - (_r)->rsp_cons)

#define RING_HAS_UNCONSUMED_REQUESTS(_r)				\
    ({									\
	unsigned int req = (_r)->sring->req_prod - (_r)->req_cons;	\
	unsigned int rsp = RING_SIZE(_r) -				\
			   ((_r)->req_cons - (_r)->rsp_prod_pvt);	\
	req < rsp ? req : rsp;						\
    })

/* Direct access to individual ring elements, by index. */
#define RING_GET_REQUEST(_r, _idx)					\
    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))

/*
 * Get a local copy of a request.
 *
 * Use this in preference to RING_GET_REQUEST() so all processing is
 * done on a local copy that cannot be modified by the other end.
 *
 * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
 * to be ineffective where _req is a struct which consists of only bitfields.
 */
#define RING_COPY_REQUEST(_r, _idx, _req) do {				\
	/* Use volatile to force the copy into _req. */			\
	*(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx);	\
} while (0)

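/*
 * Illustrative only (editor's sketch, not from the original header):
 * the back end takes a stable local copy before validating, so the
 * front end cannot change fields between the check and the use:
 *
 *     struct mytag_request req;
 *
 *     RING_COPY_REQUEST(&back_ring, back_ring.req_cons, &req);
 *     back_ring.req_cons++;
 *     // validate and process 'req'; the shared slot may now change freely
 */
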
#define RING_GET_RESPONSE(_r, _idx)					\
    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))

/* Loop termination condition: Would the specified index overflow the ring? */
#define RING_REQUEST_CONS_OVERFLOW(_r, _cons)				\
    (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))

/* Ill-behaved frontend determination: Can there be this many requests? */
#define RING_REQUEST_PROD_OVERFLOW(_r, _prod)               \
    (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))

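/*
 * Illustrative only (editor's sketch, not from the original header): a
 * back end sanity-checking the advertised producer once per batch
 * before trusting it:
 *
 *     RING_IDX rp = back_ring.sring->req_prod;
 *
 *     if (RING_REQUEST_PROD_OVERFLOW(&back_ring, rp))
 *         return -EIO;    // front end advertised an impossible index
 *     virt_rmb();         // read rp before reading the ring contents
 */
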
#define RING_PUSH_REQUESTS(_r) do {					\
    virt_wmb(); /* back sees requests /before/ updated producer index */ \
    (_r)->sring->req_prod = (_r)->req_prod_pvt;				\
} while (0)

#define RING_PUSH_RESPONSES(_r) do {					\
    virt_wmb(); /* front sees responses /before/ updated producer index */ \
    (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt;				\
} while (0)

/*
 * Notification hold-off (req_event and rsp_event):
 *
 * When queueing requests or responses on a shared ring, it may not always be
 * necessary to notify the remote end. For example, if requests are in flight
 * in a backend, the front may be able to queue further requests without
 * notifying the back (if the back checks for new requests when it queues
 * responses).
 *
 * When enqueuing requests or responses:
 *
 *  Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument
 *  is a boolean return value. True indicates that the receiver requires an
 *  asynchronous notification.
 *
 * After dequeuing requests or responses (before sleeping the connection):
 *
 *  Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES().
 *  The second argument is a boolean return value. True indicates that there
 *  are pending messages on the ring (i.e., the connection should not be put
 *  to sleep).
 *
 *  These macros will set the req_event/rsp_event field to trigger a
 *  notification on the very next message that is enqueued. If you want to
 *  create batches of work (i.e., only receive a notification after several
 *  messages have been enqueued) then you will need to create a customised
 *  version of the FINAL_CHECK macro in your own code, which sets the event
 *  field appropriately.
 */

#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do {		\
    RING_IDX __old = (_r)->sring->req_prod;				\
    RING_IDX __new = (_r)->req_prod_pvt;				\
    virt_wmb(); /* back sees requests /before/ updated producer index */ \
    (_r)->sring->req_prod = __new;					\
    virt_mb(); /* back sees new requests /before/ we check req_event */ \
    (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) <		\
		 (RING_IDX)(__new - __old));				\
} while (0)

#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do {		\
    RING_IDX __old = (_r)->sring->rsp_prod;				\
    RING_IDX __new = (_r)->rsp_prod_pvt;				\
    virt_wmb(); /* front sees responses /before/ updated producer index */ \
    (_r)->sring->rsp_prod = __new;					\
    virt_mb(); /* front sees new responses /before/ we check rsp_event */ \
    (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) <		\
		 (RING_IDX)(__new - __old));				\
} while (0)

#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do {		\
    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);			\
    if (_work_to_do) break;						\
    (_r)->sring->req_event = (_r)->req_cons + 1;			\
    virt_mb();								\
    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);			\
} while (0)

#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do {		\
    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);			\
    if (_work_to_do) break;						\
    (_r)->sring->rsp_event = (_r)->rsp_cons + 1;			\
    virt_mb();								\
    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);			\
} while (0)

/*
 * DEFINE_XEN_FLEX_RING_AND_INTF defines two unidirectional rings and
 * functions to check if there is data on the ring, and to read and
 * write to them.
 *
 * DEFINE_XEN_FLEX_RING is similar to DEFINE_XEN_FLEX_RING_AND_INTF, but
 * does not define the indexes page. As different protocols can have
 * extensions to the basic format, this macro allows them to define their
 * own struct.
 *
 * XEN_FLEX_RING_SIZE
 *   Convenience macro to calculate the size of one of the two rings
 *   from the overall order.
 *
 * $NAME_mask
 *   Function to apply the size mask to an index, to reduce the index
 *   to the range [0, size).
 *
 * $NAME_read_packet
 *   Function to read data from the ring. The amount of data to read is
 *   specified by the "size" argument.
 *
 * $NAME_write_packet
 *   Function to write data to the ring. The amount of data to write is
 *   specified by the "size" argument.
 *
 * $NAME_get_ring_ptr
 *   Convenience function that returns a pointer to read/write to the
 *   ring at the right location.
 *
 * $NAME_data_intf
 *   Indexes page, shared between frontend and backend. It also
 *   contains the array of grant refs.
 *
 * $NAME_queued
 *   Function to calculate how many bytes are currently on the ring,
 *   ready to be read. It can also be used to calculate how much free
 *   space is currently on the ring (XEN_FLEX_RING_SIZE() -
 *   $NAME_queued()).
 */

#ifndef XEN_PAGE_SHIFT
/* The PAGE_SIZE for ring protocols and hypercall interfaces is always
 * 4K, regardless of the architecture and the page granularity chosen
 * by the operating system.
 */
#define XEN_PAGE_SHIFT 12
#endif
#define XEN_FLEX_RING_SIZE(order)                                             \
    (1UL << ((order) + XEN_PAGE_SHIFT - 1))

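/*
 * Worked example (editor's illustration, not part of the original
 * header): the order covers the whole two-ring allocation, so order 1
 * means 2 pages (8192 bytes) in total, and XEN_FLEX_RING_SIZE(1) ==
 * 1UL << (1 + 12 - 1) == 4096 bytes for each of the 'in' and 'out'
 * rings.
 */
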
#define DEFINE_XEN_FLEX_RING(name)                                            \
static inline RING_IDX name##_mask(RING_IDX idx, RING_IDX ring_size)          \
{                                                                             \
    return idx & (ring_size - 1);                                             \
}                                                                             \
                                                                              \
static inline unsigned char *name##_get_ring_ptr(unsigned char *buf,          \
                                                 RING_IDX idx,                \
                                                 RING_IDX ring_size)          \
{                                                                             \
    return buf + name##_mask(idx, ring_size);                                 \
}                                                                             \
                                                                              \
static inline void name##_read_packet(void *opaque,                           \
                                      const unsigned char *buf,               \
                                      size_t size,                            \
                                      RING_IDX masked_prod,                   \
                                      RING_IDX *masked_cons,                  \
                                      RING_IDX ring_size)                     \
{                                                                             \
    if (*masked_cons < masked_prod ||                                         \
        size <= ring_size - *masked_cons) {                                   \
        memcpy(opaque, buf + *masked_cons, size);                             \
    } else {                                                                  \
        memcpy(opaque, buf + *masked_cons, ring_size - *masked_cons);         \
        memcpy((unsigned char *)opaque + ring_size - *masked_cons, buf,       \
               size - (ring_size - *masked_cons));                            \
    }                                                                         \
    *masked_cons = name##_mask(*masked_cons + size, ring_size);               \
}                                                                             \
                                                                              \
static inline void name##_write_packet(unsigned char *buf,                    \
                                       const void *opaque,                    \
                                       size_t size,                           \
                                       RING_IDX *masked_prod,                 \
                                       RING_IDX masked_cons,                  \
                                       RING_IDX ring_size)                    \
{                                                                             \
    if (*masked_prod < masked_cons ||                                         \
        size <= ring_size - *masked_prod) {                                   \
        memcpy(buf + *masked_prod, opaque, size);                             \
    } else {                                                                  \
        memcpy(buf + *masked_prod, opaque, ring_size - *masked_prod);         \
        memcpy(buf, (unsigned char *)opaque + (ring_size - *masked_prod),     \
               size - (ring_size - *masked_prod));                            \
    }                                                                         \
    *masked_prod = name##_mask(*masked_prod + size, ring_size);               \
}                                                                             \
                                                                              \
static inline RING_IDX name##_queued(RING_IDX prod,                           \
                                     RING_IDX cons,                           \
                                     RING_IDX ring_size)                      \
{                                                                             \
    RING_IDX size;                                                            \
                                                                              \
    if (prod == cons)                                                         \
        return 0;                                                             \
                                                                              \
    prod = name##_mask(prod, ring_size);                                      \
    cons = name##_mask(cons, ring_size);                                      \
                                                                              \
    if (prod == cons)                                                         \
        return ring_size;                                                     \
                                                                              \
    if (prod > cons)                                                          \
        size = prod - cons;                                                   \
    else                                                                      \
        size = ring_size - (cons - prod);                                     \
    return size;                                                              \
}                                                                             \
                                                                              \
struct name##_data {                                                          \
    unsigned char *in; /* half of the allocation */                           \
    unsigned char *out; /* half of the allocation */                          \
}

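/*
 * Illustrative only (editor's sketch, not from the original header):
 * after DEFINE_XEN_FLEX_RING(myring), a consumer drains whatever is
 * queued on the 'in' ring; 'intf', 'data' and 'order' are assumed
 * context, and 'dest' is assumed large enough.
 *
 *     RING_IDX ring_size = XEN_FLEX_RING_SIZE(order);
 *     RING_IDX prod = intf->in_prod, cons = intf->in_cons;
 *     RING_IDX avail = myring_queued(prod, cons, ring_size);
 *     RING_IDX masked_prod = myring_mask(prod, ring_size);
 *     RING_IDX masked_cons = myring_mask(cons, ring_size);
 *
 *     virt_rmb();         // see in_prod before the bytes it covers
 *     myring_read_packet(dest, data.in, avail, masked_prod,
 *                        &masked_cons, ring_size);
 *     virt_mb();          // finish reading before publishing in_cons
 *     intf->in_cons = cons + avail;
 */
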
#define DEFINE_XEN_FLEX_RING_AND_INTF(name)                                   \
struct name##_data_intf {                                                     \
    RING_IDX in_cons, in_prod;                                                \
                                                                              \
    uint8_t pad1[56];                                                         \
                                                                              \
    RING_IDX out_cons, out_prod;                                              \
                                                                              \
    uint8_t pad2[56];                                                         \
                                                                              \
    RING_IDX ring_order;                                                      \
    grant_ref_t ref[];                                                        \
};                                                                            \
DEFINE_XEN_FLEX_RING(name)

#endif /* __XEN_PUBLIC_IO_RING_H__ */