// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <linux/errno.h>
#include <linux/irq.h>

#include <drm/drm_print.h>

#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/grant_table.h>

#include "xen_drm_front.h"
#include "xen_drm_front_evtchnl.h"
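
/*
 * Interrupt handler of the control channel: consume responses from the
 * shared ring, match them against the expected id and wake the waiter
 * with the response status.
 */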
static irqreturn_t evtchnl_interrupt_ctrl(int irq, void *dev_id)
{
        struct xen_drm_front_evtchnl *evtchnl = dev_id;
        struct xen_drm_front_info *front_info = evtchnl->front_info;
        struct xendispl_resp *resp;
        RING_IDX i, rp;
        unsigned long flags;

        if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
                return IRQ_HANDLED;

        spin_lock_irqsave(&front_info->io_lock, flags);

again:
        rp = evtchnl->u.req.ring.sring->rsp_prod;
        /* ensure we see queued responses up to rp */
        virt_rmb();

        for (i = evtchnl->u.req.ring.rsp_cons; i != rp; i++) {
                resp = RING_GET_RESPONSE(&evtchnl->u.req.ring, i);
                if (unlikely(resp->id != evtchnl->evt_id))
                        continue;

                switch (resp->operation) {
                case XENDISPL_OP_PG_FLIP:
                case XENDISPL_OP_FB_ATTACH:
                case XENDISPL_OP_FB_DETACH:
                case XENDISPL_OP_DBUF_CREATE:
                case XENDISPL_OP_DBUF_DESTROY:
                case XENDISPL_OP_SET_CONFIG:
                        evtchnl->u.req.resp_status = resp->status;
                        complete(&evtchnl->u.req.completion);
                        break;

                default:
                        DRM_ERROR("Operation %d is not supported\n",
                                  resp->operation);
                        break;
                }
        }

        evtchnl->u.req.ring.rsp_cons = i;

        if (i != evtchnl->u.req.ring.req_prod_pvt) {
                int more_to_do;

                RING_FINAL_CHECK_FOR_RESPONSES(&evtchnl->u.req.ring,
                                               more_to_do);
                if (more_to_do)
                        goto again;
        } else {
                evtchnl->u.req.ring.sring->rsp_event = i + 1;
        }

        spin_unlock_irqrestore(&front_info->io_lock, flags);
        return IRQ_HANDLED;
}
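
/*
 * Interrupt handler of the in-event channel: consume events published by
 * the backend (currently only page flip completions) and forward them to
 * the DRM side.
 */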
static irqreturn_t evtchnl_interrupt_evt(int irq, void *dev_id)
{
        struct xen_drm_front_evtchnl *evtchnl = dev_id;
        struct xen_drm_front_info *front_info = evtchnl->front_info;
        struct xendispl_event_page *page = evtchnl->u.evt.page;
        u32 cons, prod;
        unsigned long flags;

        if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
                return IRQ_HANDLED;

        spin_lock_irqsave(&front_info->io_lock, flags);

        prod = page->in_prod;
        /* ensure we see ring contents up to prod */
        virt_rmb();
        if (prod == page->in_cons)
                goto out;

        for (cons = page->in_cons; cons != prod; cons++) {
                struct xendispl_evt *event;

                event = &XENDISPL_IN_RING_REF(page, cons);
                if (unlikely(event->id != evtchnl->evt_id++))
                        continue;

                switch (event->type) {
                case XENDISPL_EVT_PG_FLIP:
                        xen_drm_front_on_frame_done(front_info, evtchnl->index,
                                                    event->op.pg_flip.fb_cookie);
                        break;
                }
        }
        page->in_cons = cons;
        /* ensure the updated consumer index is seen by the backend */
        virt_wmb();

out:
        spin_unlock_irqrestore(&front_info->io_lock, flags);
        return IRQ_HANDLED;
}
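
/*
 * Tear down a single channel: wake any waiters with -EIO, unbind the IRQ,
 * free the Xen event channel and return the shared ring page.
 */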
static void evtchnl_free(struct xen_drm_front_info *front_info,
                         struct xen_drm_front_evtchnl *evtchnl)
{
        void *page = NULL;

        if (evtchnl->type == EVTCHNL_TYPE_REQ)
                page = evtchnl->u.req.ring.sring;
        else if (evtchnl->type == EVTCHNL_TYPE_EVT)
                page = evtchnl->u.evt.page;
        if (!page)
                return;

        evtchnl->state = EVTCHNL_STATE_DISCONNECTED;

        if (evtchnl->type == EVTCHNL_TYPE_REQ) {
                /* release all who still wait for a response, if any */
                evtchnl->u.req.resp_status = -EIO;
                complete_all(&evtchnl->u.req.completion);
        }

        if (evtchnl->irq)
                unbind_from_irqhandler(evtchnl->irq, evtchnl);

        if (evtchnl->port)
                xenbus_free_evtchn(front_info->xb_dev, evtchnl->port);

        /* end access and free the page */
        xenbus_teardown_ring(&page, 1, &evtchnl->gref);

        memset(evtchnl, 0, sizeof(*evtchnl));
}
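
/*
 * Allocate one channel of the given type: set up the shared page and grant
 * reference, initialize the request ring for control channels, and bind the
 * Xen event channel to the matching interrupt handler.
 */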
static int evtchnl_alloc(struct xen_drm_front_info *front_info, int index,
                         struct xen_drm_front_evtchnl *evtchnl,
                         enum xen_drm_front_evtchnl_type type)
{
        struct xenbus_device *xb_dev = front_info->xb_dev;
        void *page;
        irq_handler_t handler;
        int ret;

        memset(evtchnl, 0, sizeof(*evtchnl));
        evtchnl->type = type;
        evtchnl->index = index;
        evtchnl->front_info = front_info;
        evtchnl->state = EVTCHNL_STATE_DISCONNECTED;

        ret = xenbus_setup_ring(xb_dev, GFP_NOIO | __GFP_HIGH, &page,
                                1, &evtchnl->gref);
        if (ret)
                goto fail;

        if (type == EVTCHNL_TYPE_REQ) {
                struct xen_displif_sring *sring;

                init_completion(&evtchnl->u.req.completion);
                mutex_init(&evtchnl->u.req.req_io_lock);
                sring = page;
                XEN_FRONT_RING_INIT(&evtchnl->u.req.ring, sring, XEN_PAGE_SIZE);

                handler = evtchnl_interrupt_ctrl;
        } else {
                evtchnl->u.evt.page = page;
                handler = evtchnl_interrupt_evt;
        }

        ret = xenbus_alloc_evtchn(xb_dev, &evtchnl->port);
        if (ret < 0)
                goto fail;

        ret = bind_evtchn_to_irqhandler(evtchnl->port,
                                        handler, 0, xb_dev->devicetype,
                                        evtchnl);
        if (ret < 0)
                goto fail;

        evtchnl->irq = ret;
        return 0;

fail:
        DRM_ERROR("Failed to allocate ring: %d\n", ret);
        return ret;
}
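
/*
 * Allocate a request/event channel pair for every configured connector;
 * on any failure all already allocated channels are freed.
 */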
int xen_drm_front_evtchnl_create_all(struct xen_drm_front_info *front_info)
{
        struct xen_drm_front_cfg *cfg;
        int ret, conn;

        cfg = &front_info->cfg;

        front_info->evt_pairs =
                        kcalloc(cfg->num_connectors,
                                sizeof(struct xen_drm_front_evtchnl_pair),
                                GFP_KERNEL);
        if (!front_info->evt_pairs) {
                ret = -ENOMEM;
                goto fail;
        }

        for (conn = 0; conn < cfg->num_connectors; conn++) {
                ret = evtchnl_alloc(front_info, conn,
                                    &front_info->evt_pairs[conn].req,
                                    EVTCHNL_TYPE_REQ);
                if (ret < 0) {
                        DRM_ERROR("Error allocating control channel\n");
                        goto fail;
                }

                ret = evtchnl_alloc(front_info, conn,
                                    &front_info->evt_pairs[conn].evt,
                                    EVTCHNL_TYPE_EVT);
                if (ret < 0) {
                        DRM_ERROR("Error allocating in-event channel\n");
                        goto fail;
                }
        }
        front_info->num_evt_pairs = cfg->num_connectors;
        return 0;

fail:
        xen_drm_front_evtchnl_free_all(front_info);
        return ret;
}
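
/*
 * Publish one channel to XenStore: write its grant reference and event
 * channel port under the given connector path.
 */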
static int evtchnl_publish(struct xenbus_transaction xbt,
                           struct xen_drm_front_evtchnl *evtchnl,
                           const char *path, const char *node_ring,
                           const char *node_chnl)
{
        struct xenbus_device *xb_dev = evtchnl->front_info->xb_dev;
        int ret;

        /* write the ring's grant reference */
        ret = xenbus_printf(xbt, path, node_ring, "%u", evtchnl->gref);
        if (ret < 0) {
                xenbus_dev_error(xb_dev, ret, "writing ring-ref");
                return ret;
        }

        /* write the event channel port */
        ret = xenbus_printf(xbt, path, node_chnl, "%u", evtchnl->port);
        if (ret < 0) {
                xenbus_dev_error(xb_dev, ret, "writing event channel");
                return ret;
        }

        return 0;
}
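
/*
 * Publish all channel pairs within a single XenStore transaction,
 * restarting the transaction on -EAGAIN.
 */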
int xen_drm_front_evtchnl_publish_all(struct xen_drm_front_info *front_info)
{
        struct xenbus_transaction xbt;
        struct xen_drm_front_cfg *plat_data;
        int ret, conn;

        plat_data = &front_info->cfg;

again:
        ret = xenbus_transaction_start(&xbt);
        if (ret < 0) {
                xenbus_dev_fatal(front_info->xb_dev, ret,
                                 "starting transaction");
                return ret;
        }

        for (conn = 0; conn < plat_data->num_connectors; conn++) {
                ret = evtchnl_publish(xbt, &front_info->evt_pairs[conn].req,
                                      plat_data->connectors[conn].xenstore_path,
                                      XENDISPL_FIELD_REQ_RING_REF,
                                      XENDISPL_FIELD_REQ_CHANNEL);
                if (ret < 0)
                        goto fail;

                ret = evtchnl_publish(xbt, &front_info->evt_pairs[conn].evt,
                                      plat_data->connectors[conn].xenstore_path,
                                      XENDISPL_FIELD_EVT_RING_REF,
                                      XENDISPL_FIELD_EVT_CHANNEL);
                if (ret < 0)
                        goto fail;
        }

        ret = xenbus_transaction_end(xbt, 0);
        if (ret < 0) {
                if (ret == -EAGAIN)
                        goto again;

                xenbus_dev_fatal(front_info->xb_dev, ret,
                                 "completing transaction");
                goto fail_to_end;
        }

        return 0;

fail:
        xenbus_transaction_end(xbt, 1);

fail_to_end:
        xenbus_dev_fatal(front_info->xb_dev, ret, "writing Xen store");
        return ret;
}
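
/*
 * Push the queued requests to the shared ring and kick the backend via
 * its event channel if it asked to be notified.
 */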
void xen_drm_front_evtchnl_flush(struct xen_drm_front_evtchnl *evtchnl)
{
        int notify;

        evtchnl->u.req.ring.req_prod_pvt++;
        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&evtchnl->u.req.ring, notify);
        if (notify)
                notify_remote_via_irq(evtchnl->irq);
}
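
/*
 * Set the state of all channels under io_lock, so the interrupt handlers,
 * which take the same lock, see a consistent value.
 */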
void xen_drm_front_evtchnl_set_state(struct xen_drm_front_info *front_info,
                                     enum xen_drm_front_evtchnl_state state)
{
        unsigned long flags;
        int i;

        if (!front_info->evt_pairs)
                return;

        spin_lock_irqsave(&front_info->io_lock, flags);
        for (i = 0; i < front_info->num_evt_pairs; i++) {
                front_info->evt_pairs[i].req.state = state;
                front_info->evt_pairs[i].evt.state = state;
        }
        spin_unlock_irqrestore(&front_info->io_lock, flags);
}
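
/*
 * Free every allocated channel pair and release the pair array itself.
 */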
void xen_drm_front_evtchnl_free_all(struct xen_drm_front_info *front_info)
{
        int i;

        if (!front_info->evt_pairs)
                return;

        for (i = 0; i < front_info->num_evt_pairs; i++) {
                evtchnl_free(front_info, &front_info->evt_pairs[i].req);
                evtchnl_free(front_info, &front_info->evt_pairs[i].evt);
        }

        kfree(front_info->evt_pairs);
        front_info->evt_pairs = NULL;
}