v3.1
/******************************************************************************
 * Client-facing interface for the Xenbus driver.  In other words, the
 * interface between the Xenbus and the device-specific code, be it the
 * frontend or the backend of that driver.
 *
 * Copyright (C) 2005 XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <asm/xen/hypervisor.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>

const char *xenbus_strstate(enum xenbus_state state)
{
	static const char *const name[] = {
		[ XenbusStateUnknown      ] = "Unknown",
		[ XenbusStateInitialising ] = "Initialising",
		[ XenbusStateInitWait     ] = "InitWait",
		[ XenbusStateInitialised  ] = "Initialised",
		[ XenbusStateConnected    ] = "Connected",
		[ XenbusStateClosing      ] = "Closing",
		[ XenbusStateClosed	  ] = "Closed",
		[XenbusStateReconfiguring] = "Reconfiguring",
		[XenbusStateReconfigured] = "Reconfigured",
	};
	return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
}
EXPORT_SYMBOL_GPL(xenbus_strstate);
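
/*
 * Illustrative sketch (not part of the original file): xenbus_strstate() is
 * mostly useful for making log messages readable, e.g. in a driver's
 * otherend_changed handler.  The helper below is hypothetical.
 */
static void example_log_states(struct xenbus_device *dev,
			       enum xenbus_state backend_state)
{
	dev_dbg(&dev->dev, "frontend %s, backend %s\n",
		xenbus_strstate(dev->state),
		xenbus_strstate(backend_state));
}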

/**
 * xenbus_watch_path - register a watch
 * @dev: xenbus device
 * @path: path to watch
 * @watch: watch to register
 * @callback: callback to register
 *
 * Register a @watch on the given path, using the given xenbus_watch structure
 * for storage, and the given @callback function as the callback.  Return 0 on
 * success, or -errno on error.  On success, the given @path will be saved as
 * @watch->node, and remains the caller's to free.  On error, @watch->node will
 * be NULL, the device will switch to %XenbusStateClosing, and the error will
 * be saved in the store.
 */
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
		      struct xenbus_watch *watch,
		      void (*callback)(struct xenbus_watch *,
				       const char **, unsigned int))
{
	int err;

	watch->node = path;
	watch->callback = callback;

	err = register_xenbus_watch(watch);

	if (err) {
		watch->node = NULL;
		watch->callback = NULL;
		xenbus_dev_fatal(dev, err, "adding watch on %s", path);
	}

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_path);


/**
 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
 * @dev: xenbus device
 * @watch: watch to register
 * @callback: callback to register
 * @pathfmt: format of path to watch
 *
 * Register a watch on the given @path, using the given xenbus_watch
 * structure for storage, and the given @callback function as the callback.
 * Return 0 on success, or -errno on error.  On success, the watched path
 * (@path/@path2) will be saved as @watch->node, and becomes the caller's to
 * kfree().  On error, watch->node will be NULL, so the caller has nothing to
 * free, the device will switch to %XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_watch_pathfmt(struct xenbus_device *dev,
			 struct xenbus_watch *watch,
			 void (*callback)(struct xenbus_watch *,
					const char **, unsigned int),
			 const char *pathfmt, ...)
{
	int err;
	va_list ap;
	char *path;

	va_start(ap, pathfmt);
	path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
	va_end(ap);

	if (!path) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
		return -ENOMEM;
	}
	err = xenbus_watch_path(dev, path, watch, callback);

	if (err)
		kfree(path);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);
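
/*
 * Illustrative sketch (not part of the original file): registering a watch
 * on the other end's "state" node with the v3.1 callback signature.  All
 * "example_*" names are hypothetical; XS_WATCH_PATH is assumed to come from
 * xen/interface/io/xs_wire.h.
 */
struct example_info {
	struct xenbus_device *dev;
	struct xenbus_watch otherend_watch;
};

static void example_otherend_changed(struct xenbus_watch *watch,
				     const char **vec, unsigned int len)
{
	struct example_info *info =
		container_of(watch, struct example_info, otherend_watch);

	/* vec[XS_WATCH_PATH] is the path that fired; re-read the node here. */
	dev_dbg(&info->dev->dev, "watch fired on %s\n", vec[XS_WATCH_PATH]);
}

static int example_register_watch(struct example_info *info)
{
	/*
	 * On success the formatted path is stored in otherend_watch.node and
	 * is ours to kfree() after unregister_xenbus_watch(); on failure
	 * xenbus_watch_pathfmt() has already switched the device to Closing.
	 */
	return xenbus_watch_pathfmt(info->dev, &info->otherend_watch,
				    example_otherend_changed,
				    "%s/state", info->dev->otherend);
}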

static void xenbus_switch_fatal(struct xenbus_device *, int, int,
				const char *, ...);

static int
__xenbus_switch_state(struct xenbus_device *dev,
		      enum xenbus_state state, int depth)
{
	/* We check whether the state is currently set to the given value, and
	   if not, then the state is set.  We don't want to unconditionally
	   write the given state, because we don't want to fire watches
	   unnecessarily.  Furthermore, if the node has gone, we don't write
	   to it, as the device will be tearing down, and we don't want to
	   resurrect that directory.

	   Note that, because of this cached value of our state, this
	   function will not take a caller's Xenstore transaction
	   (something it was trying to in the past) because dev->state
	   would not get reset if the transaction was aborted.
	 */

	struct xenbus_transaction xbt;
	int current_state;
	int err, abort;

	if (state == dev->state)
		return 0;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "starting transaction");
		return 0;
	}

	err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
	if (err != 1)
		goto abort;

	err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "writing new state");
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		if (err == -EAGAIN && !abort)
			goto again;
		xenbus_switch_fatal(dev, depth, err, "ending transaction");
	} else
		dev->state = state;

	return 0;
}

/**
 * xenbus_switch_state
 * @dev: xenbus device
 * @state: new state
 *
 * Advertise in the store a change of the given driver to the given new_state.
 * Return 0 on success, or -errno on error.  On error, the device will switch
 * to XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
	return __xenbus_switch_state(dev, state, 0);
}

EXPORT_SYMBOL_GPL(xenbus_switch_state);

int xenbus_frontend_closed(struct xenbus_device *dev)
{
	xenbus_switch_state(dev, XenbusStateClosed);
	complete(&dev->down);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);

/**
 * Return the path to the error node for the given device, or NULL on failure.
 * If the value returned is non-NULL, then it is the caller's to kfree.
 */
static char *error_path(struct xenbus_device *dev)
{
	return kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
}


static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
				const char *fmt, va_list ap)
{
	int ret;
	unsigned int len;
	char *printf_buffer = NULL;
	char *path_buffer = NULL;

#define PRINTF_BUFFER_SIZE 4096
	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
	if (printf_buffer == NULL)
		goto fail;

	len = sprintf(printf_buffer, "%i ", -err);
	ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);

	BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1);

	dev_err(&dev->dev, "%s\n", printf_buffer);

	path_buffer = error_path(dev);

	if (path_buffer == NULL) {
		dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
		       dev->nodename, printf_buffer);
		goto fail;
	}

	if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) {
		dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
		       dev->nodename, printf_buffer);
		goto fail;
	}

fail:
	kfree(printf_buffer);
	kfree(path_buffer);
}


/**
 * xenbus_dev_error
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Report the given negative errno into the store, along with the given
 * formatted message.
 */
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);

/**
 * xenbus_dev_fatal
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
 * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
 * closedown of this driver and its peer.
 */

void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);

/**
 * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps
 * avoiding recursion within xenbus_switch_state.
 */
static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
				const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	if (!depth)
		__xenbus_switch_state(dev, XenbusStateClosing, 1);
}

/**
 * xenbus_grant_ring
 * @dev: xenbus device
 * @ring_mfn: mfn of ring to grant
 *
 * Grant access to the given @ring_mfn to the peer of the given device.  Return
 * 0 on success, or -errno on error.  On error, the device will switch to
 * XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
{
	int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
	if (err < 0)
		xenbus_dev_fatal(dev, err, "granting access to ring page");
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_grant_ring);
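
/*
 * Illustrative sketch (not part of the original file): a frontend grants its
 * shared ring page with this v3.1 single-page API and publishes the grant
 * reference in its own directory.  "example_setup_ring" is hypothetical, and
 * the use of virt_to_mfn() on the ring address is an assumption about the
 * caller, not something this file requires.
 */
static int example_setup_ring(struct xenbus_device *dev, void *sring)
{
	int err;

	/* Returns the grant reference on success, or a negative errno. */
	err = xenbus_grant_ring(dev, virt_to_mfn(sring));
	if (err < 0)
		return err;	/* device already switched to Closing */

	/* err now holds the grant reference to publish in the store. */
	return xenbus_printf(XBT_NIL, dev->nodename, "ring-ref", "%u", err);
}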


/**
 * Allocate an event channel for the given xenbus_device, assigning the newly
 * created local port to *port.  Return 0 on success, or -errno on error.  On
 * error, the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
{
	struct evtchn_alloc_unbound alloc_unbound;
	int err;

	alloc_unbound.dom = DOMID_SELF;
	alloc_unbound.remote_dom = dev->otherend_id;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);
	if (err)
		xenbus_dev_fatal(dev, err, "allocating event channel");
	else
		*port = alloc_unbound.port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
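
/*
 * Illustrative sketch (not part of the original file): allocating an event
 * channel and publishing its port in the driver's directory inside a
 * transaction, so the peer sees a consistent set of keys.  All "example_*"
 * names are hypothetical.
 */
static int example_publish_evtchn(struct xenbus_device *dev, int *evtchn)
{
	struct xenbus_transaction xbt;
	int err;

	err = xenbus_alloc_evtchn(dev, evtchn);
	if (err)
		return err;	/* device already switched to Closing */

again:
	err = xenbus_transaction_start(&xbt);
	if (err)
		goto free;
	err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", *evtchn);
	if (err) {
		xenbus_transaction_end(xbt, 1);
		goto free;
	}
	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		goto free;
	return 0;

free:
	xenbus_free_evtchn(dev, *evtchn);
	return err;
}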


/**
 * Bind to an existing interdomain event channel in another domain. Returns 0
 * on success and stores the local port in *port. On error, returns -errno,
 * switches the device to XenbusStateClosing, and saves the error in XenStore.
 */
int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom = dev->otherend_id;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);
	if (err)
		xenbus_dev_fatal(dev, err,
				 "binding to event channel %d from domain %d",
				 remote_port, dev->otherend_id);
	else
		*port = bind_interdomain.local_port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_bind_evtchn);


/**
 * Free an existing event channel. Returns 0 on success or -errno on error.
 */
int xenbus_free_evtchn(struct xenbus_device *dev, int port)
{
	struct evtchn_close close;
	int err;

	close.port = port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
	if (err)
		xenbus_dev_error(dev, err, "freeing event channel %d", port);

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);


/**
 * xenbus_map_ring_valloc
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Based on Rusty Russell's skeleton driver's map_page.
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
 * page to that address, and sets *vaddr to that address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * or -ENOMEM on error. If an error is returned, device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
{
	struct gnttab_map_grant_ref op = {
		.flags = GNTMAP_host_map,
		.ref   = gnt_ref,
		.dom   = dev->otherend_id,
	};
	struct vm_struct *area;

	*vaddr = NULL;

	area = xen_alloc_vm_area(PAGE_SIZE);
	if (!area)
		return -ENOMEM;

	op.host_addr = (unsigned long)area->addr;

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay) {
		xen_free_vm_area(area);
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
		return op.status;
	}

	/* Stuff the handle in an unused field */
	area->phys_addr = (unsigned long)op.handle;

	*vaddr = area->addr;
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
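
/*
 * Illustrative sketch (not part of the original file): a backend maps the
 * grant reference it reads from the frontend's directory, and unmaps it again
 * on disconnect.  "example_connect_ring" and "example_disconnect_ring" are
 * hypothetical.
 */
static int example_connect_ring(struct xenbus_device *dev, void **ring)
{
	int gnt_ref, err;

	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref", "%d", &gnt_ref);
	if (err != 1)
		return err < 0 ? err : -EINVAL;

	/* Allocates the VA space, maps the peer's page and fills *ring. */
	return xenbus_map_ring_valloc(dev, gnt_ref, ring);
}

static void example_disconnect_ring(struct xenbus_device *dev, void *ring)
{
	if (ring)
		xenbus_unmap_ring_vfree(dev, ring);
}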


/**
 * xenbus_map_ring
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @handle: pointer to grant handle to be filled
 * @vaddr: address to be mapped to
 *
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring does not allocate the virtual address space (you must do
 * this yourself!). It only maps in the page to the specified address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * or -ENOMEM on error. If an error is returned, device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
		    grant_handle_t *handle, void *vaddr)
{
	struct gnttab_map_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
		.flags     = GNTMAP_host_map,
		.ref       = gnt_ref,
		.dom       = dev->otherend_id,
	};

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay) {
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
	} else
		*handle = op.handle;

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring);


/**
 * xenbus_unmap_ring_vfree
 * @dev: xenbus device
 * @vaddr: addr to unmap
 *
 * Based on Rusty Russell's skeleton driver's unmap_page.
 * Unmap a page of memory in this domain that was imported from another domain.
 * Use xenbus_unmap_ring_vfree if you mapped in your memory with
 * xenbus_map_ring_valloc (it will free the virtual address space).
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
	struct vm_struct *area;
	struct gnttab_unmap_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
	};

	/* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr)
	 * method so that we don't have to muck with vmalloc internals here.
	 * We could force the user to hang on to their struct vm_struct from
	 * xenbus_map_ring_valloc, but these 6 lines considerably simplify
	 * this API.
	 */
	read_lock(&vmlist_lock);
	for (area = vmlist; area != NULL; area = area->next) {
		if (area->addr == vaddr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!area) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	op.handle = (grant_handle_t)area->phys_addr;

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	if (op.status == GNTST_okay)
		xen_free_vm_area(area);
	else
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 (int16_t)area->phys_addr, op.status);

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);


/**
 * xenbus_unmap_ring
 * @dev: xenbus device
 * @handle: grant handle
 * @vaddr: addr to unmap
 *
 * Unmap a page of memory in this domain that was imported from another domain.
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring(struct xenbus_device *dev,
		      grant_handle_t handle, void *vaddr)
{
	struct gnttab_unmap_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
		.handle    = handle,
	};

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay)
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 handle, op.status);

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring);


/**
 * xenbus_read_driver_state
 * @path: path for driver
 *
 * Return the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
	enum xenbus_state result;
	int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
	if (err)
		result = XenbusStateUnknown;

	return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
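
/*
 * Illustrative sketch (not part of the original file): checking the peer's
 * state, e.g. while waiting for a backend to reach Connected.  The helper
 * name is hypothetical.
 */
static bool example_peer_connected(struct xenbus_device *dev)
{
	return xenbus_read_driver_state(dev->otherend) == XenbusStateConnected;
}
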
v5.4
/******************************************************************************
 * Client-facing interface for the Xenbus driver.  In other words, the
 * interface between the Xenbus and the device-specific code, be it the
 * frontend or the backend of that driver.
 *
 * Copyright (C) 2005 XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/xen/hypervisor.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/xen.h>
#include <xen/features.h>

#include "xenbus.h"

#define XENBUS_PAGES(_grants)	(DIV_ROUND_UP(_grants, XEN_PFN_PER_PAGE))

#define XENBUS_MAX_RING_PAGES	(XENBUS_PAGES(XENBUS_MAX_RING_GRANTS))

struct xenbus_map_node {
	struct list_head next;
	union {
		struct {
			struct vm_struct *area;
		} pv;
		struct {
			struct page *pages[XENBUS_MAX_RING_PAGES];
			unsigned long addrs[XENBUS_MAX_RING_GRANTS];
			void *addr;
		} hvm;
	};
	grant_handle_t handles[XENBUS_MAX_RING_GRANTS];
	unsigned int   nr_handles;
};

static DEFINE_SPINLOCK(xenbus_valloc_lock);
static LIST_HEAD(xenbus_valloc_pages);

struct xenbus_ring_ops {
	int (*map)(struct xenbus_device *dev,
		   grant_ref_t *gnt_refs, unsigned int nr_grefs,
		   void **vaddr);
	int (*unmap)(struct xenbus_device *dev, void *vaddr);
};

static const struct xenbus_ring_ops *ring_ops __read_mostly;

const char *xenbus_strstate(enum xenbus_state state)
{
	static const char *const name[] = {
		[ XenbusStateUnknown      ] = "Unknown",
		[ XenbusStateInitialising ] = "Initialising",
		[ XenbusStateInitWait     ] = "InitWait",
		[ XenbusStateInitialised  ] = "Initialised",
		[ XenbusStateConnected    ] = "Connected",
		[ XenbusStateClosing      ] = "Closing",
		[ XenbusStateClosed	  ] = "Closed",
		[XenbusStateReconfiguring] = "Reconfiguring",
		[XenbusStateReconfigured] = "Reconfigured",
	};
	return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
}
EXPORT_SYMBOL_GPL(xenbus_strstate);

/**
 * xenbus_watch_path - register a watch
 * @dev: xenbus device
 * @path: path to watch
 * @watch: watch to register
 * @callback: callback to register
 *
 * Register a @watch on the given path, using the given xenbus_watch structure
 * for storage, and the given @callback function as the callback.  Return 0 on
 * success, or -errno on error.  On success, the given @path will be saved as
 * @watch->node, and remains the caller's to free.  On error, @watch->node will
 * be NULL, the device will switch to %XenbusStateClosing, and the error will
 * be saved in the store.
 */
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
		      struct xenbus_watch *watch,
		      void (*callback)(struct xenbus_watch *,
				       const char *, const char *))
{
	int err;

	watch->node = path;
	watch->callback = callback;

	err = register_xenbus_watch(watch);

	if (err) {
		watch->node = NULL;
		watch->callback = NULL;
		xenbus_dev_fatal(dev, err, "adding watch on %s", path);
	}

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_path);


/**
 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
 * @dev: xenbus device
 * @watch: watch to register
 * @callback: callback to register
 * @pathfmt: format of path to watch
 *
 * Register a watch on the given @path, using the given xenbus_watch
 * structure for storage, and the given @callback function as the callback.
 * Return 0 on success, or -errno on error.  On success, the watched path
 * (@path/@path2) will be saved as @watch->node, and becomes the caller's to
 * kfree().  On error, watch->node will be NULL, so the caller has nothing to
 * free, the device will switch to %XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_watch_pathfmt(struct xenbus_device *dev,
			 struct xenbus_watch *watch,
			 void (*callback)(struct xenbus_watch *,
					  const char *, const char *),
			 const char *pathfmt, ...)
{
	int err;
	va_list ap;
	char *path;

	va_start(ap, pathfmt);
	path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
	va_end(ap);

	if (!path) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
		return -ENOMEM;
	}
	err = xenbus_watch_path(dev, path, watch, callback);

	if (err)
		kfree(path);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);
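
/*
 * Illustrative sketch (not part of the original file): the same watch
 * registration as in older kernels, but with the v5.4 callback signature,
 * which passes the fired path and token directly.  All "example_*" names are
 * hypothetical.
 */
struct example_info {
	struct xenbus_device *dev;
	struct xenbus_watch otherend_watch;
};

static void example_otherend_changed(struct xenbus_watch *watch,
				     const char *path, const char *token)
{
	struct example_info *info =
		container_of(watch, struct example_info, otherend_watch);

	dev_dbg(&info->dev->dev, "watch fired on %s\n", path);
}

static int example_register_watch(struct example_info *info)
{
	return xenbus_watch_pathfmt(info->dev, &info->otherend_watch,
				    example_otherend_changed,
				    "%s/state", info->dev->otherend);
}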

static void xenbus_switch_fatal(struct xenbus_device *, int, int,
				const char *, ...);

static int
__xenbus_switch_state(struct xenbus_device *dev,
		      enum xenbus_state state, int depth)
{
	/* We check whether the state is currently set to the given value, and
	   if not, then the state is set.  We don't want to unconditionally
	   write the given state, because we don't want to fire watches
	   unnecessarily.  Furthermore, if the node has gone, we don't write
	   to it, as the device will be tearing down, and we don't want to
	   resurrect that directory.

	   Note that, because of this cached value of our state, this
	   function will not take a caller's Xenstore transaction
	   (something it was trying to in the past) because dev->state
	   would not get reset if the transaction was aborted.
	 */

	struct xenbus_transaction xbt;
	int current_state;
	int err, abort;

	if (state == dev->state)
		return 0;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "starting transaction");
		return 0;
	}

	err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
	if (err != 1)
		goto abort;

	err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "writing new state");
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		if (err == -EAGAIN && !abort)
			goto again;
		xenbus_switch_fatal(dev, depth, err, "ending transaction");
	} else
		dev->state = state;

	return 0;
}

/**
 * xenbus_switch_state
 * @dev: xenbus device
 * @state: new state
 *
 * Advertise in the store a change of the given driver to the given new_state.
 * Return 0 on success, or -errno on error.  On error, the device will switch
 * to XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
	return __xenbus_switch_state(dev, state, 0);
}

EXPORT_SYMBOL_GPL(xenbus_switch_state);

int xenbus_frontend_closed(struct xenbus_device *dev)
{
	xenbus_switch_state(dev, XenbusStateClosed);
	complete(&dev->down);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);

static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
				const char *fmt, va_list ap)
{
	unsigned int len;
	char *printf_buffer;
	char *path_buffer;

#define PRINTF_BUFFER_SIZE 4096

	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
	if (!printf_buffer)
		return;

	len = sprintf(printf_buffer, "%i ", -err);
	vsnprintf(printf_buffer + len, PRINTF_BUFFER_SIZE - len, fmt, ap);

	dev_err(&dev->dev, "%s\n", printf_buffer);

	path_buffer = kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
	if (path_buffer)
		xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer);

	kfree(printf_buffer);
	kfree(path_buffer);
}

/**
 * xenbus_dev_error
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Report the given negative errno into the store, along with the given
 * formatted message.
 */
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);

/**
 * xenbus_dev_fatal
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
 * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
 * closedown of this driver and its peer.
 */

void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);

/**
 * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps
 * avoiding recursion within xenbus_switch_state.
 */
static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
				const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	if (!depth)
		__xenbus_switch_state(dev, XenbusStateClosing, 1);
}

/**
 * xenbus_grant_ring
 * @dev: xenbus device
 * @vaddr: starting virtual address of the ring
 * @nr_pages: number of pages to be granted
 * @grefs: grant reference array to be filled in
 *
 * Grant access to the given @vaddr to the peer of the given device.
 * Then fill in @grefs with grant references.  Return 0 on success, or
 * -errno on error.  On error, the device will switch to
 * XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
		      unsigned int nr_pages, grant_ref_t *grefs)
{
	int err;
	int i, j;

	for (i = 0; i < nr_pages; i++) {
		err = gnttab_grant_foreign_access(dev->otherend_id,
						  virt_to_gfn(vaddr), 0);
		if (err < 0) {
			xenbus_dev_fatal(dev, err,
					 "granting access to ring page");
			goto fail;
		}
		grefs[i] = err;

		vaddr = vaddr + XEN_PAGE_SIZE;
	}

	return 0;

fail:
	for (j = 0; j < i; j++)
		gnttab_end_foreign_access_ref(grefs[j], 0);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_grant_ring);
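
/*
 * Illustrative sketch (not part of the original file): with the v5.4 API a
 * frontend grants a multi-page ring in one call and publishes the references
 * itself.  "example_grant_ring" and the "ring-ref%u" key naming are
 * hypothetical conventions of the caller.
 */
static int example_grant_ring(struct xenbus_device *dev, void *sring,
			      unsigned int nr_pages)
{
	grant_ref_t grefs[XENBUS_MAX_RING_GRANTS];
	char node[16];
	unsigned int i;
	int err;

	if (nr_pages > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	err = xenbus_grant_ring(dev, sring, nr_pages, grefs);
	if (err)
		return err;	/* device already switched to Closing */

	for (i = 0; i < nr_pages; i++) {
		snprintf(node, sizeof(node), "ring-ref%u", i);
		err = xenbus_printf(XBT_NIL, dev->nodename, node,
				    "%u", grefs[i]);
		if (err)
			break;
	}
	return err;
}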


/**
 * Allocate an event channel for the given xenbus_device, assigning the newly
 * created local port to *port.  Return 0 on success, or -errno on error.  On
 * error, the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
{
	struct evtchn_alloc_unbound alloc_unbound;
	int err;

	alloc_unbound.dom = DOMID_SELF;
	alloc_unbound.remote_dom = dev->otherend_id;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);
	if (err)
		xenbus_dev_fatal(dev, err, "allocating event channel");
	else
		*port = alloc_unbound.port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);


/**
 * Free an existing event channel. Returns 0 on success or -errno on error.
 */
int xenbus_free_evtchn(struct xenbus_device *dev, int port)
{
	struct evtchn_close close;
	int err;

	close.port = port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
	if (err)
		xenbus_dev_error(dev, err, "freeing event channel %d", port);

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);


/**
 * xenbus_map_ring_valloc
 * @dev: xenbus device
 * @gnt_refs: grant reference array
 * @nr_grefs: number of grant references
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Map @nr_grefs pages of memory into this domain from another
 * domain's grant table.  xenbus_map_ring_valloc allocates @nr_grefs
 * pages of virtual address space, maps the pages to that address, and
 * sets *vaddr to that address.  Returns 0 on success, and GNTST_*
 * (see xen/include/interface/grant_table.h) or -ENOMEM / -EINVAL on
 * error. If an error is returned, device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
			   unsigned int nr_grefs, void **vaddr)
{
	return ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
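
/*
 * Illustrative sketch (not part of the original file): a backend maps a
 * multi-page ring from an array of grant references it has already read from
 * the frontend's directory.  "example_map_ring" is hypothetical.
 */
static int example_map_ring(struct xenbus_device *dev, grant_ref_t *grefs,
			    unsigned int nr_grefs, void **ring)
{
	int err;

	/* Dispatches to the PV or HVM implementation via ring_ops->map(). */
	err = xenbus_map_ring_valloc(dev, grefs, nr_grefs, ring);
	if (err)
		return err;

	/* ... use *ring; on disconnect undo the mapping with:
	 *	xenbus_unmap_ring_vfree(dev, *ring);
	 */
	return 0;
}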

/* N.B. sizeof(phys_addr_t) doesn't always equal to sizeof(unsigned
 * long), e.g. 32-on-64.  Caller is responsible for preparing the
 * right array to feed into this function */
static int __xenbus_map_ring(struct xenbus_device *dev,
			     grant_ref_t *gnt_refs,
			     unsigned int nr_grefs,
			     grant_handle_t *handles,
			     phys_addr_t *addrs,
			     unsigned int flags,
			     bool *leaked)
{
	struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
	int i, j;
	int err = GNTST_okay;

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	for (i = 0; i < nr_grefs; i++) {
		memset(&map[i], 0, sizeof(map[i]));
		gnttab_set_map_op(&map[i], addrs[i], flags, gnt_refs[i],
				  dev->otherend_id);
		handles[i] = INVALID_GRANT_HANDLE;
	}

	gnttab_batch_map(map, i);

	for (i = 0; i < nr_grefs; i++) {
		if (map[i].status != GNTST_okay) {
			err = map[i].status;
			xenbus_dev_fatal(dev, map[i].status,
					 "mapping in shared page %d from domain %d",
					 gnt_refs[i], dev->otherend_id);
			goto fail;
		} else
			handles[i] = map[i].handle;
	}

	return GNTST_okay;

 fail:
	for (i = j = 0; i < nr_grefs; i++) {
		if (handles[i] != INVALID_GRANT_HANDLE) {
			memset(&unmap[j], 0, sizeof(unmap[j]));
			gnttab_set_unmap_op(&unmap[j], (phys_addr_t)addrs[i],
					    GNTMAP_host_map, handles[i]);
			j++;
		}
	}

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, j))
		BUG();

	*leaked = false;
	for (i = 0; i < j; i++) {
		if (unmap[i].status != GNTST_okay) {
			*leaked = true;
			break;
		}
	}

	return err;
}

struct map_ring_valloc_hvm
{
	unsigned int idx;

	/* Why do we need two arrays? See comment of __xenbus_map_ring */
	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
	unsigned long addrs[XENBUS_MAX_RING_GRANTS];
};

static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
					    unsigned int goffset,
					    unsigned int len,
					    void *data)
{
	struct map_ring_valloc_hvm *info = data;
	unsigned long vaddr = (unsigned long)gfn_to_virt(gfn);

	info->phys_addrs[info->idx] = vaddr;
	info->addrs[info->idx] = vaddr;

	info->idx++;
}

static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
				      grant_ref_t *gnt_ref,
				      unsigned int nr_grefs,
				      void **vaddr)
{
	struct xenbus_map_node *node;
	int err;
	void *addr;
	bool leaked = false;
	struct map_ring_valloc_hvm info = {
		.idx = 0,
	};
	unsigned int nr_pages = XENBUS_PAGES(nr_grefs);

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	*vaddr = NULL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	err = alloc_xenballooned_pages(nr_pages, node->hvm.pages);
	if (err)
		goto out_err;

	gnttab_foreach_grant(node->hvm.pages, nr_grefs,
			     xenbus_map_ring_setup_grant_hvm,
			     &info);

	err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
				info.phys_addrs, GNTMAP_host_map, &leaked);
	node->nr_handles = nr_grefs;

	if (err)
		goto out_free_ballooned_pages;

	addr = vmap(node->hvm.pages, nr_pages, VM_MAP | VM_IOREMAP,
		    PAGE_KERNEL);
	if (!addr) {
		err = -ENOMEM;
		goto out_xenbus_unmap_ring;
	}

	node->hvm.addr = addr;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	*vaddr = addr;
	return 0;

 out_xenbus_unmap_ring:
	if (!leaked)
		xenbus_unmap_ring(dev, node->handles, nr_grefs, info.addrs);
	else
		pr_alert("leaking %p size %u page(s)",
			 addr, nr_pages);
 out_free_ballooned_pages:
	if (!leaked)
		free_xenballooned_pages(nr_pages, node->hvm.pages);
 out_err:
	kfree(node);
	return err;
}


/**
 * xenbus_map_ring
 * @dev: xenbus device
 * @gnt_refs: grant reference array
 * @nr_grefs: number of grant reference
 * @handles: pointer to grant handle to be filled
 * @vaddrs: addresses to be mapped to
 * @leaked: fail to clean up a failed map, caller should not free vaddr
 *
 * Map pages of memory into this domain from another domain's grant table.
 * xenbus_map_ring does not allocate the virtual address space (you must do
 * this yourself!). It only maps in the pages to the specified address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * or -ENOMEM / -EINVAL on error. If an error is returned, device will switch to
 * XenbusStateClosing and the first error message will be saved in XenStore.
 * Further more if we fail to map the ring, caller should check @leaked.
 * If @leaked is not zero it means xenbus_map_ring fails to clean up, caller
 * should not free the address space of @vaddr.
 */
int xenbus_map_ring(struct xenbus_device *dev, grant_ref_t *gnt_refs,
		    unsigned int nr_grefs, grant_handle_t *handles,
		    unsigned long *vaddrs, bool *leaked)
{
	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
	int i;

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	for (i = 0; i < nr_grefs; i++)
		phys_addrs[i] = (unsigned long)vaddrs[i];

	return __xenbus_map_ring(dev, gnt_refs, nr_grefs, handles,
				 phys_addrs, GNTMAP_host_map, leaked);
}
EXPORT_SYMBOL_GPL(xenbus_map_ring);


/**
 * xenbus_unmap_ring_vfree
 * @dev: xenbus device
 * @vaddr: addr to unmap
 *
 * Based on Rusty Russell's skeleton driver's unmap_page.
 * Unmap a page of memory in this domain that was imported from another domain.
 * Use xenbus_unmap_ring_vfree if you mapped in your memory with
 * xenbus_map_ring_valloc (it will free the virtual address space).
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
	return ring_ops->unmap(dev, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);

#ifdef CONFIG_XEN_PV
static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
				     grant_ref_t *gnt_refs,
				     unsigned int nr_grefs,
				     void **vaddr)
{
	struct xenbus_map_node *node;
	struct vm_struct *area;
	pte_t *ptes[XENBUS_MAX_RING_GRANTS];
	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
	int err = GNTST_okay;
	int i;
	bool leaked;

	*vaddr = NULL;

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes);
	if (!area) {
		kfree(node);
		return -ENOMEM;
	}

	for (i = 0; i < nr_grefs; i++)
		phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;

	err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
				phys_addrs,
				GNTMAP_host_map | GNTMAP_contains_pte,
				&leaked);
	if (err)
		goto failed;

	node->nr_handles = nr_grefs;
	node->pv.area = area;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	*vaddr = area->addr;
	return 0;

failed:
	if (!leaked)
		free_vm_area(area);
	else
		pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);

	kfree(node);
	return err;
}

static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
{
	struct xenbus_map_node *node;
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
	unsigned int level;
	int i;
	bool leaked = false;
	int err;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		if (node->pv.area->addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = NULL;
 found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	for (i = 0; i < node->nr_handles; i++) {
		unsigned long addr;

		memset(&unmap[i], 0, sizeof(unmap[i]));
		addr = (unsigned long)vaddr + (XEN_PAGE_SIZE * i);
		unmap[i].host_addr = arbitrary_virt_to_machine(
			lookup_address(addr, &level)).maddr;
		unmap[i].dev_bus_addr = 0;
		unmap[i].handle = node->handles[i];
	}

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
		BUG();

	err = GNTST_okay;
	leaked = false;
	for (i = 0; i < node->nr_handles; i++) {
		if (unmap[i].status != GNTST_okay) {
			leaked = true;
			xenbus_dev_error(dev, unmap[i].status,
					 "unmapping page at handle %d error %d",
					 node->handles[i], unmap[i].status);
			err = unmap[i].status;
			break;
		}
	}

	if (!leaked)
		free_vm_area(node->pv.area);
	else
		pr_alert("leaking VM area %p size %u page(s)",
			 node->pv.area, node->nr_handles);

	kfree(node);
	return err;
}

static const struct xenbus_ring_ops ring_ops_pv = {
	.map = xenbus_map_ring_valloc_pv,
	.unmap = xenbus_unmap_ring_vfree_pv,
};
#endif

struct unmap_ring_vfree_hvm
{
	unsigned int idx;
	unsigned long addrs[XENBUS_MAX_RING_GRANTS];
};

static void xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn,
					      unsigned int goffset,
					      unsigned int len,
					      void *data)
{
	struct unmap_ring_vfree_hvm *info = data;

	info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn);

	info->idx++;
}

static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
{
	int rv;
	struct xenbus_map_node *node;
	void *addr;
	struct unmap_ring_vfree_hvm info = {
		.idx = 0,
	};
	unsigned int nr_pages;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		addr = node->hvm.addr;
		if (addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = addr = NULL;
 found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	nr_pages = XENBUS_PAGES(node->nr_handles);

	gnttab_foreach_grant(node->hvm.pages, node->nr_handles,
			     xenbus_unmap_ring_setup_grant_hvm,
			     &info);

	rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
			       info.addrs);
	if (!rv) {
		vunmap(vaddr);
		free_xenballooned_pages(nr_pages, node->hvm.pages);
	}
	else
		WARN(1, "Leaking %p, size %u page(s)\n", vaddr, nr_pages);

	kfree(node);
	return rv;
}

/**
 * xenbus_unmap_ring
 * @dev: xenbus device
 * @handles: grant handle array
 * @nr_handles: number of handles in the array
 * @vaddrs: addresses to unmap
 *
 * Unmap memory in this domain that was imported from another domain.
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring(struct xenbus_device *dev,
		      grant_handle_t *handles, unsigned int nr_handles,
		      unsigned long *vaddrs)
{
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
	int i;
	int err;

	if (nr_handles > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	for (i = 0; i < nr_handles; i++)
		gnttab_set_unmap_op(&unmap[i], vaddrs[i],
				    GNTMAP_host_map, handles[i]);

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
		BUG();

	err = GNTST_okay;
	for (i = 0; i < nr_handles; i++) {
		if (unmap[i].status != GNTST_okay) {
			xenbus_dev_error(dev, unmap[i].status,
					 "unmapping page at handle %d error %d",
					 handles[i], unmap[i].status);
			err = unmap[i].status;
			break;
		}
	}

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring);


/**
 * xenbus_read_driver_state
 * @path: path for driver
 *
 * Return the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
	enum xenbus_state result;
	int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
	if (err)
		result = XenbusStateUnknown;

	return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);

static const struct xenbus_ring_ops ring_ops_hvm = {
	.map = xenbus_map_ring_valloc_hvm,
	.unmap = xenbus_unmap_ring_vfree_hvm,
};

void __init xenbus_ring_ops_init(void)
{
#ifdef CONFIG_XEN_PV
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		ring_ops = &ring_ops_pv;
	else
#endif
		ring_ops = &ring_ops_hvm;
}