v6.8
  1/*
  2 * Driver giving user-space access to the kernel's xenbus connection
  3 * to xenstore.
  4 *
  5 * Copyright (c) 2005, Christian Limpach
  6 * Copyright (c) 2005, Rusty Russell, IBM Corporation
  7 *
  8 * This program is free software; you can redistribute it and/or
  9 * modify it under the terms of the GNU General Public License version 2
 10 * as published by the Free Software Foundation; or, when distributed
 11 * separately from the Linux kernel or incorporated into other
 12 * software packages, subject to the following license:
 13 *
 14 * Permission is hereby granted, free of charge, to any person obtaining a copy
 15 * of this source file (the "Software"), to deal in the Software without
 16 * restriction, including without limitation the rights to use, copy, modify,
 17 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 18 * and to permit persons to whom the Software is furnished to do so, subject to
 19 * the following conditions:
 20 *
 21 * The above copyright notice and this permission notice shall be included in
 22 * all copies or substantial portions of the Software.
 23 *
 24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 29 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 30 * IN THE SOFTWARE.
 31 *
 32 * Changes:
 33 * 2008-10-07  Alex Zeffertt    Replaced /proc/xen/xenbus with xenfs filesystem
 34 *                              and /proc/xen compatibility mount point.
 35 *                              Turned xenfs into a loadable module.
 36 */
 37
 38#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 39
 40#include <linux/kernel.h>
 41#include <linux/errno.h>
 42#include <linux/uio.h>
 43#include <linux/notifier.h>
 44#include <linux/wait.h>
 45#include <linux/fs.h>
 46#include <linux/poll.h>
 47#include <linux/mutex.h>
 48#include <linux/sched.h>
 49#include <linux/spinlock.h>
 50#include <linux/mount.h>
 51#include <linux/pagemap.h>
 52#include <linux/uaccess.h>
 53#include <linux/init.h>
 54#include <linux/namei.h>
 55#include <linux/string.h>
 56#include <linux/slab.h>
 57#include <linux/miscdevice.h>
 58#include <linux/workqueue.h>
 59
 60#include <xen/xenbus.h>
 61#include <xen/xen.h>
 62#include <asm/xen/hypervisor.h>
 63
 64#include "xenbus.h"
 65
 66unsigned int xb_dev_generation_id;
 67
 68/*
 69 * An element of a list of outstanding transactions, for which we're
 70 * still awaiting a reply.
 71 */
 72struct xenbus_transaction_holder {
 73	struct list_head list;
 74	struct xenbus_transaction handle;
 75	unsigned int generation_id;
 76};
 77
 78/*
 79 * A buffer of data on the queue.
 80 */
 81struct read_buffer {
 82	struct list_head list;
 83	unsigned int cons;
 84	unsigned int len;
 85	char msg[] __counted_by(len);
 86};
 87
 88struct xenbus_file_priv {
 89	/*
 90	 * msgbuffer_mutex is held while partial requests are built up
 91	 * and complete requests are acted on.  It therefore protects
 92	 * the "transactions" and "watches" lists, and the partial
 93	 * request length and buffer.
 94	 *
 95	 * reply_mutex protects the reply being built up to return to
 96	 * usermode.  It nests inside msgbuffer_mutex but may be held
 97	 * alone during a watch callback.
 98	 */
 99	struct mutex msgbuffer_mutex;
100
101	/* In-progress transactions */
102	struct list_head transactions;
103
104	/* Active watches. */
105	struct list_head watches;
106
107	/* Partial request. */
108	unsigned int len;
109	union {
110		struct xsd_sockmsg msg;
111		char buffer[XENSTORE_PAYLOAD_MAX];
112	} u;
113
114	/* Response queue. */
115	struct mutex reply_mutex;
116	struct list_head read_buffers;
117	wait_queue_head_t read_waitq;
118
119	struct kref kref;
120
121	struct work_struct wq;
122};
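
The partial-request union above buffers the raw xenstore wire format: a fixed xsd_sockmsg header (type, req_id, tx_id, len) immediately followed by len payload bytes. As an illustration only, a user-space client could assemble one complete XS_READ request as in the sketch below; the local xs_wire_hdr struct is assumed to mirror struct xsd_sockmsg from xen/interface/io/xs_wire.h, and the path and request id are made up for the example.

#include <stdint.h>
#include <string.h>

/* Assumed to mirror struct xsd_sockmsg from xen/interface/io/xs_wire.h. */
struct xs_wire_hdr {
	uint32_t type;		/* XS_READ, XS_WRITE, XS_WATCH, ... */
	uint32_t req_id;	/* echoed back in the reply header */
	uint32_t tx_id;		/* 0, or an id from XS_TRANSACTION_START */
	uint32_t len;		/* number of payload bytes that follow */
};

/* Build one complete request (header + NUL-terminated path) into buf. */
static size_t build_read_req(char *buf, uint32_t xs_read_type)
{
	static const char path[] = "domid";	/* illustrative path only */
	struct xs_wire_hdr hdr = {
		.type	= xs_read_type,		/* caller passes XS_READ */
		.req_id	= 1,			/* arbitrary example id */
		.tx_id	= 0,
		.len	= sizeof(path),		/* includes the NUL */
	};

	memcpy(buf, &hdr, sizeof(hdr));
	memcpy(buf + sizeof(hdr), path, sizeof(path));
	return sizeof(hdr) + sizeof(path);
}
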
123
124/* Read out any raw xenbus messages queued up. */
125static ssize_t xenbus_file_read(struct file *filp,
126			       char __user *ubuf,
127			       size_t len, loff_t *ppos)
128{
129	struct xenbus_file_priv *u = filp->private_data;
130	struct read_buffer *rb;
131	ssize_t i;
132	int ret;
133
134	mutex_lock(&u->reply_mutex);
135again:
136	while (list_empty(&u->read_buffers)) {
137		mutex_unlock(&u->reply_mutex);
138		if (filp->f_flags & O_NONBLOCK)
139			return -EAGAIN;
140
141		ret = wait_event_interruptible(u->read_waitq,
142					       !list_empty(&u->read_buffers));
143		if (ret)
144			return ret;
145		mutex_lock(&u->reply_mutex);
146	}
147
148	rb = list_entry(u->read_buffers.next, struct read_buffer, list);
149	i = 0;
150	while (i < len) {
151		size_t sz = min_t(size_t, len - i, rb->len - rb->cons);
152
153		ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz);
154
155		i += sz - ret;
156		rb->cons += sz - ret;
157
158		if (ret != 0) {
159			if (i == 0)
160				i = -EFAULT;
161			goto out;
162		}
163
164		/* Clear out buffer if it has been consumed */
165		if (rb->cons == rb->len) {
166			list_del(&rb->list);
167			kfree(rb);
168			if (list_empty(&u->read_buffers))
169				break;
170			rb = list_entry(u->read_buffers.next,
171					struct read_buffer, list);
172		}
173	}
174	if (i == 0)
175		goto again;
176
177out:
178	mutex_unlock(&u->reply_mutex);
179	return i;
180}
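
Note that the read() handler above hands back the queued replies as a plain byte stream and may return fewer bytes than requested, so a consumer has to do its own framing: read the fixed-size header first, then exactly hdr.len body bytes. A minimal user-space helper for that, offered only as a sketch (it assumes <unistd.h> and the illustrative xs_wire_hdr layout from the sketch above):

#include <unistd.h>

/* Read exactly n bytes, looping over short reads; -1 on error or EOF. */
static int read_exact(int fd, void *dst, size_t n)
{
	char *p = dst;

	while (n > 0) {
		ssize_t r = read(fd, p, n);

		if (r <= 0)
			return -1;
		p += r;
		n -= (size_t)r;
	}
	return 0;
}

/* One framed reply: header first, then hdr.len bytes of body. */
static int read_one_reply(int fd, struct xs_wire_hdr *hdr,
			  char *body, size_t body_max)
{
	if (read_exact(fd, hdr, sizeof(*hdr)))
		return -1;
	if (hdr->len > body_max)
		return -1;
	return read_exact(fd, body, hdr->len);
}
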
181
182/*
183 * Add a buffer to the queue.  Caller must hold the appropriate lock
184 * if the queue is not local.  (Commonly the caller will build up
185 * multiple queued buffers on a temporary local list, and then add it
 186 * to the appropriate list under lock once all the buffers have been
187 * successfully allocated.)
188 */
189static int queue_reply(struct list_head *queue, const void *data, size_t len)
190{
191	struct read_buffer *rb;
192
193	if (len == 0)
194		return 0;
195	if (len > XENSTORE_PAYLOAD_MAX)
196		return -EINVAL;
197
198	rb = kmalloc(struct_size(rb, msg, len), GFP_KERNEL);
199	if (rb == NULL)
200		return -ENOMEM;
201
202	rb->cons = 0;
203	rb->len = len;
204
205	memcpy(rb->msg, data, len);
206
207	list_add_tail(&rb->list, queue);
208	return 0;
209}
210
211/*
 212 * Free all the read_buffers on a list.
213 * Caller must have sole reference to list.
214 */
215static void queue_cleanup(struct list_head *list)
216{
217	struct read_buffer *rb;
218
219	while (!list_empty(list)) {
220		rb = list_entry(list->next, struct read_buffer, list);
221		list_del(list->next);
222		kfree(rb);
223	}
224}
225
226struct watch_adapter {
227	struct list_head list;
228	struct xenbus_watch watch;
229	struct xenbus_file_priv *dev_data;
230	char *token;
231};
232
233static void free_watch_adapter(struct watch_adapter *watch)
234{
235	kfree(watch->watch.node);
236	kfree(watch->token);
237	kfree(watch);
238}
239
240static struct watch_adapter *alloc_watch_adapter(const char *path,
241						 const char *token)
242{
243	struct watch_adapter *watch;
244
245	watch = kzalloc(sizeof(*watch), GFP_KERNEL);
246	if (watch == NULL)
247		goto out_fail;
248
249	watch->watch.node = kstrdup(path, GFP_KERNEL);
250	if (watch->watch.node == NULL)
251		goto out_free;
252
253	watch->token = kstrdup(token, GFP_KERNEL);
254	if (watch->token == NULL)
255		goto out_free;
256
257	return watch;
258
259out_free:
260	free_watch_adapter(watch);
261
262out_fail:
263	return NULL;
264}
265
266static void watch_fired(struct xenbus_watch *watch,
267			const char *path,
268			const char *token)
269{
270	struct watch_adapter *adap;
271	struct xsd_sockmsg hdr;
272	const char *token_caller;
273	int path_len, tok_len, body_len;
274	int ret;
275	LIST_HEAD(staging_q);
276
277	adap = container_of(watch, struct watch_adapter, watch);
278
279	token_caller = adap->token;
280
281	path_len = strlen(path) + 1;
282	tok_len = strlen(token_caller) + 1;
283	body_len = path_len + tok_len;
284
285	hdr.type = XS_WATCH_EVENT;
286	hdr.len = body_len;
287
288	mutex_lock(&adap->dev_data->reply_mutex);
289
290	ret = queue_reply(&staging_q, &hdr, sizeof(hdr));
291	if (!ret)
292		ret = queue_reply(&staging_q, path, path_len);
293	if (!ret)
294		ret = queue_reply(&staging_q, token_caller, tok_len);
295
296	if (!ret) {
297		/* success: pass reply list onto watcher */
298		list_splice_tail(&staging_q, &adap->dev_data->read_buffers);
299		wake_up(&adap->dev_data->read_waitq);
300	} else
301		queue_cleanup(&staging_q);
302
303	mutex_unlock(&adap->dev_data->reply_mutex);
304}
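
watch_fired() above queues an XS_WATCH_EVENT whose body is simply the firing path followed by the caller's token, each NUL-terminated. A hedged sketch of how a reader might split that body back apart, mirroring the same memchr-based parsing this driver uses for XS_WATCH requests further below:

#include <stdint.h>
#include <string.h>

/* Split an XS_WATCH_EVENT body of len bytes into path and token. */
static int parse_watch_event(const char *body, uint32_t len,
			     const char **path, const char **token)
{
	const char *nul = memchr(body, 0, len);

	if (nul == NULL)
		return -1;			/* no terminated path */
	*path = body;
	*token = nul + 1;
	if (memchr(*token, 0, body + len - *token) == NULL)
		return -1;			/* no terminated token */
	return 0;
}
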
305
306static void xenbus_worker(struct work_struct *wq)
307{
308	struct xenbus_file_priv *u;
309	struct xenbus_transaction_holder *trans, *tmp;
310	struct watch_adapter *watch, *tmp_watch;
311	struct read_buffer *rb, *tmp_rb;
312
313	u = container_of(wq, struct xenbus_file_priv, wq);
314
315	/*
316	 * No need for locking here because there are no other users,
317	 * by definition.
318	 */
319
320	list_for_each_entry_safe(trans, tmp, &u->transactions, list) {
321		xenbus_transaction_end(trans->handle, 1);
322		list_del(&trans->list);
323		kfree(trans);
324	}
325
326	list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
327		unregister_xenbus_watch(&watch->watch);
328		list_del(&watch->list);
329		free_watch_adapter(watch);
330	}
331
332	list_for_each_entry_safe(rb, tmp_rb, &u->read_buffers, list) {
333		list_del(&rb->list);
334		kfree(rb);
335	}
336	kfree(u);
337}
338
339static void xenbus_file_free(struct kref *kref)
340{
341	struct xenbus_file_priv *u;
342
343	/*
344	 * We might be called in xenbus_thread().
345	 * Use workqueue to avoid deadlock.
346	 */
347	u = container_of(kref, struct xenbus_file_priv, kref);
348	schedule_work(&u->wq);
349}
350
351static struct xenbus_transaction_holder *xenbus_get_transaction(
352	struct xenbus_file_priv *u, uint32_t tx_id)
353{
354	struct xenbus_transaction_holder *trans;
355
356	list_for_each_entry(trans, &u->transactions, list)
357		if (trans->handle.id == tx_id)
358			return trans;
359
360	return NULL;
361}
362
363void xenbus_dev_queue_reply(struct xb_req_data *req)
364{
365	struct xenbus_file_priv *u = req->par;
366	struct xenbus_transaction_holder *trans = NULL;
367	int rc;
368	LIST_HEAD(staging_q);
369
370	xs_request_exit(req);
371
372	mutex_lock(&u->msgbuffer_mutex);
373
374	if (req->type == XS_TRANSACTION_START) {
375		trans = xenbus_get_transaction(u, 0);
376		if (WARN_ON(!trans))
377			goto out;
378		if (req->msg.type == XS_ERROR) {
379			list_del(&trans->list);
380			kfree(trans);
381		} else {
382			rc = kstrtou32(req->body, 10, &trans->handle.id);
383			if (WARN_ON(rc))
384				goto out;
385		}
386	} else if (req->type == XS_TRANSACTION_END) {
387		trans = xenbus_get_transaction(u, req->msg.tx_id);
388		if (WARN_ON(!trans))
389			goto out;
390		list_del(&trans->list);
391		kfree(trans);
392	}
393
394	mutex_unlock(&u->msgbuffer_mutex);
395
396	mutex_lock(&u->reply_mutex);
397	rc = queue_reply(&staging_q, &req->msg, sizeof(req->msg));
398	if (!rc)
399		rc = queue_reply(&staging_q, req->body, req->msg.len);
400	if (!rc) {
401		list_splice_tail(&staging_q, &u->read_buffers);
402		wake_up(&u->read_waitq);
403	} else {
404		queue_cleanup(&staging_q);
405	}
406	mutex_unlock(&u->reply_mutex);
407
408	kfree(req->body);
409	kfree(req);
410
411	kref_put(&u->kref, xenbus_file_free);
412
413	return;
414
415 out:
416	mutex_unlock(&u->msgbuffer_mutex);
417}
418
419static int xenbus_command_reply(struct xenbus_file_priv *u,
420				unsigned int msg_type, const char *reply)
421{
422	struct {
423		struct xsd_sockmsg hdr;
424		char body[16];
425	} msg;
426	int rc;
427
428	msg.hdr = u->u.msg;
429	msg.hdr.type = msg_type;
430	msg.hdr.len = strlen(reply) + 1;
431	if (msg.hdr.len > sizeof(msg.body))
432		return -E2BIG;
433	memcpy(&msg.body, reply, msg.hdr.len);
434
435	mutex_lock(&u->reply_mutex);
436	rc = queue_reply(&u->read_buffers, &msg, sizeof(msg.hdr) + msg.hdr.len);
437	wake_up(&u->read_waitq);
438	mutex_unlock(&u->reply_mutex);
439
440	if (!rc)
441		kref_put(&u->kref, xenbus_file_free);
442
443	return rc;
444}
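
As xenbus_command_reply() shows, locally generated failures are reported to the reader as an XS_ERROR message whose body is an errno name as a string ("EINVAL", "ENOENT", "EAGAIN"), while locally synthesized success replies carry "OK". A small user-space sketch that maps the error strings this file itself emits back to negative errno values; treating anything else as -EIO is purely a fallback chosen for the example:

#include <errno.h>
#include <string.h>

/* Map an XS_ERROR body back to a negative errno value. */
static int xs_error_to_errno(const char *body)
{
	if (!strcmp(body, "ENOENT"))
		return -ENOENT;
	if (!strcmp(body, "EINVAL"))
		return -EINVAL;
	if (!strcmp(body, "EAGAIN"))
		return -EAGAIN;
	return -EIO;			/* unrecognised error string */
}
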
445
446static int xenbus_write_transaction(unsigned msg_type,
447				    struct xenbus_file_priv *u)
448{
449	int rc;
450	struct xenbus_transaction_holder *trans = NULL;
451	struct {
452		struct xsd_sockmsg hdr;
453		char body[];
454	} *msg = (void *)u->u.buffer;
455
456	if (msg_type == XS_TRANSACTION_START) {
457		trans = kzalloc(sizeof(*trans), GFP_KERNEL);
458		if (!trans) {
459			rc = -ENOMEM;
460			goto out;
461		}
462		trans->generation_id = xb_dev_generation_id;
463		list_add(&trans->list, &u->transactions);
464	} else if (msg->hdr.tx_id != 0 &&
465		   !xenbus_get_transaction(u, msg->hdr.tx_id))
466		return xenbus_command_reply(u, XS_ERROR, "ENOENT");
467	else if (msg_type == XS_TRANSACTION_END &&
468		 !(msg->hdr.len == 2 &&
469		   (!strcmp(msg->body, "T") || !strcmp(msg->body, "F"))))
470		return xenbus_command_reply(u, XS_ERROR, "EINVAL");
471	else if (msg_type == XS_TRANSACTION_END) {
472		trans = xenbus_get_transaction(u, msg->hdr.tx_id);
473		if (trans && trans->generation_id != xb_dev_generation_id) {
474			list_del(&trans->list);
475			kfree(trans);
476			if (!strcmp(msg->body, "T"))
477				return xenbus_command_reply(u, XS_ERROR,
478							    "EAGAIN");
479			else
480				return xenbus_command_reply(u,
481							    XS_TRANSACTION_END,
482							    "OK");
483		}
484	}
485
486	rc = xenbus_dev_request_and_reply(&msg->hdr, u);
487	if (rc && trans) {
488		list_del(&trans->list);
489		kfree(trans);
490	}
491
492out:
493	return rc;
494}
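
Tying the transaction handling above together from the client's side: XS_TRANSACTION_START carries an essentially empty payload and its reply body is the new transaction id as a decimal string (which is what the kstrtou32() in xenbus_dev_queue_reply() parses); later requests carry that id in tx_id; and XS_TRANSACTION_END must carry exactly "T" (commit) or "F" (abort), two bytes including the NUL, as the length check above enforces. A sketch of building the closing message, reusing the illustrative xs_wire_hdr from the earlier sketch:

#include <string.h>

/* Build an XS_TRANSACTION_END request; commit selects "T" or "F". */
static size_t build_tx_end(char *buf, uint32_t xs_tx_end_type,
			   uint32_t tx_id, int commit)
{
	struct xs_wire_hdr hdr = {
		.type	= xs_tx_end_type,	/* caller passes XS_TRANSACTION_END */
		.req_id	= 2,			/* arbitrary example id */
		.tx_id	= tx_id,
		.len	= 2,			/* "T" or "F" plus its NUL */
	};

	memcpy(buf, &hdr, sizeof(hdr));
	memcpy(buf + sizeof(hdr), commit ? "T" : "F", 2);
	return sizeof(hdr) + 2;
}
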
495
496static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
497{
498	struct watch_adapter *watch;
499	char *path, *token;
500	int err, rc;
501
502	path = u->u.buffer + sizeof(u->u.msg);
503	token = memchr(path, 0, u->u.msg.len);
504	if (token == NULL) {
505		rc = xenbus_command_reply(u, XS_ERROR, "EINVAL");
506		goto out;
507	}
508	token++;
509	if (memchr(token, 0, u->u.msg.len - (token - path)) == NULL) {
510		rc = xenbus_command_reply(u, XS_ERROR, "EINVAL");
511		goto out;
512	}
513
514	if (msg_type == XS_WATCH) {
515		watch = alloc_watch_adapter(path, token);
516		if (watch == NULL) {
517			rc = -ENOMEM;
518			goto out;
519		}
520
521		watch->watch.callback = watch_fired;
522		watch->dev_data = u;
523
524		err = register_xenbus_watch(&watch->watch);
525		if (err) {
526			free_watch_adapter(watch);
527			rc = err;
528			goto out;
529		}
530		list_add(&watch->list, &u->watches);
531	} else {
532		list_for_each_entry(watch, &u->watches, list) {
533			if (!strcmp(watch->token, token) &&
534			    !strcmp(watch->watch.node, path)) {
535				unregister_xenbus_watch(&watch->watch);
536				list_del(&watch->list);
537				free_watch_adapter(watch);
538				break;
539			}
540		}
541	}
542
543	/* Success.  Synthesize a reply to say all is OK. */
544	rc = xenbus_command_reply(u, msg_type, "OK");
545
546out:
547	return rc;
548}
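
The XS_WATCH and XS_UNWATCH payload parsed above is "<path>\0<token>\0". For completeness, a hedged sketch of building such a request in user space, again reusing the illustrative xs_wire_hdr definition; the req_id is arbitrary:

#include <string.h>

/* Build an XS_WATCH/XS_UNWATCH request with body "<path>\0<token>\0". */
static size_t build_watch_req(char *buf, uint32_t watch_type,
			      const char *path, const char *token)
{
	size_t plen = strlen(path) + 1;
	size_t tlen = strlen(token) + 1;
	struct xs_wire_hdr hdr = {
		.type	= watch_type,		/* XS_WATCH or XS_UNWATCH */
		.req_id	= 3,			/* arbitrary example id */
		.tx_id	= 0,
		.len	= (uint32_t)(plen + tlen),
	};

	memcpy(buf, &hdr, sizeof(hdr));
	memcpy(buf + sizeof(hdr), path, plen);
	memcpy(buf + sizeof(hdr) + plen, token, tlen);
	return sizeof(hdr) + plen + tlen;
}
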
549
550static ssize_t xenbus_file_write(struct file *filp,
551				const char __user *ubuf,
552				size_t len, loff_t *ppos)
553{
554	struct xenbus_file_priv *u = filp->private_data;
555	uint32_t msg_type;
556	int rc = len;
557	int ret;
558
559	/*
560	 * We're expecting usermode to be writing properly formed
561	 * xenbus messages.  If they write an incomplete message we
562	 * buffer it up.  Once it is complete, we act on it.
563	 */
564
565	/*
566	 * Make sure concurrent writers can't stomp all over each
567	 * other's messages and make a mess of our partial message
 568	 * buffer.  We don't make any attempt to stop multiple
569	 * writers from making a mess of each other's incomplete
570	 * messages; we're just trying to guarantee our own internal
571	 * consistency and make sure that single writes are handled
572	 * atomically.
573	 */
574	mutex_lock(&u->msgbuffer_mutex);
575
576	/* Get this out of the way early to avoid confusion */
577	if (len == 0)
578		goto out;
579
 580	/* Can't write a xenbus message larger than we can buffer */
581	if (len > sizeof(u->u.buffer) - u->len) {
582		/* On error, dump existing buffer */
583		u->len = 0;
584		rc = -EINVAL;
585		goto out;
586	}
587
588	ret = copy_from_user(u->u.buffer + u->len, ubuf, len);
589
590	if (ret != 0) {
591		rc = -EFAULT;
592		goto out;
593	}
594
595	/* Deal with a partial copy. */
596	len -= ret;
597	rc = len;
598
599	u->len += len;
600
601	/* Return if we haven't got a full message yet */
602	if (u->len < sizeof(u->u.msg))
603		goto out;	/* not even the header yet */
604
605	/* If we're expecting a message that's larger than we can
606	   possibly send, dump what we have and return an error. */
607	if ((sizeof(u->u.msg) + u->u.msg.len) > sizeof(u->u.buffer)) {
608		rc = -E2BIG;
609		u->len = 0;
610		goto out;
611	}
612
613	if (u->len < (sizeof(u->u.msg) + u->u.msg.len))
614		goto out;	/* incomplete data portion */
615
616	/*
617	 * OK, now we have a complete message.  Do something with it.
618	 */
619
620	kref_get(&u->kref);
621
622	msg_type = u->u.msg.type;
623
624	switch (msg_type) {
625	case XS_WATCH:
626	case XS_UNWATCH:
627		/* (Un)Ask for some path to be watched for changes */
628		ret = xenbus_write_watch(msg_type, u);
629		break;
630
631	default:
632		/* Send out a transaction */
633		ret = xenbus_write_transaction(msg_type, u);
634		break;
635	}
636	if (ret != 0) {
637		rc = ret;
638		kref_put(&u->kref, xenbus_file_free);
639	}
640
641	/* Buffered message consumed */
642	u->len = 0;
643
644 out:
645	mutex_unlock(&u->msgbuffer_mutex);
646	return rc;
647}
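
Putting write() and read() together: a request may be split across several write() calls (the driver buffers partial messages under msgbuffer_mutex), but sending one complete message per write() is the simplest pattern. A hedged end-to-end sketch using the helpers from the earlier sketches; /dev/xen/xenbus is the node registered by the miscdevice below:

#include <fcntl.h>
#include <unistd.h>

/* Send one complete request and collect one framed reply. */
static int xenbus_roundtrip(const char *req, size_t req_len,
			    struct xs_wire_hdr *hdr,
			    char *body, size_t body_max)
{
	int fd = open("/dev/xen/xenbus", O_RDWR);
	int rc = -1;

	if (fd < 0)
		return -1;
	if (write(fd, req, req_len) == (ssize_t)req_len)
		rc = read_one_reply(fd, hdr, body, body_max);
	close(fd);
	return rc;
}
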
648
649static int xenbus_file_open(struct inode *inode, struct file *filp)
650{
651	struct xenbus_file_priv *u;
652
653	if (xen_store_evtchn == 0)
654		return -ENOENT;
655
656	stream_open(inode, filp);
657
658	u = kzalloc(sizeof(*u), GFP_KERNEL);
659	if (u == NULL)
660		return -ENOMEM;
661
662	kref_init(&u->kref);
663
664	INIT_LIST_HEAD(&u->transactions);
665	INIT_LIST_HEAD(&u->watches);
666	INIT_LIST_HEAD(&u->read_buffers);
667	init_waitqueue_head(&u->read_waitq);
668	INIT_WORK(&u->wq, xenbus_worker);
669
670	mutex_init(&u->reply_mutex);
671	mutex_init(&u->msgbuffer_mutex);
672
673	filp->private_data = u;
674
675	return 0;
676}
677
678static int xenbus_file_release(struct inode *inode, struct file *filp)
679{
680	struct xenbus_file_priv *u = filp->private_data;
681
682	kref_put(&u->kref, xenbus_file_free);
683
684	return 0;
685}
686
687static __poll_t xenbus_file_poll(struct file *file, poll_table *wait)
688{
689	struct xenbus_file_priv *u = file->private_data;
690
691	poll_wait(file, &u->read_waitq, wait);
692	if (!list_empty(&u->read_buffers))
693		return EPOLLIN | EPOLLRDNORM;
694	return 0;
695}
696
697const struct file_operations xen_xenbus_fops = {
698	.read = xenbus_file_read,
699	.write = xenbus_file_write,
700	.open = xenbus_file_open,
701	.release = xenbus_file_release,
702	.poll = xenbus_file_poll,
703	.llseek = no_llseek,
704};
705EXPORT_SYMBOL_GPL(xen_xenbus_fops);
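
Since xenbus_file_poll() reports EPOLLIN whenever read_buffers is non-empty, a client that has registered watches can sit in an ordinary poll() loop and only read when an event or reply is queued. A minimal user-space sketch of that wait, offered as an illustration only:

#include <poll.h>

/* Block until the xenbus device has data queued, or timeout_ms expires. */
static int wait_for_xenbus_data(int fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	int n = poll(&pfd, 1, timeout_ms);

	if (n <= 0)
		return n;			/* 0 = timeout, <0 = error */
	return (pfd.revents & POLLIN) ? 1 : 0;
}
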
706
707static struct miscdevice xenbus_dev = {
708	.minor = MISC_DYNAMIC_MINOR,
709	.name = "xen/xenbus",
710	.fops = &xen_xenbus_fops,
711};
712
713static int __init xenbus_init(void)
714{
715	int err;
716
717	if (!xen_domain())
718		return -ENODEV;
719
720	err = misc_register(&xenbus_dev);
721	if (err)
722		pr_err("Could not register xenbus frontend device\n");
723	return err;
724}
725device_initcall(xenbus_init);
v4.17
  1/*
  2 * Driver giving user-space access to the kernel's xenbus connection
  3 * to xenstore.
  4 *
  5 * Copyright (c) 2005, Christian Limpach
  6 * Copyright (c) 2005, Rusty Russell, IBM Corporation
  7 *
  8 * This program is free software; you can redistribute it and/or
  9 * modify it under the terms of the GNU General Public License version 2
 10 * as published by the Free Software Foundation; or, when distributed
 11 * separately from the Linux kernel or incorporated into other
 12 * software packages, subject to the following license:
 13 *
 14 * Permission is hereby granted, free of charge, to any person obtaining a copy
 15 * of this source file (the "Software"), to deal in the Software without
 16 * restriction, including without limitation the rights to use, copy, modify,
 17 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 18 * and to permit persons to whom the Software is furnished to do so, subject to
 19 * the following conditions:
 20 *
 21 * The above copyright notice and this permission notice shall be included in
 22 * all copies or substantial portions of the Software.
 23 *
 24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 29 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 30 * IN THE SOFTWARE.
 31 *
 32 * Changes:
 33 * 2008-10-07  Alex Zeffertt    Replaced /proc/xen/xenbus with xenfs filesystem
 34 *                              and /proc/xen compatibility mount point.
 35 *                              Turned xenfs into a loadable module.
 36 */
 37
 38#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 39
 40#include <linux/kernel.h>
 41#include <linux/errno.h>
 42#include <linux/uio.h>
 43#include <linux/notifier.h>
 44#include <linux/wait.h>
 45#include <linux/fs.h>
 46#include <linux/poll.h>
 47#include <linux/mutex.h>
 48#include <linux/sched.h>
 49#include <linux/spinlock.h>
 50#include <linux/mount.h>
 51#include <linux/pagemap.h>
 52#include <linux/uaccess.h>
 53#include <linux/init.h>
 54#include <linux/namei.h>
 55#include <linux/string.h>
 56#include <linux/slab.h>
 57#include <linux/miscdevice.h>
 58
 59#include <xen/xenbus.h>
 60#include <xen/xen.h>
 61#include <asm/xen/hypervisor.h>
 62
 63#include "xenbus.h"
 64
 65/*
 66 * An element of a list of outstanding transactions, for which we're
 67 * still awaiting a reply.
 68 */
 69struct xenbus_transaction_holder {
 70	struct list_head list;
 71	struct xenbus_transaction handle;
 72};
 73
 74/*
 75 * A buffer of data on the queue.
 76 */
 77struct read_buffer {
 78	struct list_head list;
 79	unsigned int cons;
 80	unsigned int len;
 81	char msg[];
 82};
 83
 84struct xenbus_file_priv {
 85	/*
 86	 * msgbuffer_mutex is held while partial requests are built up
 87	 * and complete requests are acted on.  It therefore protects
 88	 * the "transactions" and "watches" lists, and the partial
 89	 * request length and buffer.
 90	 *
 91	 * reply_mutex protects the reply being built up to return to
 92	 * usermode.  It nests inside msgbuffer_mutex but may be held
 93	 * alone during a watch callback.
 94	 */
 95	struct mutex msgbuffer_mutex;
 96
 97	/* In-progress transactions */
 98	struct list_head transactions;
 99
100	/* Active watches. */
101	struct list_head watches;
102
103	/* Partial request. */
104	unsigned int len;
105	union {
106		struct xsd_sockmsg msg;
107		char buffer[XENSTORE_PAYLOAD_MAX];
108	} u;
109
110	/* Response queue. */
111	struct mutex reply_mutex;
112	struct list_head read_buffers;
113	wait_queue_head_t read_waitq;
114
115	struct kref kref;
116};
117
118/* Read out any raw xenbus messages queued up. */
119static ssize_t xenbus_file_read(struct file *filp,
120			       char __user *ubuf,
121			       size_t len, loff_t *ppos)
122{
123	struct xenbus_file_priv *u = filp->private_data;
124	struct read_buffer *rb;
125	unsigned i;
126	int ret;
127
128	mutex_lock(&u->reply_mutex);
129again:
130	while (list_empty(&u->read_buffers)) {
131		mutex_unlock(&u->reply_mutex);
132		if (filp->f_flags & O_NONBLOCK)
133			return -EAGAIN;
134
135		ret = wait_event_interruptible(u->read_waitq,
136					       !list_empty(&u->read_buffers));
137		if (ret)
138			return ret;
139		mutex_lock(&u->reply_mutex);
140	}
141
142	rb = list_entry(u->read_buffers.next, struct read_buffer, list);
143	i = 0;
144	while (i < len) {
145		unsigned sz = min((unsigned)len - i, rb->len - rb->cons);
146
147		ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz);
148
149		i += sz - ret;
150		rb->cons += sz - ret;
151
152		if (ret != 0) {
153			if (i == 0)
154				i = -EFAULT;
155			goto out;
156		}
157
158		/* Clear out buffer if it has been consumed */
159		if (rb->cons == rb->len) {
160			list_del(&rb->list);
161			kfree(rb);
162			if (list_empty(&u->read_buffers))
163				break;
164			rb = list_entry(u->read_buffers.next,
165					struct read_buffer, list);
166		}
167	}
168	if (i == 0)
169		goto again;
170
171out:
172	mutex_unlock(&u->reply_mutex);
173	return i;
174}
175
176/*
177 * Add a buffer to the queue.  Caller must hold the appropriate lock
178 * if the queue is not local.  (Commonly the caller will build up
179 * multiple queued buffers on a temporary local list, and then add it
 180 * to the appropriate list under lock once all the buffers have been
181 * successfully allocated.)
182 */
183static int queue_reply(struct list_head *queue, const void *data, size_t len)
184{
185	struct read_buffer *rb;
186
187	if (len == 0)
188		return 0;
189	if (len > XENSTORE_PAYLOAD_MAX)
190		return -EINVAL;
191
192	rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL);
193	if (rb == NULL)
194		return -ENOMEM;
195
196	rb->cons = 0;
197	rb->len = len;
198
199	memcpy(rb->msg, data, len);
200
201	list_add_tail(&rb->list, queue);
202	return 0;
203}
204
205/*
 206 * Free all the read_buffers on a list.
207 * Caller must have sole reference to list.
208 */
209static void queue_cleanup(struct list_head *list)
210{
211	struct read_buffer *rb;
212
213	while (!list_empty(list)) {
214		rb = list_entry(list->next, struct read_buffer, list);
215		list_del(list->next);
216		kfree(rb);
217	}
218}
219
220struct watch_adapter {
221	struct list_head list;
222	struct xenbus_watch watch;
223	struct xenbus_file_priv *dev_data;
224	char *token;
225};
226
227static void free_watch_adapter(struct watch_adapter *watch)
228{
229	kfree(watch->watch.node);
230	kfree(watch->token);
231	kfree(watch);
232}
233
234static struct watch_adapter *alloc_watch_adapter(const char *path,
235						 const char *token)
236{
237	struct watch_adapter *watch;
238
239	watch = kzalloc(sizeof(*watch), GFP_KERNEL);
240	if (watch == NULL)
241		goto out_fail;
242
243	watch->watch.node = kstrdup(path, GFP_KERNEL);
244	if (watch->watch.node == NULL)
245		goto out_free;
246
247	watch->token = kstrdup(token, GFP_KERNEL);
248	if (watch->token == NULL)
249		goto out_free;
250
251	return watch;
252
253out_free:
254	free_watch_adapter(watch);
255
256out_fail:
257	return NULL;
258}
259
260static void watch_fired(struct xenbus_watch *watch,
261			const char *path,
262			const char *token)
263{
264	struct watch_adapter *adap;
265	struct xsd_sockmsg hdr;
266	const char *token_caller;
267	int path_len, tok_len, body_len;
268	int ret;
269	LIST_HEAD(staging_q);
270
271	adap = container_of(watch, struct watch_adapter, watch);
272
273	token_caller = adap->token;
274
275	path_len = strlen(path) + 1;
276	tok_len = strlen(token_caller) + 1;
277	body_len = path_len + tok_len;
278
279	hdr.type = XS_WATCH_EVENT;
280	hdr.len = body_len;
281
282	mutex_lock(&adap->dev_data->reply_mutex);
283
284	ret = queue_reply(&staging_q, &hdr, sizeof(hdr));
285	if (!ret)
286		ret = queue_reply(&staging_q, path, path_len);
287	if (!ret)
288		ret = queue_reply(&staging_q, token_caller, tok_len);
289
290	if (!ret) {
291		/* success: pass reply list onto watcher */
292		list_splice_tail(&staging_q, &adap->dev_data->read_buffers);
293		wake_up(&adap->dev_data->read_waitq);
294	} else
295		queue_cleanup(&staging_q);
296
297	mutex_unlock(&adap->dev_data->reply_mutex);
298}
299
300static void xenbus_file_free(struct kref *kref)
301{
302	struct xenbus_file_priv *u;
303	struct xenbus_transaction_holder *trans, *tmp;
304	struct watch_adapter *watch, *tmp_watch;
305	struct read_buffer *rb, *tmp_rb;
306
307	u = container_of(kref, struct xenbus_file_priv, kref);
308
309	/*
310	 * No need for locking here because there are no other users,
311	 * by definition.
312	 */
313
314	list_for_each_entry_safe(trans, tmp, &u->transactions, list) {
315		xenbus_transaction_end(trans->handle, 1);
316		list_del(&trans->list);
317		kfree(trans);
318	}
319
320	list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
321		unregister_xenbus_watch(&watch->watch);
322		list_del(&watch->list);
323		free_watch_adapter(watch);
324	}
325
326	list_for_each_entry_safe(rb, tmp_rb, &u->read_buffers, list) {
327		list_del(&rb->list);
328		kfree(rb);
329	}
330	kfree(u);
331}
332
333static struct xenbus_transaction_holder *xenbus_get_transaction(
334	struct xenbus_file_priv *u, uint32_t tx_id)
335{
336	struct xenbus_transaction_holder *trans;
337
338	list_for_each_entry(trans, &u->transactions, list)
339		if (trans->handle.id == tx_id)
340			return trans;
341
342	return NULL;
343}
344
345void xenbus_dev_queue_reply(struct xb_req_data *req)
346{
347	struct xenbus_file_priv *u = req->par;
348	struct xenbus_transaction_holder *trans = NULL;
349	int rc;
350	LIST_HEAD(staging_q);
351
352	xs_request_exit(req);
353
354	mutex_lock(&u->msgbuffer_mutex);
355
356	if (req->type == XS_TRANSACTION_START) {
357		trans = xenbus_get_transaction(u, 0);
358		if (WARN_ON(!trans))
359			goto out;
360		if (req->msg.type == XS_ERROR) {
361			list_del(&trans->list);
362			kfree(trans);
363		} else {
364			rc = kstrtou32(req->body, 10, &trans->handle.id);
365			if (WARN_ON(rc))
366				goto out;
367		}
368	} else if (req->type == XS_TRANSACTION_END) {
369		trans = xenbus_get_transaction(u, req->msg.tx_id);
370		if (WARN_ON(!trans))
371			goto out;
372		list_del(&trans->list);
373		kfree(trans);
374	}
375
376	mutex_unlock(&u->msgbuffer_mutex);
377
378	mutex_lock(&u->reply_mutex);
379	rc = queue_reply(&staging_q, &req->msg, sizeof(req->msg));
380	if (!rc)
381		rc = queue_reply(&staging_q, req->body, req->msg.len);
382	if (!rc) {
383		list_splice_tail(&staging_q, &u->read_buffers);
384		wake_up(&u->read_waitq);
385	} else {
386		queue_cleanup(&staging_q);
387	}
388	mutex_unlock(&u->reply_mutex);
389
390	kfree(req->body);
391	kfree(req);
392
393	kref_put(&u->kref, xenbus_file_free);
394
395	return;
396
397 out:
398	mutex_unlock(&u->msgbuffer_mutex);
399}
400
401static int xenbus_command_reply(struct xenbus_file_priv *u,
402				unsigned int msg_type, const char *reply)
403{
404	struct {
405		struct xsd_sockmsg hdr;
406		char body[16];
407	} msg;
408	int rc;
409
410	msg.hdr = u->u.msg;
411	msg.hdr.type = msg_type;
412	msg.hdr.len = strlen(reply) + 1;
413	if (msg.hdr.len > sizeof(msg.body))
414		return -E2BIG;
415	memcpy(&msg.body, reply, msg.hdr.len);
416
417	mutex_lock(&u->reply_mutex);
418	rc = queue_reply(&u->read_buffers, &msg, sizeof(msg.hdr) + msg.hdr.len);
419	wake_up(&u->read_waitq);
420	mutex_unlock(&u->reply_mutex);
421
422	if (!rc)
423		kref_put(&u->kref, xenbus_file_free);
424
425	return rc;
426}
427
428static int xenbus_write_transaction(unsigned msg_type,
429				    struct xenbus_file_priv *u)
430{
431	int rc;
432	struct xenbus_transaction_holder *trans = NULL;
433	struct {
434		struct xsd_sockmsg hdr;
435		char body[];
436	} *msg = (void *)u->u.buffer;
437
438	if (msg_type == XS_TRANSACTION_START) {
439		trans = kzalloc(sizeof(*trans), GFP_KERNEL);
440		if (!trans) {
441			rc = -ENOMEM;
442			goto out;
443		}
444		list_add(&trans->list, &u->transactions);
445	} else if (msg->hdr.tx_id != 0 &&
446		   !xenbus_get_transaction(u, msg->hdr.tx_id))
447		return xenbus_command_reply(u, XS_ERROR, "ENOENT");
448	else if (msg_type == XS_TRANSACTION_END &&
449		 !(msg->hdr.len == 2 &&
450		   (!strcmp(msg->body, "T") || !strcmp(msg->body, "F"))))
451		return xenbus_command_reply(u, XS_ERROR, "EINVAL");
452
453	rc = xenbus_dev_request_and_reply(&msg->hdr, u);
454	if (rc && trans) {
455		list_del(&trans->list);
456		kfree(trans);
457	}
458
459out:
460	return rc;
461}
462
463static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
464{
465	struct watch_adapter *watch;
466	char *path, *token;
467	int err, rc;
468	LIST_HEAD(staging_q);
469
470	path = u->u.buffer + sizeof(u->u.msg);
471	token = memchr(path, 0, u->u.msg.len);
472	if (token == NULL) {
473		rc = xenbus_command_reply(u, XS_ERROR, "EINVAL");
474		goto out;
475	}
476	token++;
477	if (memchr(token, 0, u->u.msg.len - (token - path)) == NULL) {
478		rc = xenbus_command_reply(u, XS_ERROR, "EINVAL");
479		goto out;
480	}
481
482	if (msg_type == XS_WATCH) {
483		watch = alloc_watch_adapter(path, token);
484		if (watch == NULL) {
485			rc = -ENOMEM;
486			goto out;
487		}
488
489		watch->watch.callback = watch_fired;
490		watch->dev_data = u;
491
492		err = register_xenbus_watch(&watch->watch);
493		if (err) {
494			free_watch_adapter(watch);
495			rc = err;
496			goto out;
497		}
498		list_add(&watch->list, &u->watches);
499	} else {
500		list_for_each_entry(watch, &u->watches, list) {
501			if (!strcmp(watch->token, token) &&
502			    !strcmp(watch->watch.node, path)) {
503				unregister_xenbus_watch(&watch->watch);
504				list_del(&watch->list);
505				free_watch_adapter(watch);
506				break;
507			}
508		}
509	}
510
511	/* Success.  Synthesize a reply to say all is OK. */
512	rc = xenbus_command_reply(u, msg_type, "OK");
513
514out:
515	return rc;
516}
517
518static ssize_t xenbus_file_write(struct file *filp,
519				const char __user *ubuf,
520				size_t len, loff_t *ppos)
521{
522	struct xenbus_file_priv *u = filp->private_data;
523	uint32_t msg_type;
524	int rc = len;
525	int ret;
526	LIST_HEAD(staging_q);
527
528	/*
529	 * We're expecting usermode to be writing properly formed
530	 * xenbus messages.  If they write an incomplete message we
531	 * buffer it up.  Once it is complete, we act on it.
532	 */
533
534	/*
535	 * Make sure concurrent writers can't stomp all over each
536	 * other's messages and make a mess of our partial message
 537	 * buffer.  We don't make any attempt to stop multiple
538	 * writers from making a mess of each other's incomplete
539	 * messages; we're just trying to guarantee our own internal
540	 * consistency and make sure that single writes are handled
541	 * atomically.
542	 */
543	mutex_lock(&u->msgbuffer_mutex);
544
545	/* Get this out of the way early to avoid confusion */
546	if (len == 0)
547		goto out;
548
 549	/* Can't write a xenbus message larger than we can buffer */
550	if (len > sizeof(u->u.buffer) - u->len) {
551		/* On error, dump existing buffer */
552		u->len = 0;
553		rc = -EINVAL;
554		goto out;
555	}
556
557	ret = copy_from_user(u->u.buffer + u->len, ubuf, len);
558
559	if (ret != 0) {
560		rc = -EFAULT;
561		goto out;
562	}
563
564	/* Deal with a partial copy. */
565	len -= ret;
566	rc = len;
567
568	u->len += len;
569
570	/* Return if we haven't got a full message yet */
571	if (u->len < sizeof(u->u.msg))
572		goto out;	/* not even the header yet */
573
574	/* If we're expecting a message that's larger than we can
575	   possibly send, dump what we have and return an error. */
576	if ((sizeof(u->u.msg) + u->u.msg.len) > sizeof(u->u.buffer)) {
577		rc = -E2BIG;
578		u->len = 0;
579		goto out;
580	}
581
582	if (u->len < (sizeof(u->u.msg) + u->u.msg.len))
583		goto out;	/* incomplete data portion */
584
585	/*
586	 * OK, now we have a complete message.  Do something with it.
587	 */
588
589	kref_get(&u->kref);
590
591	msg_type = u->u.msg.type;
592
593	switch (msg_type) {
594	case XS_WATCH:
595	case XS_UNWATCH:
596		/* (Un)Ask for some path to be watched for changes */
597		ret = xenbus_write_watch(msg_type, u);
598		break;
599
600	default:
601		/* Send out a transaction */
602		ret = xenbus_write_transaction(msg_type, u);
603		break;
604	}
605	if (ret != 0) {
606		rc = ret;
607		kref_put(&u->kref, xenbus_file_free);
608	}
609
610	/* Buffered message consumed */
611	u->len = 0;
612
613 out:
614	mutex_unlock(&u->msgbuffer_mutex);
615	return rc;
616}
617
618static int xenbus_file_open(struct inode *inode, struct file *filp)
619{
620	struct xenbus_file_priv *u;
621
622	if (xen_store_evtchn == 0)
623		return -ENOENT;
624
625	nonseekable_open(inode, filp);
626
627	filp->f_mode &= ~FMODE_ATOMIC_POS; /* cdev-style semantics */
628
629	u = kzalloc(sizeof(*u), GFP_KERNEL);
630	if (u == NULL)
631		return -ENOMEM;
632
633	kref_init(&u->kref);
634
635	INIT_LIST_HEAD(&u->transactions);
636	INIT_LIST_HEAD(&u->watches);
637	INIT_LIST_HEAD(&u->read_buffers);
638	init_waitqueue_head(&u->read_waitq);
639
640	mutex_init(&u->reply_mutex);
641	mutex_init(&u->msgbuffer_mutex);
642
643	filp->private_data = u;
644
645	return 0;
646}
647
648static int xenbus_file_release(struct inode *inode, struct file *filp)
649{
650	struct xenbus_file_priv *u = filp->private_data;
651
652	kref_put(&u->kref, xenbus_file_free);
653
654	return 0;
655}
656
657static __poll_t xenbus_file_poll(struct file *file, poll_table *wait)
658{
659	struct xenbus_file_priv *u = file->private_data;
660
661	poll_wait(file, &u->read_waitq, wait);
662	if (!list_empty(&u->read_buffers))
663		return EPOLLIN | EPOLLRDNORM;
664	return 0;
665}
666
667const struct file_operations xen_xenbus_fops = {
668	.read = xenbus_file_read,
669	.write = xenbus_file_write,
670	.open = xenbus_file_open,
671	.release = xenbus_file_release,
672	.poll = xenbus_file_poll,
673	.llseek = no_llseek,
674};
675EXPORT_SYMBOL_GPL(xen_xenbus_fops);
676
677static struct miscdevice xenbus_dev = {
678	.minor = MISC_DYNAMIC_MINOR,
679	.name = "xen/xenbus",
680	.fops = &xen_xenbus_fops,
681};
682
683static int __init xenbus_init(void)
684{
685	int err;
686
687	if (!xen_domain())
688		return -ENODEV;
689
690	err = misc_register(&xenbus_dev);
691	if (err)
692		pr_err("Could not register xenbus frontend device\n");
693	return err;
694}
695device_initcall(xenbus_init);