/*P:200 This contains all the /dev/lguest code, whereby the userspace
 * launcher controls and communicates with the Guest.  For example,
 * the first write will tell us the Guest's memory layout and entry
 * point.  A read will run the Guest until something happens, such as
 * a signal or the Guest doing a NOTIFY out to the Launcher.  There is
 * also a way for the Launcher to attach eventfds to particular NOTIFY
 * values instead of returning from the read() call.
:*/
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/slab.h>
#include "lg.h"

/*L:056
 * Before we move on, let's jump ahead and look at what the kernel does when
 * it needs to look up the eventfds.  That will complete our picture of how we
 * use RCU.
 *
 * The notification value is in cpu->pending_notify: we return true if it went
 * to an eventfd.
 */
bool send_notify_to_eventfd(struct lg_cpu *cpu)
{
	unsigned int i;
	struct lg_eventfd_map *map;

	/*
	 * This "rcu_read_lock()" helps track when someone is still looking at
	 * the (RCU-using) eventfds array.  It's not actually a lock at all;
	 * indeed it's a noop in many configurations.  (You didn't expect me to
	 * explain all the RCU secrets here, did you?)
	 */
	rcu_read_lock();
	/*
	 * rcu_dereference is the counter-side of rcu_assign_pointer(); it
	 * makes sure we don't access the memory pointed to by
	 * cpu->lg->eventfds before cpu->lg->eventfds is set.  Sounds crazy,
	 * but Alpha allows this!  Paul McKenney points out that a really
	 * aggressive compiler could have the same effect:
	 *   http://lists.ozlabs.org/pipermail/lguest/2009-July/001560.html
	 *
	 * So play safe, use rcu_dereference to get the rcu-protected pointer:
	 */
	map = rcu_dereference(cpu->lg->eventfds);
	/*
	 * Simple array search: even if they add an eventfd while we do this,
	 * we'll continue to use the old array and just won't see the new one.
	 */
	for (i = 0; i < map->num; i++) {
		if (map->map[i].addr == cpu->pending_notify) {
			eventfd_signal(map->map[i].event, 1);
			cpu->pending_notify = 0;
			break;
		}
	}
	/* We're done with the rcu-protected variable cpu->lg->eventfds. */
	rcu_read_unlock();

	/* If we cleared the notification, it's because we found a match. */
	return cpu->pending_notify == 0;
}
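
/*
 * The other end of that eventfd_signal() is simply a Launcher thread blocked
 * in read() on the eventfd.  Below is a guarded-out userspace sketch of such
 * a service thread; it is illustration only, and the "efd" name is made up
 * for the example.
 */
#if 0
	for (;;) {
		uint64_t n;

		/* Blocks until the Guest NOTIFYs the address we attached. */
		if (read(efd, &n, sizeof(n)) != sizeof(n))
			err(1, "reading eventfd");
		/* ... service the virtqueue here, then wait again ... */
	}
#endif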

/*L:055
 * One of the more tricksy tricks in the Linux Kernel is a technique called
 * Read Copy Update.  Since one point of lguest is to teach lguest journeyers
 * about kernel coding, I use it here.  (In case you're curious, other purposes
 * include learning about virtualization and instilling a deep appreciation for
 * simplicity and puppies).
 *
 * We keep a simple array which maps LHCALL_NOTIFY values to eventfds, but we
 * add new eventfds without ever blocking readers from accessing the array.
 * The current Launcher only does this during boot, so that never happens.  But
 * Read Copy Update is cool, and adding a lock risks damaging even more puppies
 * than this code does.
 *
 * We allocate a brand new one-larger array, copy the old one and add our new
 * element.  Then we make the lg eventfd pointer point to the new array.
 * That's the easy part: now we need to free the old one, but we need to make
 * sure no slow CPU somewhere is still looking at it.  That's what
 * synchronize_rcu does for us: waits until every CPU has indicated that it
 * has moved on, so we know no one is still using the old one.
 *
 * If that's unclear, see http://en.wikipedia.org/wiki/Read-copy-update.
 */
static int add_eventfd(struct lguest *lg, unsigned long addr, int fd)
{
	struct lg_eventfd_map *new, *old = lg->eventfds;

	/*
	 * We don't allow notifications on value 0 anyway (pending_notify of
	 * 0 means "nothing pending").
	 */
	if (!addr)
		return -EINVAL;

	/*
	 * Replace the old array with the new one, carefully: others can
	 * be accessing it at the same time.
	 */
	new = kmalloc(sizeof(*new) + sizeof(new->map[0]) * (old->num + 1),
		      GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	/* First make identical copy. */
	memcpy(new->map, old->map, sizeof(old->map[0]) * old->num);
	new->num = old->num;

	/* Now append new entry. */
	new->map[new->num].addr = addr;
	new->map[new->num].event = eventfd_ctx_fdget(fd);
	if (IS_ERR(new->map[new->num].event)) {
		int err = PTR_ERR(new->map[new->num].event);
		kfree(new);
		return err;
	}
	new->num++;

	/*
	 * Now put new one in place: rcu_assign_pointer() is a fancy way of
	 * doing "lg->eventfds = new", but it uses memory barriers to make
	 * absolutely sure that the contents of "new" written above is nailed
	 * down before we actually do the assignment.
	 *
	 * We have to think about these kinds of things when we're operating on
	 * live data without locks.
	 */
	rcu_assign_pointer(lg->eventfds, new);

	/*
	 * We're not in a big hurry.  Wait until no one's looking at the old
	 * version, then free it.
	 */
	synchronize_rcu();
	kfree(old);

	return 0;
}

/*L:052
 * Receiving notifications from the Guest is usually done by attaching a
 * particular LHCALL_NOTIFY value to an event file descriptor.  The eventfd will
 * become readable when the Guest does an LHCALL_NOTIFY with that value.
 *
 * This is really convenient for processing each virtqueue in a separate
 * thread.
 */
static int attach_eventfd(struct lguest *lg, const unsigned long __user *input)
{
	unsigned long addr, fd;
	int err;

	if (get_user(addr, input) != 0)
		return -EFAULT;
	input++;
	if (get_user(fd, input) != 0)
		return -EFAULT;

	/*
	 * Just make sure two callers don't add eventfds at once.  We really
	 * only need to lock against callers adding to the same Guest, so using
	 * the Big Lguest Lock is overkill.  But this is setup, not a fast path.
	 */
	mutex_lock(&lguest_lock);
	err = add_eventfd(lg, addr, fd);
	mutex_unlock(&lguest_lock);

	return err;
}
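
/*
 * From the Launcher's side this is one three-word write.  Below is a
 * guarded-out userspace sketch, not part of the driver: it assumes
 * <sys/eventfd.h>, <unistd.h>, <err.h> and <linux/lguest_launcher.h> for
 * LHREQ_EVENTFD, and "lguest_fd" (the already-open /dev/lguest fd) is a
 * made-up name for the example.
 */
#if 0
static int attach_notify_eventfd(int lguest_fd, unsigned long notify_addr)
{
	int efd = eventfd(0, 0);

	if (efd < 0)
		err(1, "creating eventfd");

	/* Three unsigned longs, exactly as attach_eventfd() above expects. */
	unsigned long args[3] = { LHREQ_EVENTFD, notify_addr, efd };

	if (write(lguest_fd, args, sizeof(args)) < 0)
		err(1, "attaching eventfd to %#lx", notify_addr);
	return efd;
}
#endif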

/*L:050
 * Sending an interrupt is done by writing LHREQ_IRQ and an interrupt
 * number to /dev/lguest.
 */
static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input)
{
	unsigned long irq;

	if (get_user(irq, input) != 0)
		return -EFAULT;
	if (irq >= LGUEST_IRQS)
		return -EINVAL;

	/*
	 * Next time the Guest runs, the core code will see if it can deliver
	 * this interrupt.
	 */
	set_interrupt(cpu, irq);
	return 0;
}
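
/*
 * For the Launcher that's just a two-word write; a guarded-out sketch using
 * the same made-up "lguest_fd" name as above:
 */
#if 0
static void send_guest_irq(int lguest_fd, unsigned long irq_number)
{
	unsigned long buf[2] = { LHREQ_IRQ, irq_number };

	if (write(lguest_fd, buf, sizeof(buf)) < 0)
		err(1, "sending interrupt %lu", irq_number);
}
#endif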

/*L:040
 * Once our Guest is initialized, the Launcher makes it run by reading
 * from /dev/lguest.
 */
static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o)
{
	struct lguest *lg = file->private_data;
	struct lg_cpu *cpu;
	unsigned int cpu_id = *o;

	/* You must write LHREQ_INITIALIZE first! */
	if (!lg)
		return -EINVAL;

	/* Watch out for arbitrary vcpu indexes! */
	if (cpu_id >= lg->nr_cpus)
		return -EINVAL;

	cpu = &lg->cpus[cpu_id];

	/* If you're not the task which owns the Guest, go away. */
	if (current != cpu->tsk)
		return -EPERM;

	/* If the Guest is already dead, we indicate why */
	if (lg->dead) {
		size_t len;

		/* lg->dead either contains an error code, or a string. */
		if (IS_ERR(lg->dead))
			return PTR_ERR(lg->dead);

		/* We can only return as much as the buffer they read with. */
		len = min(size, strlen(lg->dead)+1);
		if (copy_to_user(user, lg->dead, len) != 0)
			return -EFAULT;
		return len;
	}

	/*
	 * If we returned from read() last time because the Guest sent I/O,
	 * clear the flag.
	 */
	if (cpu->pending_notify)
		cpu->pending_notify = 0;

	/* Run the Guest until something interesting happens. */
	return run_guest(cpu, (unsigned long __user *)user);
}
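
/*
 * So the heart of the Launcher is a loop: read() to run the Guest, deal with
 * whatever stopped it, repeat.  A rough, guarded-out sketch (run_guest() hands
 * back the NOTIFY value when no eventfd is attached to it; the names and
 * error handling here are only illustrative):
 */
#if 0
	for (;;) {
		unsigned long notify_addr;

		if (read(lguest_fd, &notify_addr, sizeof(notify_addr)) < 0)
			err(1, "running guest");	/* or read why it died */
		/* Otherwise: service the NOTIFY, then loop and run some more. */
	}
#endif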

/*L:025
 * This actually initializes a CPU.  For the moment, a Guest is only
 * uniprocessor, so "id" is always 0.
 */
static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip)
{
	/* We have a limited number of CPUs in the lguest struct. */
	if (id >= ARRAY_SIZE(cpu->lg->cpus))
		return -EINVAL;

	/* Set up this CPU's id, and pointer back to the lguest struct. */
	cpu->id = id;
	cpu->lg = container_of((cpu - id), struct lguest, cpus[0]);
	cpu->lg->nr_cpus++;

	/* Each CPU has a timer it can set. */
	init_clockdev(cpu);

	/*
	 * We need a complete page for the Guest registers: they are accessible
	 * to the Guest and we can only grant it access to whole pages.
	 */
	cpu->regs_page = get_zeroed_page(GFP_KERNEL);
	if (!cpu->regs_page)
		return -ENOMEM;

	/* We actually put the registers at the bottom of the page. */
	cpu->regs = (void *)cpu->regs_page + PAGE_SIZE - sizeof(*cpu->regs);

	/*
	 * Now we initialize the Guest's registers, handing it the start
	 * address.
	 */
	lguest_arch_setup_regs(cpu, start_ip);

	/*
	 * We keep a pointer to the Launcher task (ie. current task) for when
	 * other Guests want to wake this one (eg. console input).
	 */
	cpu->tsk = current;

	/*
	 * We need to keep a pointer to the Launcher's memory map, because if
	 * the Launcher dies we need to clean it up.  If we don't keep a
	 * reference, it is destroyed before close() is called.
	 */
	cpu->mm = get_task_mm(cpu->tsk);

	/*
	 * We remember which CPU's pages this Guest used last, for optimization
	 * when the same Guest runs on the same CPU twice.
	 */
	cpu->last_pages = NULL;

	/* No error == success. */
	return 0;
}

/*L:020
 * The initialization write supplies 3 pointer sized (32 or 64 bit) values (in
 * addition to the LHREQ_INITIALIZE value).  These are:
 *
 * base: The start of the Guest-physical memory inside the Launcher memory.
 *
 * pfnlimit: The highest (Guest-physical) page number the Guest should be
 * allowed to access.  The Guest memory lives inside the Launcher, so it sets
 * this to ensure the Guest can only reach its own memory.
 *
 * start: The first instruction to execute ("eip" in x86-speak).
 */
static int initialize(struct file *file, const unsigned long __user *input)
{
	/* "struct lguest" contains all we (the Host) know about a Guest. */
	struct lguest *lg;
	int err;
	unsigned long args[3];

	/*
	 * We grab the Big Lguest lock, which protects against multiple
	 * simultaneous initializations.
	 */
	mutex_lock(&lguest_lock);
	/* You can't initialize twice!  Close the device and start again... */
	if (file->private_data) {
		err = -EBUSY;
		goto unlock;
	}

	if (copy_from_user(args, input, sizeof(args)) != 0) {
		err = -EFAULT;
		goto unlock;
	}

	lg = kzalloc(sizeof(*lg), GFP_KERNEL);
	if (!lg) {
		err = -ENOMEM;
		goto unlock;
	}

	lg->eventfds = kmalloc(sizeof(*lg->eventfds), GFP_KERNEL);
	if (!lg->eventfds) {
		err = -ENOMEM;
		goto free_lg;
	}
	lg->eventfds->num = 0;

	/* Populate the easy fields of our "struct lguest" */
	lg->mem_base = (void __user *)args[0];
	lg->pfn_limit = args[1];

	/* This is the first cpu (cpu 0) and it will start booting at args[2] */
	err = lg_cpu_start(&lg->cpus[0], 0, args[2]);
	if (err)
		goto free_eventfds;

	/*
	 * Initialize the Guest's shadow page tables.  This allocates
	 * memory, so can fail.
	 */
	err = init_guest_pagetable(lg);
	if (err)
		goto free_regs;

	/* We keep our "struct lguest" in the file's private_data. */
	file->private_data = lg;

	mutex_unlock(&lguest_lock);

	/* And because this is a write() call, we return the length used. */
	return sizeof(args);

free_regs:
	/* FIXME: This should be in free_vcpu */
	free_page(lg->cpus[0].regs_page);
free_eventfds:
	kfree(lg->eventfds);
free_lg:
	kfree(lg);
unlock:
	mutex_unlock(&lguest_lock);
	return err;
}
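
/*
 * So the very first thing a Launcher writes looks roughly like the guarded-out
 * sketch below: four unsigned longs, the LHREQ_INITIALIZE token followed by
 * the three values described above.  "guest_base", "guest_pages" and
 * "guest_start" are made-up names; in a real Launcher they come from mmap()ing
 * the Guest's memory and loading its kernel.
 */
#if 0
	int lguest_fd = open("/dev/lguest", O_RDWR);
	unsigned long args[4] = {
		LHREQ_INITIALIZE,
		(unsigned long)guest_base,	/* base */
		guest_pages,			/* pfnlimit */
		guest_start			/* start ("eip") */
	};

	if (lguest_fd < 0)
		err(1, "opening /dev/lguest");
	if (write(lguest_fd, args, sizeof(args)) < 0)
		err(1, "writing LHREQ_INITIALIZE");
#endif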

/*L:010
 * The first operation the Launcher does must be a write.  All writes
 * start with an unsigned long number: for the first write this must be
 * LHREQ_INITIALIZE to set up the Guest.  After that the Launcher can use
 * writes of other values to send interrupts or set up receipt of notifications.
 *
 * Note that we overload the "offset" in the /dev/lguest file to indicate what
 * CPU number we're dealing with.  Currently this is always 0 since we only
 * support uniprocessor Guests, but you can see the beginnings of SMP support
 * here.
 */
static ssize_t write(struct file *file, const char __user *in,
		     size_t size, loff_t *off)
{
	/*
	 * Once the Guest is initialized, we hold the "struct lguest" in the
	 * file private data.
	 */
	struct lguest *lg = file->private_data;
	const unsigned long __user *input = (const unsigned long __user *)in;
	unsigned long req;
	struct lg_cpu *uninitialized_var(cpu);
	unsigned int cpu_id = *off;

	/* The first value tells us what this request is. */
	if (get_user(req, input) != 0)
		return -EFAULT;
	input++;

	/* If you haven't initialized, you must do that first. */
	if (req != LHREQ_INITIALIZE) {
		if (!lg || (cpu_id >= lg->nr_cpus))
			return -EINVAL;
		cpu = &lg->cpus[cpu_id];

		/* Once the Guest is dead, you can only read() why it died. */
		if (lg->dead)
			return -ENOENT;
	}

	switch (req) {
	case LHREQ_INITIALIZE:
		return initialize(file, input);
	case LHREQ_IRQ:
		return user_send_irq(cpu, input);
	case LHREQ_EVENTFD:
		return attach_eventfd(lg, input);
	default:
		return -EINVAL;
	}
}

/*L:060
 * The final piece of interface code is the close() routine.  It reverses
 * everything done in initialize().  This is usually called because the
 * Launcher exited.
 *
 * Note that the close routine returns 0 or a negative error number: it can't
 * really fail, but it can whine.  I blame Sun for this wart, and K&R C for
 * letting them do it.
:*/
static int close(struct inode *inode, struct file *file)
{
	struct lguest *lg = file->private_data;
	unsigned int i;

	/* If we never successfully initialized, there's nothing to clean up */
	if (!lg)
		return 0;

	/*
	 * We need the big lock, to protect from inter-guest I/O and other
	 * Launchers initializing guests.
	 */
	mutex_lock(&lguest_lock);

	/* Free up the shadow page tables for the Guest. */
	free_guest_pagetable(lg);

	for (i = 0; i < lg->nr_cpus; i++) {
		/* Cancels the hrtimer set via LHCALL_SET_CLOCKEVENT. */
		hrtimer_cancel(&lg->cpus[i].hrt);
		/* We can free up the register page we allocated. */
		free_page(lg->cpus[i].regs_page);
		/*
		 * Now all the memory cleanups are done, it's safe to release
		 * the Launcher's memory management structure.
		 */
		mmput(lg->cpus[i].mm);
	}

	/* Release any eventfds they registered. */
	for (i = 0; i < lg->eventfds->num; i++)
		eventfd_ctx_put(lg->eventfds->map[i].event);
	kfree(lg->eventfds);

	/*
	 * If lg->dead doesn't contain an error code it will be NULL or a
	 * kmalloc()ed string, either of which is ok to hand to kfree().
	 */
	if (!IS_ERR(lg->dead))
		kfree(lg->dead);
	/* Free the memory allocated to the lguest_struct */
	kfree(lg);
	/* Release lock and exit. */
	mutex_unlock(&lguest_lock);

	return 0;
}

/*L:000
 * Welcome to our journey through the Launcher!
 *
 * The Launcher is the Host userspace program which sets up, runs and services
 * the Guest.  In fact, many comments in the Drivers which refer to "the Host"
 * doing things are inaccurate: the Launcher does all the device handling for
 * the Guest, but the Guest can't know that.
 *
 * Just to confuse you: to the Host kernel, the Launcher *is* the Guest and we
 * shall see more of that later.
 *
 * We begin our understanding with the Host kernel interface which the Launcher
 * uses: reading and writing a character device called /dev/lguest.  All the
 * work happens in the read(), write() and close() routines:
 */
static const struct file_operations lguest_fops = {
	.owner	 = THIS_MODULE,
	.release = close,
	.write	 = write,
	.read	 = read,
	.llseek  = default_llseek,
};
/*:*/

/*
 * This is a textbook example of a "misc" character device.  Populate a "struct
 * miscdevice" and register it with misc_register().
 */
static struct miscdevice lguest_dev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "lguest",
	.fops	= &lguest_fops,
};

int __init lguest_device_init(void)
{
	return misc_register(&lguest_dev);
}

void __exit lguest_device_remove(void)
{
	misc_deregister(&lguest_dev);
}