author | Srikant Patnaik | 2015-01-11 12:28:04 +0530
---|---|---
committer | Srikant Patnaik | 2015-01-11 12:28:04 +0530
commit | 871480933a1c28f8a9fed4c4d34d06c439a7a422 (patch) |
tree | 8718f573808810c2a1e8cb8fb6ac469093ca2784 /drivers/lguest |
parent | 9d40ac5867b9aefe0722bc1f110b965ff294d30d (diff) |
Moved, renamed, and deleted files
The original directory structure was scattered and disorganized.
These changes reorganize the files to match the standard Linux kernel source layout.
Diffstat (limited to 'drivers/lguest')
-rw-r--r-- | drivers/lguest/Kconfig | 14
-rw-r--r-- | drivers/lguest/Makefile | 29
-rw-r--r-- | drivers/lguest/README | 47
-rw-r--r-- | drivers/lguest/core.c | 376
-rw-r--r-- | drivers/lguest/hypercalls.c | 312
-rw-r--r-- | drivers/lguest/interrupts_and_traps.c | 647
-rw-r--r-- | drivers/lguest/lg.h | 260
-rw-r--r-- | drivers/lguest/lguest_device.c | 531
-rw-r--r-- | drivers/lguest/lguest_user.c | 542
-rw-r--r-- | drivers/lguest/page_tables.c | 1152
-rw-r--r-- | drivers/lguest/segments.c | 228
-rw-r--r-- | drivers/lguest/x86/core.c | 715
-rw-r--r-- | drivers/lguest/x86/switcher_32.S | 388
13 files changed, 5241 insertions, 0 deletions
diff --git a/drivers/lguest/Kconfig b/drivers/lguest/Kconfig new file mode 100644 index 00000000..34ae49dc --- /dev/null +++ b/drivers/lguest/Kconfig @@ -0,0 +1,14 @@ +config LGUEST + tristate "Linux hypervisor example code" + depends on X86_32 && EXPERIMENTAL && EVENTFD + select HVC_DRIVER + ---help--- + This is a very simple module which allows you to run + multiple instances of the same Linux kernel, using the + "lguest" command found in the Documentation/virtual/lguest + directory. + + Note that "lguest" is pronounced to rhyme with "fell quest", + not "rustyvisor". See Documentation/virtual/lguest/lguest.txt. + + If unsure, say N. If curious, say M. If masochistic, say Y. diff --git a/drivers/lguest/Makefile b/drivers/lguest/Makefile new file mode 100644 index 00000000..c4197503 --- /dev/null +++ b/drivers/lguest/Makefile @@ -0,0 +1,29 @@ +# Guest requires the device configuration and probing code. +obj-$(CONFIG_LGUEST_GUEST) += lguest_device.o + +# Host requires the other files, which can be a module. +obj-$(CONFIG_LGUEST) += lg.o +lg-y = core.o hypercalls.o page_tables.o interrupts_and_traps.o \ + segments.o lguest_user.o + +lg-$(CONFIG_X86_32) += x86/switcher_32.o x86/core.o + +Preparation Preparation!: PREFIX=P +Guest: PREFIX=G +Drivers: PREFIX=D +Launcher: PREFIX=L +Host: PREFIX=H +Switcher: PREFIX=S +Mastery: PREFIX=M +Beer: + @for f in Preparation Guest Drivers Launcher Host Switcher Mastery; do echo "{==- $$f -==}"; make -s $$f; done; echo "{==-==}" +Preparation Preparation! Guest Drivers Launcher Host Switcher Mastery: + @sh ../../tools/lguest/extract $(PREFIX) `find ../../* -name '*.[chS]' -wholename '*lguest*'` +Puppy: + @clear + @printf " __ \n (___()'\`;\n /, /\`\n \\\\\\\"--\\\\\\ \n" + @sleep 2; clear; printf "\n\n Sit!\n\n"; sleep 1; clear + @printf " __ \n ()'\`; \n /\\|\` \n / | \n(/_)_|_ \n" + @sleep 2; clear; printf "\n\n Stand!\n\n"; sleep 1; clear + @printf " __ \n ()'\`; \n /\\|\` \n /._.= \n /| / \n(_\_)_ \n" + @sleep 2; clear; printf "\n\n Good puppy!\n\n"; sleep 1; clear diff --git a/drivers/lguest/README b/drivers/lguest/README new file mode 100644 index 00000000..b7db39a6 --- /dev/null +++ b/drivers/lguest/README @@ -0,0 +1,47 @@ +Welcome, friend reader, to lguest. + +Lguest is an adventure, with you, the reader, as Hero. I can't think of many +5000-line projects which offer both such capability and glimpses of future +potential; it is an exciting time to be delving into the source! + +But be warned; this is an arduous journey of several hours or more! And as we +know, all true Heroes are driven by a Noble Goal. Thus I offer a Beer (or +equivalent) to anyone I meet who has completed this documentation. + +So get comfortable and keep your wits about you (both quick and humorous). +Along your way to the Noble Goal, you will also gain masterly insight into +lguest, and hypervisors and x86 virtualization in general. + +Our Quest is in seven parts: (best read with C highlighting turned on) + +I) Preparation + - In which our potential hero is flown quickly over the landscape for a + taste of its scope. Suitable for the armchair coders and other such + persons of faint constitution. + +II) Guest + - Where we encounter the first tantalising wisps of code, and come to + understand the details of the life of a Guest kernel. + +III) Drivers + - Whereby the Guest finds its voice and become useful, and our + understanding of the Guest is completed. + +IV) Launcher + - Where we trace back to the creation of the Guest, and thus begin our + understanding of the Host. 
+ +V) Host + - Where we master the Host code, through a long and tortuous journey. + Indeed, it is here that our hero is tested in the Bit of Despair. + +VI) Switcher + - Where our understanding of the intertwined nature of Guests and Hosts + is completed. + +VII) Mastery + - Where our fully fledged hero grapples with the Great Question: + "What next?" + +make Preparation! +Rusty Russell. diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c new file mode 100644 index 00000000..b5fdcb78 --- /dev/null +++ b/drivers/lguest/core.c @@ -0,0 +1,376 @@ +/*P:400 + * This contains run_guest() which actually calls into the Host<->Guest + * Switcher and analyzes the return, such as determining if the Guest wants the + * Host to do something. This file also contains useful helper routines. +:*/ +#include <linux/module.h> +#include <linux/stringify.h> +#include <linux/stddef.h> +#include <linux/io.h> +#include <linux/mm.h> +#include <linux/vmalloc.h> +#include <linux/cpu.h> +#include <linux/freezer.h> +#include <linux/highmem.h> +#include <linux/slab.h> +#include <asm/paravirt.h> +#include <asm/pgtable.h> +#include <asm/uaccess.h> +#include <asm/poll.h> +#include <asm/asm-offsets.h> +#include "lg.h" + + +static struct vm_struct *switcher_vma; +static struct page **switcher_page; + +/* This One Big lock protects all inter-guest data structures. */ +DEFINE_MUTEX(lguest_lock); + +/*H:010 + * We need to set up the Switcher at a high virtual address. Remember the + * Switcher is a few hundred bytes of assembler code which actually changes the + * CPU to run the Guest, and then changes back to the Host when a trap or + * interrupt happens. + * + * The Switcher code must be at the same virtual address in the Guest as the + * Host since it will be running as the switchover occurs. + * + * Trying to map memory at a particular address is an unusual thing to do, so + * it's not a simple one-liner. + */ +static __init int map_switcher(void) +{ + int i, err; + struct page **pagep; + + /* + * Map the Switcher in to high memory. + * + * It turns out that if we choose the address 0xFFC00000 (4MB under the + * top virtual address), it makes setting up the page tables really + * easy. + */ + + /* + * We allocate an array of struct page pointers. map_vm_area() wants + * this, rather than just an array of pages. + */ + switcher_page = kmalloc(sizeof(switcher_page[0])*TOTAL_SWITCHER_PAGES, + GFP_KERNEL); + if (!switcher_page) { + err = -ENOMEM; + goto out; + } + + /* + * Now we actually allocate the pages. The Guest will see these pages, + * so we make sure they're zeroed. + */ + for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) { + switcher_page[i] = alloc_page(GFP_KERNEL|__GFP_ZERO); + if (!switcher_page[i]) { + err = -ENOMEM; + goto free_some_pages; + } + } + + /* + * First we check that the Switcher won't overlap the fixmap area at + * the top of memory. It's currently nowhere near, but it could have + * very strange effects if it ever happened. + */ + if (SWITCHER_ADDR + (TOTAL_SWITCHER_PAGES+1)*PAGE_SIZE > FIXADDR_START){ + err = -ENOMEM; + printk("lguest: mapping switcher would thwack fixmap\n"); + goto free_pages; + } + + /* + * Now we reserve the "virtual memory area" we want: 0xFFC00000 + * (SWITCHER_ADDR). We might not get it in theory, but in practice + * it's worked so far. The end address needs +1 because __get_vm_area + * allocates an extra guard page, so we need space for that. 
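+ *
+ * As a rough sketch of the arithmetic (hypothetical numbers: say two
+ * Switcher pages and 4KB pages), we would ask for 2 * PAGE_SIZE bytes
+ * somewhere in [0xFFC00000, 0xFFC00000 + 3 * PAGE_SIZE): two pages
+ * for the Switcher itself plus one page of slack for that guard page.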
+ */ + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE, + VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE); + if (!switcher_vma) { + err = -ENOMEM; + printk("lguest: could not map switcher pages high\n"); + goto free_pages; + } + + /* + * This code actually sets up the pages we've allocated to appear at + * SWITCHER_ADDR. map_vm_area() takes the vma we allocated above, the + * kind of pages we're mapping (kernel pages), and a pointer to our + * array of struct pages. It increments that pointer, but we don't + * care. + */ + pagep = switcher_page; + err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, &pagep); + if (err) { + printk("lguest: map_vm_area failed: %i\n", err); + goto free_vma; + } + + /* + * Now the Switcher is mapped at the right address, we can't fail! + * Copy in the compiled-in Switcher code (from x86/switcher_32.S). + */ + memcpy(switcher_vma->addr, start_switcher_text, + end_switcher_text - start_switcher_text); + + printk(KERN_INFO "lguest: mapped switcher at %p\n", + switcher_vma->addr); + /* And we succeeded... */ + return 0; + +free_vma: + vunmap(switcher_vma->addr); +free_pages: + i = TOTAL_SWITCHER_PAGES; +free_some_pages: + for (--i; i >= 0; i--) + __free_pages(switcher_page[i], 0); + kfree(switcher_page); +out: + return err; +} +/*:*/ + +/* Cleaning up the mapping when the module is unloaded is almost... too easy. */ +static void unmap_switcher(void) +{ + unsigned int i; + + /* vunmap() undoes *both* map_vm_area() and __get_vm_area(). */ + vunmap(switcher_vma->addr); + /* Now we just need to free the pages we copied the switcher into */ + for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) + __free_pages(switcher_page[i], 0); + kfree(switcher_page); +} + +/*H:032 + * Dealing With Guest Memory. + * + * Before we go too much further into the Host, we need to grok the routines + * we use to deal with Guest memory. + * + * When the Guest gives us (what it thinks is) a physical address, we can use + * the normal copy_from_user() & copy_to_user() on the corresponding place in + * the memory region allocated by the Launcher. + * + * But we can't trust the Guest: it might be trying to access the Launcher + * code. We have to check that the range is below the pfn_limit the Launcher + * gave us. We have to make sure that addr + len doesn't give us a false + * positive by overflowing, too. + */ +bool lguest_address_ok(const struct lguest *lg, + unsigned long addr, unsigned long len) +{ + return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr); +} + +/* + * This routine copies memory from the Guest. Here we can see how useful the + * kill_lguest() routine we met in the Launcher can be: we return a random + * value (all zeroes) instead of needing to return an error. + */ +void __lgread(struct lg_cpu *cpu, void *b, unsigned long addr, unsigned bytes) +{ + if (!lguest_address_ok(cpu->lg, addr, bytes) + || copy_from_user(b, cpu->lg->mem_base + addr, bytes) != 0) { + /* copy_from_user should do this, but as we rely on it... */ + memset(b, 0, bytes); + kill_guest(cpu, "bad read address %#lx len %u", addr, bytes); + } +} + +/* This is the write (copy into Guest) version. */ +void __lgwrite(struct lg_cpu *cpu, unsigned long addr, const void *b, + unsigned bytes) +{ + if (!lguest_address_ok(cpu->lg, addr, bytes) + || copy_to_user(cpu->lg->mem_base + addr, b, bytes) != 0) + kill_guest(cpu, "bad write address %#lx len %u", addr, bytes); +} +/*:*/ + +/*H:030 + * Let's jump straight to the the main loop which runs the Guest. 
+ * Remember, this is called by the Launcher reading /dev/lguest, and we keep + * going around and around until something interesting happens. + */ +int run_guest(struct lg_cpu *cpu, unsigned long __user *user) +{ + /* We stop running once the Guest is dead. */ + while (!cpu->lg->dead) { + unsigned int irq; + bool more; + + /* First we run any hypercalls the Guest wants done. */ + if (cpu->hcall) + do_hypercalls(cpu); + + /* + * It's possible the Guest did a NOTIFY hypercall to the + * Launcher. + */ + if (cpu->pending_notify) { + /* + * Does it just needs to write to a registered + * eventfd (ie. the appropriate virtqueue thread)? + */ + if (!send_notify_to_eventfd(cpu)) { + /* OK, we tell the main Laucher. */ + if (put_user(cpu->pending_notify, user)) + return -EFAULT; + return sizeof(cpu->pending_notify); + } + } + + /* + * All long-lived kernel loops need to check with this horrible + * thing called the freezer. If the Host is trying to suspend, + * it stops us. + */ + try_to_freeze(); + + /* Check for signals */ + if (signal_pending(current)) + return -ERESTARTSYS; + + /* + * Check if there are any interrupts which can be delivered now: + * if so, this sets up the hander to be executed when we next + * run the Guest. + */ + irq = interrupt_pending(cpu, &more); + if (irq < LGUEST_IRQS) + try_deliver_interrupt(cpu, irq, more); + + /* + * Just make absolutely sure the Guest is still alive. One of + * those hypercalls could have been fatal, for example. + */ + if (cpu->lg->dead) + break; + + /* + * If the Guest asked to be stopped, we sleep. The Guest's + * clock timer will wake us. + */ + if (cpu->halted) { + set_current_state(TASK_INTERRUPTIBLE); + /* + * Just before we sleep, make sure no interrupt snuck in + * which we should be doing. + */ + if (interrupt_pending(cpu, &more) < LGUEST_IRQS) + set_current_state(TASK_RUNNING); + else + schedule(); + continue; + } + + /* + * OK, now we're ready to jump into the Guest. First we put up + * the "Do Not Disturb" sign: + */ + local_irq_disable(); + + /* Actually run the Guest until something happens. */ + lguest_arch_run_guest(cpu); + + /* Now we're ready to be interrupted or moved to other CPUs */ + local_irq_enable(); + + /* Now we deal with whatever happened to the Guest. */ + lguest_arch_handle_trap(cpu); + } + + /* Special case: Guest is 'dead' but wants a reboot. */ + if (cpu->lg->dead == ERR_PTR(-ERESTART)) + return -ERESTART; + + /* The Guest is dead => "No such file or directory" */ + return -ENOENT; +} + +/*H:000 + * Welcome to the Host! + * + * By this point your brain has been tickled by the Guest code and numbed by + * the Launcher code; prepare for it to be stretched by the Host code. This is + * the heart. Let's begin at the initialization routine for the Host's lg + * module. + */ +static int __init init(void) +{ + int err; + + /* Lguest can't run under Xen, VMI or itself. It does Tricky Stuff. */ + if (get_kernel_rpl() != 0) { + printk("lguest is afraid of being a guest\n"); + return -EPERM; + } + + /* First we put the Switcher up in very high virtual memory. */ + err = map_switcher(); + if (err) + goto out; + + /* Now we set up the pagetable implementation for the Guests. */ + err = init_pagetables(switcher_page, SHARED_SWITCHER_PAGES); + if (err) + goto unmap; + + /* We might need to reserve an interrupt vector. */ + err = init_interrupts(); + if (err) + goto free_pgtables; + + /* /dev/lguest needs to be registered. 
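+ *
+ * (Note the unwind pattern in this function: each step that can fail
+ * has a matching label below, so an error at step N rolls back steps
+ * N-1 .. 1 in reverse order before returning the error code.)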
*/ + err = lguest_device_init(); + if (err) + goto free_interrupts; + + /* Finally we do some architecture-specific setup. */ + lguest_arch_host_init(); + + /* All good! */ + return 0; + +free_interrupts: + free_interrupts(); +free_pgtables: + free_pagetables(); +unmap: + unmap_switcher(); +out: + return err; +} + +/* Cleaning up is just the same code, backwards. With a little French. */ +static void __exit fini(void) +{ + lguest_device_remove(); + free_interrupts(); + free_pagetables(); + unmap_switcher(); + + lguest_arch_host_fini(); +} +/*:*/ + +/* + * The Host side of lguest can be a module. This is a nice way for people to + * play with it. + */ +module_init(init); +module_exit(fini); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>"); diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c new file mode 100644 index 00000000..83511eb0 --- /dev/null +++ b/drivers/lguest/hypercalls.c @@ -0,0 +1,312 @@ +/*P:500 + * Just as userspace programs request kernel operations through a system + * call, the Guest requests Host operations through a "hypercall". You might + * notice this nomenclature doesn't really follow any logic, but the name has + * been around for long enough that we're stuck with it. As you'd expect, this + * code is basically a one big switch statement. +:*/ + +/* Copyright (C) 2006 Rusty Russell IBM Corporation + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +*/ +#include <linux/uaccess.h> +#include <linux/syscalls.h> +#include <linux/mm.h> +#include <linux/ktime.h> +#include <asm/page.h> +#include <asm/pgtable.h> +#include "lg.h" + +/*H:120 + * This is the core hypercall routine: where the Guest gets what it wants. + * Or gets killed. Or, in the case of LHCALL_SHUTDOWN, both. + */ +static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args) +{ + switch (args->arg0) { + case LHCALL_FLUSH_ASYNC: + /* + * This call does nothing, except by breaking out of the Guest + * it makes us process all the asynchronous hypercalls. + */ + break; + case LHCALL_SEND_INTERRUPTS: + /* + * This call does nothing too, but by breaking out of the Guest + * it makes us process any pending interrupts. + */ + break; + case LHCALL_LGUEST_INIT: + /* + * You can't get here unless you're already initialized. Don't + * do that. + */ + kill_guest(cpu, "already have lguest_data"); + break; + case LHCALL_SHUTDOWN: { + char msg[128]; + /* + * Shutdown is such a trivial hypercall that we do it in five + * lines right here. + * + * If the lgread fails, it will call kill_guest() itself; the + * kill_guest() with the message will be ignored. 
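+ * (That is safe because kill_guest() only records the *first* death
+ * reason -- see the macro in lg.h -- so a second call from here is a
+ * harmless no-op.)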
+ */ + __lgread(cpu, msg, args->arg1, sizeof(msg)); + msg[sizeof(msg)-1] = '\0'; + kill_guest(cpu, "CRASH: %s", msg); + if (args->arg2 == LGUEST_SHUTDOWN_RESTART) + cpu->lg->dead = ERR_PTR(-ERESTART); + break; + } + case LHCALL_FLUSH_TLB: + /* FLUSH_TLB comes in two flavors, depending on the argument: */ + if (args->arg1) + guest_pagetable_clear_all(cpu); + else + guest_pagetable_flush_user(cpu); + break; + + /* + * All these calls simply pass the arguments through to the right + * routines. + */ + case LHCALL_NEW_PGTABLE: + guest_new_pagetable(cpu, args->arg1); + break; + case LHCALL_SET_STACK: + guest_set_stack(cpu, args->arg1, args->arg2, args->arg3); + break; + case LHCALL_SET_PTE: +#ifdef CONFIG_X86_PAE + guest_set_pte(cpu, args->arg1, args->arg2, + __pte(args->arg3 | (u64)args->arg4 << 32)); +#else + guest_set_pte(cpu, args->arg1, args->arg2, __pte(args->arg3)); +#endif + break; + case LHCALL_SET_PGD: + guest_set_pgd(cpu->lg, args->arg1, args->arg2); + break; +#ifdef CONFIG_X86_PAE + case LHCALL_SET_PMD: + guest_set_pmd(cpu->lg, args->arg1, args->arg2); + break; +#endif + case LHCALL_SET_CLOCKEVENT: + guest_set_clockevent(cpu, args->arg1); + break; + case LHCALL_TS: + /* This sets the TS flag, as we saw used in run_guest(). */ + cpu->ts = args->arg1; + break; + case LHCALL_HALT: + /* Similarly, this sets the halted flag for run_guest(). */ + cpu->halted = 1; + break; + case LHCALL_NOTIFY: + cpu->pending_notify = args->arg1; + break; + default: + /* It should be an architecture-specific hypercall. */ + if (lguest_arch_do_hcall(cpu, args)) + kill_guest(cpu, "Bad hypercall %li\n", args->arg0); + } +} + +/*H:124 + * Asynchronous hypercalls are easy: we just look in the array in the + * Guest's "struct lguest_data" to see if any new ones are marked "ready". + * + * We are careful to do these in order: obviously we respect the order the + * Guest put them in the ring, but we also promise the Guest that they will + * happen before any normal hypercall (which is why we check this before + * checking for a normal hcall). + */ +static void do_async_hcalls(struct lg_cpu *cpu) +{ + unsigned int i; + u8 st[LHCALL_RING_SIZE]; + + /* For simplicity, we copy the entire call status array in at once. */ + if (copy_from_user(&st, &cpu->lg->lguest_data->hcall_status, sizeof(st))) + return; + + /* We process "struct lguest_data"s hcalls[] ring once. */ + for (i = 0; i < ARRAY_SIZE(st); i++) { + struct hcall_args args; + /* + * We remember where we were up to from last time. This makes + * sure that the hypercalls are done in the order the Guest + * places them in the ring. + */ + unsigned int n = cpu->next_hcall; + + /* 0xFF means there's no call here (yet). */ + if (st[n] == 0xFF) + break; + + /* + * OK, we have hypercall. Increment the "next_hcall" cursor, + * and wrap back to 0 if we reach the end. + */ + if (++cpu->next_hcall == LHCALL_RING_SIZE) + cpu->next_hcall = 0; + + /* + * Copy the hypercall arguments into a local copy of the + * hcall_args struct. + */ + if (copy_from_user(&args, &cpu->lg->lguest_data->hcalls[n], + sizeof(struct hcall_args))) { + kill_guest(cpu, "Fetching async hypercalls"); + break; + } + + /* Do the hypercall, same as a normal one. */ + do_hcall(cpu, &args); + + /* Mark the hypercall done. */ + if (put_user(0xFF, &cpu->lg->lguest_data->hcall_status[n])) { + kill_guest(cpu, "Writing result for async hypercall"); + break; + } + + /* + * Stop doing hypercalls if they want to notify the Launcher: + * it needs to service this first. 
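+ * (Nothing is lost by breaking out early: unprocessed entries are
+ * still marked ready in hcall_status[], and cpu->next_hcall remembers
+ * where to resume on the next pass through this loop.)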
+ */ + if (cpu->pending_notify) + break; + } +} + +/* + * Last of all, we look at what happens first of all. The very first time the + * Guest makes a hypercall, we end up here to set things up: + */ +static void initialize(struct lg_cpu *cpu) +{ + /* + * You can't do anything until you're initialized. The Guest knows the + * rules, so we're unforgiving here. + */ + if (cpu->hcall->arg0 != LHCALL_LGUEST_INIT) { + kill_guest(cpu, "hypercall %li before INIT", cpu->hcall->arg0); + return; + } + + if (lguest_arch_init_hypercalls(cpu)) + kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data); + + /* + * The Guest tells us where we're not to deliver interrupts by putting + * the range of addresses into "struct lguest_data". + */ + if (get_user(cpu->lg->noirq_start, &cpu->lg->lguest_data->noirq_start) + || get_user(cpu->lg->noirq_end, &cpu->lg->lguest_data->noirq_end)) + kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data); + + /* + * We write the current time into the Guest's data page once so it can + * set its clock. + */ + write_timestamp(cpu); + + /* page_tables.c will also do some setup. */ + page_table_guest_data_init(cpu); + + /* + * This is the one case where the above accesses might have been the + * first write to a Guest page. This may have caused a copy-on-write + * fault, but the old page might be (read-only) in the Guest + * pagetable. + */ + guest_pagetable_clear_all(cpu); +} +/*:*/ + +/*M:013 + * If a Guest reads from a page (so creates a mapping) that it has never + * written to, and then the Launcher writes to it (ie. the output of a virtual + * device), the Guest will still see the old page. In practice, this never + * happens: why would the Guest read a page which it has never written to? But + * a similar scenario might one day bite us, so it's worth mentioning. + * + * Note that if we used a shared anonymous mapping in the Launcher instead of + * mapping /dev/zero private, we wouldn't worry about cop-on-write. And we + * need that to switch the Launcher to processes (away from threads) anyway. +:*/ + +/*H:100 + * Hypercalls + * + * Remember from the Guest, hypercalls come in two flavors: normal and + * asynchronous. This file handles both of types. + */ +void do_hypercalls(struct lg_cpu *cpu) +{ + /* Not initialized yet? This hypercall must do it. */ + if (unlikely(!cpu->lg->lguest_data)) { + /* Set up the "struct lguest_data" */ + initialize(cpu); + /* Hcall is done. */ + cpu->hcall = NULL; + return; + } + + /* + * The Guest has initialized. + * + * Look in the hypercall ring for the async hypercalls: + */ + do_async_hcalls(cpu); + + /* + * If we stopped reading the hypercall ring because the Guest did a + * NOTIFY to the Launcher, we want to return now. Otherwise we do + * the hypercall. + */ + if (!cpu->pending_notify) { + do_hcall(cpu, cpu->hcall); + /* + * Tricky point: we reset the hcall pointer to mark the + * hypercall as "done". We use the hcall pointer rather than + * the trap number to indicate a hypercall is pending. + * Normally it doesn't matter: the Guest will run again and + * update the trap number before we come back here. + * + * However, if we are signalled or the Guest sends I/O to the + * Launcher, the run_guest() loop will exit without running the + * Guest. When it comes back it would try to re-run the + * hypercall. Finding that bug sucked. + */ + cpu->hcall = NULL; + } +} + +/* + * This routine supplies the Guest with time: it's used for wallclock time at + * initial boot and as a rough time source if the TSC isn't available. 
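+ * (The value lands in the "time" field of the Guest's lguest_data
+ * page; it is also refreshed on every interrupt delivery -- see
+ * try_deliver_interrupt() in interrupts_and_traps.c.)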
+ */ +void write_timestamp(struct lg_cpu *cpu) +{ + struct timespec now; + ktime_get_real_ts(&now); + if (copy_to_user(&cpu->lg->lguest_data->time, + &now, sizeof(struct timespec))) + kill_guest(cpu, "Writing timestamp"); +} diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c new file mode 100644 index 00000000..28433a15 --- /dev/null +++ b/drivers/lguest/interrupts_and_traps.c @@ -0,0 +1,647 @@ +/*P:800 + * Interrupts (traps) are complicated enough to earn their own file. + * There are three classes of interrupts: + * + * 1) Real hardware interrupts which occur while we're running the Guest, + * 2) Interrupts for virtual devices attached to the Guest, and + * 3) Traps and faults from the Guest. + * + * Real hardware interrupts must be delivered to the Host, not the Guest. + * Virtual interrupts must be delivered to the Guest, but we make them look + * just like real hardware would deliver them. Traps from the Guest can be set + * up to go directly back into the Guest, but sometimes the Host wants to see + * them first, so we also have a way of "reflecting" them into the Guest as if + * they had been delivered to it directly. +:*/ +#include <linux/uaccess.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/sched.h> +#include "lg.h" + +/* Allow Guests to use a non-128 (ie. non-Linux) syscall trap. */ +static unsigned int syscall_vector = SYSCALL_VECTOR; +module_param(syscall_vector, uint, 0444); + +/* The address of the interrupt handler is split into two bits: */ +static unsigned long idt_address(u32 lo, u32 hi) +{ + return (lo & 0x0000FFFF) | (hi & 0xFFFF0000); +} + +/* + * The "type" of the interrupt handler is a 4 bit field: we only support a + * couple of types. + */ +static int idt_type(u32 lo, u32 hi) +{ + return (hi >> 8) & 0xF; +} + +/* An IDT entry can't be used unless the "present" bit is set. */ +static bool idt_present(u32 lo, u32 hi) +{ + return (hi & 0x8000); +} + +/* + * We need a helper to "push" a value onto the Guest's stack, since that's a + * big part of what delivering an interrupt does. + */ +static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val) +{ + /* Stack grows upwards: move stack then write value. */ + *gstack -= 4; + lgwrite(cpu, *gstack, u32, val); +} + +/*H:210 + * The set_guest_interrupt() routine actually delivers the interrupt or + * trap. The mechanics of delivering traps and interrupts to the Guest are the + * same, except some traps have an "error code" which gets pushed onto the + * stack as well: the caller tells us if this is one. + * + * "lo" and "hi" are the two parts of the Interrupt Descriptor Table for this + * interrupt or trap. It's split into two parts for traditional reasons: gcc + * on i386 used to be frightened by 64 bit numbers. + * + * We set up the stack just like the CPU does for a real interrupt, so it's + * identical for the Guest (and the standard "iret" instruction will undo + * it). + */ +static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, + bool has_err) +{ + unsigned long gstack, origstack; + u32 eflags, ss, irq_enable; + unsigned long virtstack; + + /* + * There are two cases for interrupts: one where the Guest is already + * in the kernel, and a more complex one where the Guest is in + * userspace. We check the privilege level to find out. + */ + if ((cpu->regs->ss&0x3) != GUEST_PL) { + /* + * The Guest told us their kernel stack with the SET_STACK + * hypercall: both the virtual address and the segment. 
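+ * (guest_set_stack() stashed those values in cpu->ss1 and cpu->esp1
+ * when that hypercall arrived; here we simply read them back.)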
+ */ + virtstack = cpu->esp1; + ss = cpu->ss1; + + origstack = gstack = guest_pa(cpu, virtstack); + /* + * We push the old stack segment and pointer onto the new + * stack: when the Guest does an "iret" back from the interrupt + * handler the CPU will notice they're dropping privilege + * levels and expect these here. + */ + push_guest_stack(cpu, &gstack, cpu->regs->ss); + push_guest_stack(cpu, &gstack, cpu->regs->esp); + } else { + /* We're staying on the same Guest (kernel) stack. */ + virtstack = cpu->regs->esp; + ss = cpu->regs->ss; + + origstack = gstack = guest_pa(cpu, virtstack); + } + + /* + * Remember that we never let the Guest actually disable interrupts, so + * the "Interrupt Flag" bit is always set. We copy that bit from the + * Guest's "irq_enabled" field into the eflags word: we saw the Guest + * copy it back in "lguest_iret". + */ + eflags = cpu->regs->eflags; + if (get_user(irq_enable, &cpu->lg->lguest_data->irq_enabled) == 0 + && !(irq_enable & X86_EFLAGS_IF)) + eflags &= ~X86_EFLAGS_IF; + + /* + * An interrupt is expected to push three things on the stack: the old + * "eflags" word, the old code segment, and the old instruction + * pointer. + */ + push_guest_stack(cpu, &gstack, eflags); + push_guest_stack(cpu, &gstack, cpu->regs->cs); + push_guest_stack(cpu, &gstack, cpu->regs->eip); + + /* For the six traps which supply an error code, we push that, too. */ + if (has_err) + push_guest_stack(cpu, &gstack, cpu->regs->errcode); + + /* + * Now we've pushed all the old state, we change the stack, the code + * segment and the address to execute. + */ + cpu->regs->ss = ss; + cpu->regs->esp = virtstack + (gstack - origstack); + cpu->regs->cs = (__KERNEL_CS|GUEST_PL); + cpu->regs->eip = idt_address(lo, hi); + + /* + * There are two kinds of interrupt handlers: 0xE is an "interrupt + * gate" which expects interrupts to be disabled on entry. + */ + if (idt_type(lo, hi) == 0xE) + if (put_user(0, &cpu->lg->lguest_data->irq_enabled)) + kill_guest(cpu, "Disabling interrupts"); +} + +/*H:205 + * Virtual Interrupts. + * + * interrupt_pending() returns the first pending interrupt which isn't blocked + * by the Guest. It is called before every entry to the Guest, and just before + * we go to sleep when the Guest has halted itself. + */ +unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more) +{ + unsigned int irq; + DECLARE_BITMAP(blk, LGUEST_IRQS); + + /* If the Guest hasn't even initialized yet, we can do nothing. */ + if (!cpu->lg->lguest_data) + return LGUEST_IRQS; + + /* + * Take our "irqs_pending" array and remove any interrupts the Guest + * wants blocked: the result ends up in "blk". + */ + if (copy_from_user(&blk, cpu->lg->lguest_data->blocked_interrupts, + sizeof(blk))) + return LGUEST_IRQS; + bitmap_andnot(blk, cpu->irqs_pending, blk, LGUEST_IRQS); + + /* Find the first interrupt. */ + irq = find_first_bit(blk, LGUEST_IRQS); + *more = find_next_bit(blk, LGUEST_IRQS, irq+1); + + return irq; +} + +/* + * This actually diverts the Guest to running an interrupt handler, once an + * interrupt has been identified by interrupt_pending(). + */ +void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more) +{ + struct desc_struct *idt; + + BUG_ON(irq >= LGUEST_IRQS); + + /* + * They may be in the middle of an iret, where they asked us never to + * deliver interrupts. + */ + if (cpu->regs->eip >= cpu->lg->noirq_start && + (cpu->regs->eip < cpu->lg->noirq_end)) + return; + + /* If they're halted, interrupts restart them. 
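+ * (This mirrors real hardware, where "hlt" parks the CPU until the
+ * next interrupt arrives.)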
*/ + if (cpu->halted) { + /* Re-enable interrupts. */ + if (put_user(X86_EFLAGS_IF, &cpu->lg->lguest_data->irq_enabled)) + kill_guest(cpu, "Re-enabling interrupts"); + cpu->halted = 0; + } else { + /* Otherwise we check if they have interrupts disabled. */ + u32 irq_enabled; + if (get_user(irq_enabled, &cpu->lg->lguest_data->irq_enabled)) + irq_enabled = 0; + if (!irq_enabled) { + /* Make sure they know an IRQ is pending. */ + put_user(X86_EFLAGS_IF, + &cpu->lg->lguest_data->irq_pending); + return; + } + } + + /* + * Look at the IDT entry the Guest gave us for this interrupt. The + * first 32 (FIRST_EXTERNAL_VECTOR) entries are for traps, so we skip + * over them. + */ + idt = &cpu->arch.idt[FIRST_EXTERNAL_VECTOR+irq]; + /* If they don't have a handler (yet?), we just ignore it */ + if (idt_present(idt->a, idt->b)) { + /* OK, mark it no longer pending and deliver it. */ + clear_bit(irq, cpu->irqs_pending); + /* + * set_guest_interrupt() takes the interrupt descriptor and a + * flag to say whether this interrupt pushes an error code onto + * the stack as well: virtual interrupts never do. + */ + set_guest_interrupt(cpu, idt->a, idt->b, false); + } + + /* + * Every time we deliver an interrupt, we update the timestamp in the + * Guest's lguest_data struct. It would be better for the Guest if we + * did this more often, but it can actually be quite slow: doing it + * here is a compromise which means at least it gets updated every + * timer interrupt. + */ + write_timestamp(cpu); + + /* + * If there are no other interrupts we want to deliver, clear + * the pending flag. + */ + if (!more) + put_user(0, &cpu->lg->lguest_data->irq_pending); +} + +/* And this is the routine when we want to set an interrupt for the Guest. */ +void set_interrupt(struct lg_cpu *cpu, unsigned int irq) +{ + /* + * Next time the Guest runs, the core code will see if it can deliver + * this interrupt. + */ + set_bit(irq, cpu->irqs_pending); + + /* + * Make sure it sees it; it might be asleep (eg. halted), or running + * the Guest right now, in which case kick_process() will knock it out. + */ + if (!wake_up_process(cpu->tsk)) + kick_process(cpu->tsk); +} +/*:*/ + +/* + * Linux uses trap 128 for system calls. Plan9 uses 64, and Ron Minnich sent + * me a patch, so we support that too. It'd be a big step for lguest if half + * the Plan 9 user base were to start using it. + * + * Actually now I think of it, it's possible that Ron *is* half the Plan 9 + * userbase. Oh well. + */ +static bool could_be_syscall(unsigned int num) +{ + /* Normal Linux SYSCALL_VECTOR or reserved vector? */ + return num == SYSCALL_VECTOR || num == syscall_vector; +} + +/* The syscall vector it wants must be unused by Host. */ +bool check_syscall_vector(struct lguest *lg) +{ + u32 vector; + + if (get_user(vector, &lg->lguest_data->syscall_vec)) + return false; + + return could_be_syscall(vector); +} + +int init_interrupts(void) +{ + /* If they want some strange system call vector, reserve it now */ + if (syscall_vector != SYSCALL_VECTOR) { + if (test_bit(syscall_vector, used_vectors) || + vector_used_by_percpu_irq(syscall_vector)) { + printk(KERN_ERR "lg: couldn't reserve syscall %u\n", + syscall_vector); + return -EBUSY; + } + set_bit(syscall_vector, used_vectors); + } + + return 0; +} + +void free_interrupts(void) +{ + if (syscall_vector != SYSCALL_VECTOR) + clear_bit(syscall_vector, used_vectors); +} + +/*H:220 + * Now we've got the routines to deliver interrupts, delivering traps like + * page fault is easy. 
The only trick is that Intel decided that some traps + * should have error codes: + */ +static bool has_err(unsigned int trap) +{ + return (trap == 8 || (trap >= 10 && trap <= 14) || trap == 17); +} + +/* deliver_trap() returns true if it could deliver the trap. */ +bool deliver_trap(struct lg_cpu *cpu, unsigned int num) +{ + /* + * Trap numbers are always 8 bit, but we set an impossible trap number + * for traps inside the Switcher, so check that here. + */ + if (num >= ARRAY_SIZE(cpu->arch.idt)) + return false; + + /* + * Early on the Guest hasn't set the IDT entries (or maybe it put a + * bogus one in): if we fail here, the Guest will be killed. + */ + if (!idt_present(cpu->arch.idt[num].a, cpu->arch.idt[num].b)) + return false; + set_guest_interrupt(cpu, cpu->arch.idt[num].a, + cpu->arch.idt[num].b, has_err(num)); + return true; +} + +/*H:250 + * Here's the hard part: returning to the Host every time a trap happens + * and then calling deliver_trap() and re-entering the Guest is slow. + * Particularly because Guest userspace system calls are traps (usually trap + * 128). + * + * So we'd like to set up the IDT to tell the CPU to deliver traps directly + * into the Guest. This is possible, but the complexities cause the size of + * this file to double! However, 150 lines of code is worth writing for taking + * system calls down from 1750ns to 270ns. Plus, if lguest didn't do it, all + * the other hypervisors would beat it up at lunchtime. + * + * This routine indicates if a particular trap number could be delivered + * directly. + */ +static bool direct_trap(unsigned int num) +{ + /* + * Hardware interrupts don't go to the Guest at all (except system + * call). + */ + if (num >= FIRST_EXTERNAL_VECTOR && !could_be_syscall(num)) + return false; + + /* + * The Host needs to see page faults (for shadow paging and to save the + * fault address), general protection faults (in/out emulation) and + * device not available (TS handling) and of course, the hypercall trap. + */ + return num != 14 && num != 13 && num != 7 && num != LGUEST_TRAP_ENTRY; +} +/*:*/ + +/*M:005 + * The Guest has the ability to turn its interrupt gates into trap gates, + * if it is careful. The Host will let trap gates can go directly to the + * Guest, but the Guest needs the interrupts atomically disabled for an + * interrupt gate. It can do this by pointing the trap gate at instructions + * within noirq_start and noirq_end, where it can safely disable interrupts. + */ + +/*M:006 + * The Guests do not use the sysenter (fast system call) instruction, + * because it's hardcoded to enter privilege level 0 and so can't go direct. + * It's about twice as fast as the older "int 0x80" system call, so it might + * still be worthwhile to handle it in the Switcher and lcall down to the + * Guest. The sysenter semantics are hairy tho: search for that keyword in + * entry.S +:*/ + +/*H:260 + * When we make traps go directly into the Guest, we need to make sure + * the kernel stack is valid (ie. mapped in the page tables). Otherwise, the + * CPU trying to deliver the trap will fault while trying to push the interrupt + * words on the stack: this is called a double fault, and it forces us to kill + * the Guest. + * + * Which is deeply unfair, because (literally!) it wasn't the Guests' fault. + */ +void pin_stack_pages(struct lg_cpu *cpu) +{ + unsigned int i; + + /* + * Depending on the CONFIG_4KSTACKS option, the Guest can have one or + * two pages of stack space. 
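+ *
+ * A worked example with hypothetical numbers (4KB pages): if esp1
+ * were 0xC0402000 with two stack pages, the loop below would pin the
+ * pages containing 0xC0401FFF and 0xC0400FFF -- the two pages
+ * immediately below the top-of-stack address.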
+ */ + for (i = 0; i < cpu->lg->stack_pages; i++) + /* + * The stack grows *upwards*, so the address we're given is the + * start of the page after the kernel stack. Subtract one to + * get back onto the first stack page, and keep subtracting to + * get to the rest of the stack pages. + */ + pin_page(cpu, cpu->esp1 - 1 - i * PAGE_SIZE); +} + +/* + * Direct traps also mean that we need to know whenever the Guest wants to use + * a different kernel stack, so we can change the guest TSS to use that + * stack. The TSS entries expect a virtual address, so unlike most addresses + * the Guest gives us, the "esp" (stack pointer) value here is virtual, not + * physical. + * + * In Linux each process has its own kernel stack, so this happens a lot: we + * change stacks on each context switch. + */ +void guest_set_stack(struct lg_cpu *cpu, u32 seg, u32 esp, unsigned int pages) +{ + /* + * You're not allowed a stack segment with privilege level 0: bad Guest! + */ + if ((seg & 0x3) != GUEST_PL) + kill_guest(cpu, "bad stack segment %i", seg); + /* We only expect one or two stack pages. */ + if (pages > 2) + kill_guest(cpu, "bad stack pages %u", pages); + /* Save where the stack is, and how many pages */ + cpu->ss1 = seg; + cpu->esp1 = esp; + cpu->lg->stack_pages = pages; + /* Make sure the new stack pages are mapped */ + pin_stack_pages(cpu); +} + +/* + * All this reference to mapping stacks leads us neatly into the other complex + * part of the Host: page table handling. + */ + +/*H:235 + * This is the routine which actually checks the Guest's IDT entry and + * transfers it into the entry in "struct lguest": + */ +static void set_trap(struct lg_cpu *cpu, struct desc_struct *trap, + unsigned int num, u32 lo, u32 hi) +{ + u8 type = idt_type(lo, hi); + + /* We zero-out a not-present entry */ + if (!idt_present(lo, hi)) { + trap->a = trap->b = 0; + return; + } + + /* We only support interrupt and trap gates. */ + if (type != 0xE && type != 0xF) + kill_guest(cpu, "bad IDT type %i", type); + + /* + * We only copy the handler address, present bit, privilege level and + * type. The privilege level controls where the trap can be triggered + * manually with an "int" instruction. This is usually GUEST_PL, + * except for system calls which userspace can use. + */ + trap->a = ((__KERNEL_CS|GUEST_PL)<<16) | (lo&0x0000FFFF); + trap->b = (hi&0xFFFFEF00); +} + +/*H:230 + * While we're here, dealing with delivering traps and interrupts to the + * Guest, we might as well complete the picture: how the Guest tells us where + * it wants them to go. This would be simple, except making traps fast + * requires some tricks. + * + * We saw the Guest setting Interrupt Descriptor Table (IDT) entries with the + * LHCALL_LOAD_IDT_ENTRY hypercall before: that comes here. + */ +void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int num, u32 lo, u32 hi) +{ + /* + * Guest never handles: NMI, doublefault, spurious interrupt or + * hypercall. We ignore when it tries to set them. + */ + if (num == 2 || num == 8 || num == 15 || num == LGUEST_TRAP_ENTRY) + return; + + /* + * Mark the IDT as changed: next time the Guest runs we'll know we have + * to copy this again. + */ + cpu->changed |= CHANGED_IDT; + + /* Check that the Guest doesn't try to step outside the bounds. */ + if (num >= ARRAY_SIZE(cpu->arch.idt)) + kill_guest(cpu, "Setting idt entry %u", num); + else + set_trap(cpu, &cpu->arch.idt[num], num, lo, hi); +} + +/* + * The default entry for each interrupt points into the Switcher routines which + * simply return to the Host. 
The run_guest() loop will then call + * deliver_trap() to bounce it back into the Guest. + */ +static void default_idt_entry(struct desc_struct *idt, + int trap, + const unsigned long handler, + const struct desc_struct *base) +{ + /* A present interrupt gate. */ + u32 flags = 0x8e00; + + /* + * Set the privilege level on the entry for the hypercall: this allows + * the Guest to use the "int" instruction to trigger it. + */ + if (trap == LGUEST_TRAP_ENTRY) + flags |= (GUEST_PL << 13); + else if (base) + /* + * Copy privilege level from what Guest asked for. This allows + * debug (int 3) traps from Guest userspace, for example. + */ + flags |= (base->b & 0x6000); + + /* Now pack it into the IDT entry in its weird format. */ + idt->a = (LGUEST_CS<<16) | (handler&0x0000FFFF); + idt->b = (handler&0xFFFF0000) | flags; +} + +/* When the Guest first starts, we put default entries into the IDT. */ +void setup_default_idt_entries(struct lguest_ro_state *state, + const unsigned long *def) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(state->guest_idt); i++) + default_idt_entry(&state->guest_idt[i], i, def[i], NULL); +} + +/*H:240 + * We don't use the IDT entries in the "struct lguest" directly, instead + * we copy them into the IDT which we've set up for Guests on this CPU, just + * before we run the Guest. This routine does that copy. + */ +void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt, + const unsigned long *def) +{ + unsigned int i; + + /* + * We can simply copy the direct traps, otherwise we use the default + * ones in the Switcher: they will return to the Host. + */ + for (i = 0; i < ARRAY_SIZE(cpu->arch.idt); i++) { + const struct desc_struct *gidt = &cpu->arch.idt[i]; + + /* If no Guest can ever override this trap, leave it alone. */ + if (!direct_trap(i)) + continue; + + /* + * Only trap gates (type 15) can go direct to the Guest. + * Interrupt gates (type 14) disable interrupts as they are + * entered, which we never let the Guest do. Not present + * entries (type 0x0) also can't go direct, of course. + * + * If it can't go direct, we still need to copy the priv. level: + * they might want to give userspace access to a software + * interrupt. + */ + if (idt_type(gidt->a, gidt->b) == 0xF) + idt[i] = *gidt; + else + default_idt_entry(&idt[i], i, def[i], gidt); + } +} + +/*H:200 + * The Guest Clock. + * + * There are two sources of virtual interrupts. We saw one in lguest_user.c: + * the Launcher sending interrupts for virtual devices. The other is the Guest + * timer interrupt. + * + * The Guest uses the LHCALL_SET_CLOCKEVENT hypercall to tell us how long to + * the next timer interrupt (in nanoseconds). We use the high-resolution timer + * infrastructure to set a callback at that time. + * + * 0 means "turn off the clock". + */ +void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta) +{ + ktime_t expires; + + if (unlikely(delta == 0)) { + /* Clock event device is shutting down. */ + hrtimer_cancel(&cpu->hrt); + return; + } + + /* + * We use wallclock time here, so the Guest might not be running for + * all the time between now and the timer interrupt it asked for. This + * is almost always the right thing to do. + */ + expires = ktime_add_ns(ktime_get_real(), delta); + hrtimer_start(&cpu->hrt, expires, HRTIMER_MODE_ABS); +} + +/* This is the function called when the Guest's timer expires. 
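+ * (By convention interrupt 0 is the Guest's timer: expiry just sets
+ * that bit and lets the next run_guest() iteration deliver it.)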
*/ +static enum hrtimer_restart clockdev_fn(struct hrtimer *timer) +{ + struct lg_cpu *cpu = container_of(timer, struct lg_cpu, hrt); + + /* Remember the first interrupt is the timer interrupt. */ + set_interrupt(cpu, 0); + return HRTIMER_NORESTART; +} + +/* This sets up the timer for this Guest. */ +void init_clockdev(struct lg_cpu *cpu) +{ + hrtimer_init(&cpu->hrt, CLOCK_REALTIME, HRTIMER_MODE_ABS); + cpu->hrt.function = clockdev_fn; +} diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h new file mode 100644 index 00000000..295df06e --- /dev/null +++ b/drivers/lguest/lg.h @@ -0,0 +1,260 @@ +#ifndef _LGUEST_H +#define _LGUEST_H + +#ifndef __ASSEMBLY__ +#include <linux/types.h> +#include <linux/init.h> +#include <linux/stringify.h> +#include <linux/lguest.h> +#include <linux/lguest_launcher.h> +#include <linux/wait.h> +#include <linux/hrtimer.h> +#include <linux/err.h> +#include <linux/slab.h> + +#include <asm/lguest.h> + +void free_pagetables(void); +int init_pagetables(struct page **switcher_page, unsigned int pages); + +struct pgdir { + unsigned long gpgdir; + pgd_t *pgdir; +}; + +/* We have two pages shared with guests, per cpu. */ +struct lguest_pages { + /* This is the stack page mapped rw in guest */ + char spare[PAGE_SIZE - sizeof(struct lguest_regs)]; + struct lguest_regs regs; + + /* This is the host state & guest descriptor page, ro in guest */ + struct lguest_ro_state state; +} __attribute__((aligned(PAGE_SIZE))); + +#define CHANGED_IDT 1 +#define CHANGED_GDT 2 +#define CHANGED_GDT_TLS 4 /* Actually a subset of CHANGED_GDT */ +#define CHANGED_ALL 3 + +struct lg_cpu { + unsigned int id; + struct lguest *lg; + struct task_struct *tsk; + struct mm_struct *mm; /* == tsk->mm, but that becomes NULL on exit */ + + u32 cr2; + int ts; + u32 esp1; + u16 ss1; + + /* Bitmap of what has changed: see CHANGED_* above. */ + int changed; + + unsigned long pending_notify; /* pfn from LHCALL_NOTIFY */ + + /* At end of a page shared mapped over lguest_pages in guest. */ + unsigned long regs_page; + struct lguest_regs *regs; + + struct lguest_pages *last_pages; + + /* Initialization mode: linear map everything. */ + bool linear_pages; + int cpu_pgd; /* Which pgd this cpu is currently using */ + + /* If a hypercall was asked for, this points to the arguments. */ + struct hcall_args *hcall; + u32 next_hcall; + + /* Virtual clock device */ + struct hrtimer hrt; + + /* Did the Guest tell us to halt? */ + int halted; + + /* Pending virtual interrupts */ + DECLARE_BITMAP(irqs_pending, LGUEST_IRQS); + + struct lg_cpu_arch arch; +}; + +struct lg_eventfd { + unsigned long addr; + struct eventfd_ctx *event; +}; + +struct lg_eventfd_map { + unsigned int num; + struct lg_eventfd map[]; +}; + +/* The private info the thread maintains about the guest. */ +struct lguest { + struct lguest_data __user *lguest_data; + struct lg_cpu cpus[NR_CPUS]; + unsigned int nr_cpus; + + u32 pfn_limit; + + /* + * This provides the offset to the base of guest-physical memory in the + * Launcher. + */ + void __user *mem_base; + unsigned long kernel_address; + + struct pgdir pgdirs[4]; + + unsigned long noirq_start, noirq_end; + + unsigned int stack_pages; + u32 tsc_khz; + + struct lg_eventfd_map *eventfds; + + /* Dead? 
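+ * NULL while the Guest is alive; otherwise a kasprintf()ed reason
+ * string, or an ERR_PTR() code such as -ERESTART -- see the
+ * kill_guest() macro below and the reboot check in run_guest().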
*/ + const char *dead; +}; + +extern struct mutex lguest_lock; + +/* core.c: */ +bool lguest_address_ok(const struct lguest *lg, + unsigned long addr, unsigned long len); +void __lgread(struct lg_cpu *, void *, unsigned long, unsigned); +void __lgwrite(struct lg_cpu *, unsigned long, const void *, unsigned); + +/*H:035 + * Using memory-copy operations like that is usually inconvient, so we + * have the following helper macros which read and write a specific type (often + * an unsigned long). + * + * This reads into a variable of the given type then returns that. + */ +#define lgread(cpu, addr, type) \ + ({ type _v; __lgread((cpu), &_v, (addr), sizeof(_v)); _v; }) + +/* This checks that the variable is of the given type, then writes it out. */ +#define lgwrite(cpu, addr, type, val) \ + do { \ + typecheck(type, val); \ + __lgwrite((cpu), (addr), &(val), sizeof(val)); \ + } while(0) +/* (end of memory access helper routines) :*/ + +int run_guest(struct lg_cpu *cpu, unsigned long __user *user); + +/* + * Helper macros to obtain the first 12 or the last 20 bits, this is only the + * first step in the migration to the kernel types. pte_pfn is already defined + * in the kernel. + */ +#define pgd_flags(x) (pgd_val(x) & ~PAGE_MASK) +#define pgd_pfn(x) (pgd_val(x) >> PAGE_SHIFT) +#define pmd_flags(x) (pmd_val(x) & ~PAGE_MASK) +#define pmd_pfn(x) (pmd_val(x) >> PAGE_SHIFT) + +/* interrupts_and_traps.c: */ +unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more); +void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more); +void set_interrupt(struct lg_cpu *cpu, unsigned int irq); +bool deliver_trap(struct lg_cpu *cpu, unsigned int num); +void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int i, + u32 low, u32 hi); +void guest_set_stack(struct lg_cpu *cpu, u32 seg, u32 esp, unsigned int pages); +void pin_stack_pages(struct lg_cpu *cpu); +void setup_default_idt_entries(struct lguest_ro_state *state, + const unsigned long *def); +void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt, + const unsigned long *def); +void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta); +bool send_notify_to_eventfd(struct lg_cpu *cpu); +void init_clockdev(struct lg_cpu *cpu); +bool check_syscall_vector(struct lguest *lg); +int init_interrupts(void); +void free_interrupts(void); + +/* segments.c: */ +void setup_default_gdt_entries(struct lguest_ro_state *state); +void setup_guest_gdt(struct lg_cpu *cpu); +void load_guest_gdt_entry(struct lg_cpu *cpu, unsigned int i, + u32 low, u32 hi); +void guest_load_tls(struct lg_cpu *cpu, unsigned long tls_array); +void copy_gdt(const struct lg_cpu *cpu, struct desc_struct *gdt); +void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt); + +/* page_tables.c: */ +int init_guest_pagetable(struct lguest *lg); +void free_guest_pagetable(struct lguest *lg); +void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable); +void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 i); +#ifdef CONFIG_X86_PAE +void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 i); +#endif +void guest_pagetable_clear_all(struct lg_cpu *cpu); +void guest_pagetable_flush_user(struct lg_cpu *cpu); +void guest_set_pte(struct lg_cpu *cpu, unsigned long gpgdir, + unsigned long vaddr, pte_t val); +void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages); +bool demand_page(struct lg_cpu *cpu, unsigned long cr2, int errcode); +void pin_page(struct lg_cpu *cpu, unsigned long vaddr); +unsigned long guest_pa(struct 
lg_cpu *cpu, unsigned long vaddr); +void page_table_guest_data_init(struct lg_cpu *cpu); + +/* <arch>/core.c: */ +void lguest_arch_host_init(void); +void lguest_arch_host_fini(void); +void lguest_arch_run_guest(struct lg_cpu *cpu); +void lguest_arch_handle_trap(struct lg_cpu *cpu); +int lguest_arch_init_hypercalls(struct lg_cpu *cpu); +int lguest_arch_do_hcall(struct lg_cpu *cpu, struct hcall_args *args); +void lguest_arch_setup_regs(struct lg_cpu *cpu, unsigned long start); + +/* <arch>/switcher.S: */ +extern char start_switcher_text[], end_switcher_text[], switch_to_guest[]; + +/* lguest_user.c: */ +int lguest_device_init(void); +void lguest_device_remove(void); + +/* hypercalls.c: */ +void do_hypercalls(struct lg_cpu *cpu); +void write_timestamp(struct lg_cpu *cpu); + +/*L:035 + * Let's step aside for the moment, to study one important routine that's used + * widely in the Host code. + * + * There are many cases where the Guest can do something invalid, like pass crap + * to a hypercall. Since only the Guest kernel can make hypercalls, it's quite + * acceptable to simply terminate the Guest and give the Launcher a nicely + * formatted reason. It's also simpler for the Guest itself, which doesn't + * need to check most hypercalls for "success"; if you're still running, it + * succeeded. + * + * Once this is called, the Guest will never run again, so most Host code can + * call this then continue as if nothing had happened. This means many + * functions don't have to explicitly return an error code, which keeps the + * code simple. + * + * It also means that this can be called more than once: only the first one is + * remembered. The only trick is that we still need to kill the Guest even if + * we can't allocate memory to store the reason. Linux has a neat way of + * packing error codes into invalid pointers, so we use that here. + * + * Like any macro which uses an "if", it is safely wrapped in a run-once "do { + * } while(0)". + */ +#define kill_guest(cpu, fmt...) \ +do { \ + if (!(cpu)->lg->dead) { \ + (cpu)->lg->dead = kasprintf(GFP_ATOMIC, fmt); \ + if (!(cpu)->lg->dead) \ + (cpu)->lg->dead = ERR_PTR(-ENOMEM); \ + } \ +} while(0) +/* (End of aside) :*/ + +#endif /* __ASSEMBLY__ */ +#endif /* _LGUEST_H */ diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c new file mode 100644 index 00000000..9e8388ef --- /dev/null +++ b/drivers/lguest/lguest_device.c @@ -0,0 +1,531 @@ +/*P:050 + * Lguest guests use a very simple method to describe devices. It's a + * series of device descriptors contained just above the top of normal Guest + * memory. + * + * We use the standard "virtio" device infrastructure, which provides us with a + * console, a network and a block driver. Each one expects some configuration + * information and a "virtqueue" or two to send and receive data. +:*/ +#include <linux/init.h> +#include <linux/bootmem.h> +#include <linux/lguest_launcher.h> +#include <linux/virtio.h> +#include <linux/virtio_config.h> +#include <linux/interrupt.h> +#include <linux/virtio_ring.h> +#include <linux/err.h> +#include <linux/export.h> +#include <linux/slab.h> +#include <asm/io.h> +#include <asm/paravirt.h> +#include <asm/lguest_hcall.h> + +/* The pointer to our (page) of device descriptions. */ +static void *lguest_devices; + +/* + * For Guests, device memory can be used as normal memory, so we cast away the + * __iomem to quieten sparse. 
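+ * (sparse treats void * and void __iomem * as distinct address
+ * spaces and warns when they mix; the __force casts below tell it
+ * the mixing is deliberate.)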
+ */ +static inline void *lguest_map(unsigned long phys_addr, unsigned long pages) +{ + return (__force void *)ioremap_cache(phys_addr, PAGE_SIZE*pages); +} + +static inline void lguest_unmap(void *addr) +{ + iounmap((__force void __iomem *)addr); +} + +/*D:100 + * Each lguest device is just a virtio device plus a pointer to its entry + * in the lguest_devices page. + */ +struct lguest_device { + struct virtio_device vdev; + + /* The entry in the lguest_devices page for this device. */ + struct lguest_device_desc *desc; +}; + +/* + * Since the virtio infrastructure hands us a pointer to the virtio_device all + * the time, it helps to have a curt macro to get a pointer to the struct + * lguest_device it's enclosed in. + */ +#define to_lgdev(vd) container_of(vd, struct lguest_device, vdev) + +/*D:130 + * Device configurations + * + * The configuration information for a device consists of one or more + * virtqueues, a feature bitmap, and some configuration bytes. The + * configuration bytes don't really matter to us: the Launcher sets them up, and + * the driver will look at them during setup. + * + * A convenient routine to return the device's virtqueue config array: + * immediately after the descriptor. + */ +static struct lguest_vqconfig *lg_vq(const struct lguest_device_desc *desc) +{ + return (void *)(desc + 1); +} + +/* The features come immediately after the virtqueues. */ +static u8 *lg_features(const struct lguest_device_desc *desc) +{ + return (void *)(lg_vq(desc) + desc->num_vq); +} + +/* The config space comes after the two feature bitmasks. */ +static u8 *lg_config(const struct lguest_device_desc *desc) +{ + return lg_features(desc) + desc->feature_len * 2; +} + +/* The total size of the config page used by this device (incl. desc) */ +static unsigned desc_size(const struct lguest_device_desc *desc) +{ + return sizeof(*desc) + + desc->num_vq * sizeof(struct lguest_vqconfig) + + desc->feature_len * 2 + + desc->config_len; +} + +/* This gets the device's feature bits. */ +static u32 lg_get_features(struct virtio_device *vdev) +{ + unsigned int i; + u32 features = 0; + struct lguest_device_desc *desc = to_lgdev(vdev)->desc; + u8 *in_features = lg_features(desc); + + /* We do this the slow but generic way. */ + for (i = 0; i < min(desc->feature_len * 8, 32); i++) + if (in_features[i / 8] & (1 << (i % 8))) + features |= (1 << i); + + return features; +} + +/* + * To notify on reset or feature finalization, we (ab)use the NOTIFY + * hypercall, with the descriptor address of the device. + */ +static void status_notify(struct virtio_device *vdev) +{ + unsigned long offset = (void *)to_lgdev(vdev)->desc - lguest_devices; + + hcall(LHCALL_NOTIFY, (max_pfn << PAGE_SHIFT) + offset, 0, 0, 0); +} + +/* + * The virtio core takes the features the Host offers, and copies the ones + * supported by the driver into the vdev->features array. Once that's all + * sorted out, this routine is called so we can tell the Host which features we + * understand and accept. + */ +static void lg_finalize_features(struct virtio_device *vdev) +{ + unsigned int i, bits; + struct lguest_device_desc *desc = to_lgdev(vdev)->desc; + /* Second half of bitmap is features we accept. */ + u8 *out_features = lg_features(desc) + desc->feature_len; + + /* Give virtio_ring a chance to accept features. */ + vring_transport_features(vdev); + + /* + * The vdev->feature array is a Linux bitmask: this isn't the same as a + * the simple array of bits used by lguest devices for features. 
So we + * do this slow, manual conversion which is completely general. + */ + memset(out_features, 0, desc->feature_len); + bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8; + for (i = 0; i < bits; i++) { + if (test_bit(i, vdev->features)) + out_features[i / 8] |= (1 << (i % 8)); + } + + /* Tell Host we've finished with this device's feature negotiation */ + status_notify(vdev); +} + +/* Once they've found a field, getting a copy of it is easy. */ +static void lg_get(struct virtio_device *vdev, unsigned int offset, + void *buf, unsigned len) +{ + struct lguest_device_desc *desc = to_lgdev(vdev)->desc; + + /* Check they didn't ask for more than the length of the config! */ + BUG_ON(offset + len > desc->config_len); + memcpy(buf, lg_config(desc) + offset, len); +} + +/* Setting the contents is also trivial. */ +static void lg_set(struct virtio_device *vdev, unsigned int offset, + const void *buf, unsigned len) +{ + struct lguest_device_desc *desc = to_lgdev(vdev)->desc; + + /* Check they didn't ask for more than the length of the config! */ + BUG_ON(offset + len > desc->config_len); + memcpy(lg_config(desc) + offset, buf, len); +} + +/* + * The operations to get and set the status word just access the status field + * of the device descriptor. + */ +static u8 lg_get_status(struct virtio_device *vdev) +{ + return to_lgdev(vdev)->desc->status; +} + +static void lg_set_status(struct virtio_device *vdev, u8 status) +{ + BUG_ON(!status); + to_lgdev(vdev)->desc->status = status; + + /* Tell Host immediately if we failed. */ + if (status & VIRTIO_CONFIG_S_FAILED) + status_notify(vdev); +} + +static void lg_reset(struct virtio_device *vdev) +{ + /* 0 status means "reset" */ + to_lgdev(vdev)->desc->status = 0; + status_notify(vdev); +} + +/* + * Virtqueues + * + * The other piece of infrastructure virtio needs is a "virtqueue": a way of + * the Guest device registering buffers for the other side to read from or + * write into (ie. send and receive buffers). Each device can have multiple + * virtqueues: for example the console driver uses one queue for sending and + * another for receiving. + * + * Fortunately for us, a very fast shared-memory-plus-descriptors virtqueue + * already exists in virtio_ring.c. We just need to connect it up. + * + * We start with the information we need to keep about each virtqueue. + */ + +/*D:140 This is the information we remember about each virtqueue. */ +struct lguest_vq_info { + /* A copy of the information contained in the device config. */ + struct lguest_vqconfig config; + + /* The address where we mapped the virtio ring, so we can unmap it. */ + void *pages; +}; + +/* + * When the virtio_ring code wants to prod the Host, it calls us here and we + * make a hypercall. We hand the physical address of the virtqueue so the Host + * knows which virtqueue we're talking about. + */ +static void lg_notify(struct virtqueue *vq) +{ + /* + * We store our virtqueue information in the "priv" pointer of the + * virtqueue structure. + */ + struct lguest_vq_info *lvq = vq->priv; + + hcall(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT, 0, 0, 0); +} + +/* An extern declaration inside a C file is bad form. Don't do it. */ +extern int lguest_setup_irq(unsigned int irq); + +/* + * This routine finds the Nth virtqueue described in the configuration of + * this device and sets it up. + * + * This is kind of an ugly duckling. 
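+ * (Before the duckling, an aside on the sizing sums coming up.)
+ */
+
+/*
+ * Aside: a sketch under stated assumptions, not code from the patch.
+ * lg_find_vq() below maps the ring's pages, and the arithmetic is worth
+ * seeing on its own: vring_size() gives the byte footprint of a ring
+ * with "num" entries at a given alignment, and the Host maps whole
+ * pages, so we round up.  The helper name is invented.
+ */
+static inline unsigned int example_ring_pages(unsigned int num)
+{
+	return DIV_ROUND_UP(vring_size(num, LGUEST_VRING_ALIGN), PAGE_SIZE);
+}
+
+/* (Back to the duckling.)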
It'd be nicer to have a standard + * representation of a virtqueue in the configuration space, but it seems that + * everyone wants to do it differently. The KVM coders want the Guest to + * allocate its own pages and tell the Host where they are, but for lguest it's + * simpler for the Host to simply tell us where the pages are. + */ +static struct virtqueue *lg_find_vq(struct virtio_device *vdev, + unsigned index, + void (*callback)(struct virtqueue *vq), + const char *name) +{ + struct lguest_device *ldev = to_lgdev(vdev); + struct lguest_vq_info *lvq; + struct virtqueue *vq; + int err; + + /* We must have this many virtqueues. */ + if (index >= ldev->desc->num_vq) + return ERR_PTR(-ENOENT); + + lvq = kmalloc(sizeof(*lvq), GFP_KERNEL); + if (!lvq) + return ERR_PTR(-ENOMEM); + + /* + * Make a copy of the "struct lguest_vqconfig" entry, which sits after + * the descriptor. We need a copy because the config space might not + * be aligned correctly. + */ + memcpy(&lvq->config, lg_vq(ldev->desc)+index, sizeof(lvq->config)); + + printk("Mapping virtqueue %i addr %lx\n", index, + (unsigned long)lvq->config.pfn << PAGE_SHIFT); + /* Figure out how many pages the ring will take, and map that memory */ + lvq->pages = lguest_map((unsigned long)lvq->config.pfn << PAGE_SHIFT, + DIV_ROUND_UP(vring_size(lvq->config.num, + LGUEST_VRING_ALIGN), + PAGE_SIZE)); + if (!lvq->pages) { + err = -ENOMEM; + goto free_lvq; + } + + /* + * OK, tell virtio_ring.c to set up a virtqueue now we know its size + * and we've got a pointer to its pages. Note that we set weak_barriers + * to 'true': the host just a(nother) SMP CPU, so we only need inter-cpu + * barriers. + */ + vq = vring_new_virtqueue(lvq->config.num, LGUEST_VRING_ALIGN, vdev, + true, lvq->pages, lg_notify, callback, name); + if (!vq) { + err = -ENOMEM; + goto unmap; + } + + /* Make sure the interrupt is allocated. */ + err = lguest_setup_irq(lvq->config.irq); + if (err) + goto destroy_vring; + + /* + * Tell the interrupt for this virtqueue to go to the virtio_ring + * interrupt handler. + * + * FIXME: We used to have a flag for the Host to tell us we could use + * the interrupt as a source of randomness: it'd be nice to have that + * back. + */ + err = request_irq(lvq->config.irq, vring_interrupt, IRQF_SHARED, + dev_name(&vdev->dev), vq); + if (err) + goto free_desc; + + /* + * Last of all we hook up our 'struct lguest_vq_info" to the + * virtqueue's priv pointer. + */ + vq->priv = lvq; + return vq; + +free_desc: + irq_free_desc(lvq->config.irq); +destroy_vring: + vring_del_virtqueue(vq); +unmap: + lguest_unmap(lvq->pages); +free_lvq: + kfree(lvq); + return ERR_PTR(err); +} +/*:*/ + +/* Cleaning up a virtqueue is easy */ +static void lg_del_vq(struct virtqueue *vq) +{ + struct lguest_vq_info *lvq = vq->priv; + + /* Release the interrupt */ + free_irq(lvq->config.irq, vq); + /* Tell virtio_ring.c to free the virtqueue. */ + vring_del_virtqueue(vq); + /* Unmap the pages containing the ring. */ + lguest_unmap(lvq->pages); + /* Free our own queue information. */ + kfree(lvq); +} + +static void lg_del_vqs(struct virtio_device *vdev) +{ + struct virtqueue *vq, *n; + + list_for_each_entry_safe(vq, n, &vdev->vqs, list) + lg_del_vq(vq); +} + +static int lg_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char *names[]) +{ + struct lguest_device *ldev = to_lgdev(vdev); + int i; + + /* We must have this many virtqueues. 
*/ + if (nvqs > ldev->desc->num_vq) + return -ENOENT; + + for (i = 0; i < nvqs; ++i) { + vqs[i] = lg_find_vq(vdev, i, callbacks[i], names[i]); + if (IS_ERR(vqs[i])) + goto error; + } + return 0; + +error: + lg_del_vqs(vdev); + return PTR_ERR(vqs[i]); +} + +static const char *lg_bus_name(struct virtio_device *vdev) +{ + return ""; +} + +/* The ops structure which hooks everything together. */ +static struct virtio_config_ops lguest_config_ops = { + .get_features = lg_get_features, + .finalize_features = lg_finalize_features, + .get = lg_get, + .set = lg_set, + .get_status = lg_get_status, + .set_status = lg_set_status, + .reset = lg_reset, + .find_vqs = lg_find_vqs, + .del_vqs = lg_del_vqs, + .bus_name = lg_bus_name, +}; + +/* + * The root device for the lguest virtio devices. This makes them appear as + * /sys/devices/lguest/0,1,2 not /sys/devices/0,1,2. + */ +static struct device *lguest_root; + +/*D:120 + * This is the core of the lguest bus: actually adding a new device. + * It's a separate function because it's neater that way, and because an + * earlier version of the code supported hotplug and unplug. They were removed + * early on because they were never used. + * + * As Andrew Tridgell says, "Untested code is buggy code". + * + * It's worth reading this carefully: we start with a pointer to the new device + * descriptor in the "lguest_devices" page, and the offset into the device + * descriptor page so we can uniquely identify it if things go badly wrong. + */ +static void add_lguest_device(struct lguest_device_desc *d, + unsigned int offset) +{ + struct lguest_device *ldev; + + /* Start with zeroed memory; Linux's device layer counts on it. */ + ldev = kzalloc(sizeof(*ldev), GFP_KERNEL); + if (!ldev) { + printk(KERN_EMERG "Cannot allocate lguest dev %u type %u\n", + offset, d->type); + return; + } + + /* This devices' parent is the lguest/ dir. */ + ldev->vdev.dev.parent = lguest_root; + /* + * The device type comes straight from the descriptor. There's also a + * device vendor field in the virtio_device struct, which we leave as + * 0. + */ + ldev->vdev.id.device = d->type; + /* + * We have a simple set of routines for querying the device's + * configuration information and setting its status. + */ + ldev->vdev.config = &lguest_config_ops; + /* And we remember the device's descriptor for lguest_config_ops. */ + ldev->desc = d; + + /* + * register_virtio_device() sets up the generic fields for the struct + * virtio_device and calls device_register(). This makes the bus + * infrastructure look for a matching driver. + */ + if (register_virtio_device(&ldev->vdev) != 0) { + printk(KERN_ERR "Failed to register lguest dev %u type %u\n", + offset, d->type); + kfree(ldev); + } +} + +/*D:110 + * scan_devices() simply iterates through the device page. The type 0 is + * reserved to mean "end of devices". + */ +static void scan_devices(void) +{ + unsigned int i; + struct lguest_device_desc *d; + + /* We start at the page beginning, and skip over each entry. */ + for (i = 0; i < PAGE_SIZE; i += desc_size(d)) { + d = lguest_devices + i; + + /* Once we hit a zero, stop. */ + if (d->type == 0) + break; + + printk("Device at %i has size %u\n", i, desc_size(d)); + add_lguest_device(d, i); + } +} + +/*D:105 + * Fairly early in boot, lguest_devices_init() is called to set up the + * lguest device infrastructure. We check that we are a Guest by checking + * pv_info.name: there are other ways of checking, but this seems most + * obvious to me. 
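+ * (An aside below walks the device page in miniature.)
+ */
+
+/*
+ * Aside: illustrative only, not part of the patch.  scan_devices()
+ * above hops through the descriptor page desc_size() bytes at a time
+ * and stops at type 0; here is the same walk boiled down to a count.
+ * The function name is invented for the example.
+ */
+static inline unsigned int example_count_devices(void *page)
+{
+	unsigned int i, count = 0;
+	struct lguest_device_desc *d;
+
+	for (i = 0; i < PAGE_SIZE; i += desc_size(d)) {
+		d = page + i;
+		/* Type 0 is the "end of devices" marker. */
+		if (d->type == 0)
+			break;
+		count++;
+	}
+	return count;
+}
+
+/* (D:105, continued.)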
+ * + * So we can access the "struct lguest_device_desc"s easily, we map that memory + * and store the pointer in the global "lguest_devices". Then we register a + * root device from which all our devices will hang (this seems to be the + * correct sysfs incantation). + * + * Finally we call scan_devices() which adds all the devices found in the + * lguest_devices page. + */ +static int __init lguest_devices_init(void) +{ + if (strcmp(pv_info.name, "lguest") != 0) + return 0; + + lguest_root = root_device_register("lguest"); + if (IS_ERR(lguest_root)) + panic("Could not register lguest root"); + + /* Devices are in a single page above top of "normal" mem */ + lguest_devices = lguest_map(max_pfn<<PAGE_SHIFT, 1); + + scan_devices(); + return 0; +} +/* We do this after core stuff, but before the drivers. */ +postcore_initcall(lguest_devices_init); + +/*D:150 + * At this point in the journey we used to now wade through the lguest + * devices themselves: net, block and console. Since they're all now virtio + * devices rather than lguest-specific, I've decided to ignore them. Mostly, + * they're kind of boring. But this does mean you'll never experience the + * thrill of reading the forbidden love scene buried deep in the block driver. + * + * "make Launcher" beckons, where we answer questions like "Where do Guests + * come from?", and "What do you do when someone asks for optimization?". + */ diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c new file mode 100644 index 00000000..ff4a0bc9 --- /dev/null +++ b/drivers/lguest/lguest_user.c @@ -0,0 +1,542 @@ +/*P:200 This contains all the /dev/lguest code, whereby the userspace + * launcher controls and communicates with the Guest. For example, + * the first write will tell us the Guest's memory layout and entry + * point. A read will run the Guest until something happens, such as + * a signal or the Guest doing a NOTIFY out to the Launcher. There is + * also a way for the Launcher to attach eventfds to particular NOTIFY + * values instead of returning from the read() call. +:*/ +#include <linux/uaccess.h> +#include <linux/miscdevice.h> +#include <linux/fs.h> +#include <linux/sched.h> +#include <linux/eventfd.h> +#include <linux/file.h> +#include <linux/slab.h> +#include <linux/export.h> +#include "lg.h" + +/*L:056 + * Before we move on, let's jump ahead and look at what the kernel does when + * it needs to look up the eventfds. That will complete our picture of how we + * use RCU. + * + * The notification value is in cpu->pending_notify: we return true if it went + * to an eventfd. + */ +bool send_notify_to_eventfd(struct lg_cpu *cpu) +{ + unsigned int i; + struct lg_eventfd_map *map; + + /* + * This "rcu_read_lock()" helps track when someone is still looking at + * the (RCU-using) eventfds array. It's not actually a lock at all; + * indeed it's a noop in many configurations. (You didn't expect me to + * explain all the RCU secrets here, did you?) + */ + rcu_read_lock(); + /* + * rcu_dereference is the counter-side of rcu_assign_pointer(); it + * makes sure we don't access the memory pointed to by + * cpu->lg->eventfds before cpu->lg->eventfds is set. Sounds crazy, + * but Alpha allows this! 
Paul McKenney points out that a really + * aggressive compiler could have the same effect: + * http://lists.ozlabs.org/pipermail/lguest/2009-July/001560.html + * + * So play safe, use rcu_dereference to get the rcu-protected pointer: + */ + map = rcu_dereference(cpu->lg->eventfds); + /* + * Simple array search: even if they add an eventfd while we do this, + * we'll continue to use the old array and just won't see the new one. + */ + for (i = 0; i < map->num; i++) { + if (map->map[i].addr == cpu->pending_notify) { + eventfd_signal(map->map[i].event, 1); + cpu->pending_notify = 0; + break; + } + } + /* We're done with the rcu-protected variable cpu->lg->eventfds. */ + rcu_read_unlock(); + + /* If we cleared the notification, it's because we found a match. */ + return cpu->pending_notify == 0; +} + +/*L:055 + * One of the more tricksy tricks in the Linux Kernel is a technique called + * Read Copy Update. Since one point of lguest is to teach lguest journeyers + * about kernel coding, I use it here. (In case you're curious, other purposes + * include learning about virtualization and instilling a deep appreciation for + * simplicity and puppies). + * + * We keep a simple array which maps LHCALL_NOTIFY values to eventfds, but we + * add new eventfds without ever blocking readers from accessing the array. + * The current Launcher only does this during boot, so that never happens. But + * Read Copy Update is cool, and adding a lock risks damaging even more puppies + * than this code does. + * + * We allocate a brand new one-larger array, copy the old one and add our new + * element. Then we make the lg eventfd pointer point to the new array. + * That's the easy part: now we need to free the old one, but we need to make + * sure no slow CPU somewhere is still looking at it. That's what + * synchronize_rcu does for us: waits until every CPU has indicated that it has + * moved on to know it's no longer using the old one. + * + * If that's unclear, see http://en.wikipedia.org/wiki/Read-copy-update. + */ +static int add_eventfd(struct lguest *lg, unsigned long addr, int fd) +{ + struct lg_eventfd_map *new, *old = lg->eventfds; + + /* + * We don't allow notifications on value 0 anyway (pending_notify of + * 0 means "nothing pending"). + */ + if (!addr) + return -EINVAL; + + /* + * Replace the old array with the new one, carefully: others can + * be accessing it at the same time. + */ + new = kmalloc(sizeof(*new) + sizeof(new->map[0]) * (old->num + 1), + GFP_KERNEL); + if (!new) + return -ENOMEM; + + /* First make identical copy. */ + memcpy(new->map, old->map, sizeof(old->map[0]) * old->num); + new->num = old->num; + + /* Now append new entry. */ + new->map[new->num].addr = addr; + new->map[new->num].event = eventfd_ctx_fdget(fd); + if (IS_ERR(new->map[new->num].event)) { + int err = PTR_ERR(new->map[new->num].event); + kfree(new); + return err; + } + new->num++; + + /* + * Now put new one in place: rcu_assign_pointer() is a fancy way of + * doing "lg->eventfds = new", but it uses memory barriers to make + * absolutely sure that the contents of "new" written above is nailed + * down before we actually do the assignment. + * + * We have to think about these kinds of things when we're operating on + * live data without locks. + */ + rcu_assign_pointer(lg->eventfds, new); + + /* + * We're not in a big hurry. Wait until no one's looking at old + * version, then free it. 
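+	 * (The whole recipe, in miniature, just below.)
+	 */
+
+	/*
+	 * Aside: a sketch of the generic RCU update recipe this function
+	 * follows; the names here are generic, not lguest's:
+	 *
+	 *	new = kmalloc(..., GFP_KERNEL);
+	 *	*new = *old;                      -- copy, then modify new
+	 *	rcu_assign_pointer(shared, new);  -- publish to new readers
+	 *	synchronize_rcu();                -- wait out old readers
+	 *	kfree(old);                       -- now nobody holds it
+	 *
+	 * Readers bracket their access with rcu_read_lock() and
+	 * rcu_read_unlock(), fetching the pointer with rcu_dereference(),
+	 * exactly as send_notify_to_eventfd() did above.
+	 */
+
+	/* As promised: wait until no one's looking, then free the old one.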
+	 */
+	synchronize_rcu();
+	kfree(old);
+
+	return 0;
+}
+
+/*L:052
+ * Receiving notifications from the Guest is usually done by attaching a
+ * particular LHCALL_NOTIFY value to an event file descriptor.  The eventfd
+ * will become readable when the Guest does an LHCALL_NOTIFY with that value.
+ *
+ * This is really convenient for processing each virtqueue in a separate
+ * thread.
+ */
+static int attach_eventfd(struct lguest *lg, const unsigned long __user *input)
+{
+	unsigned long addr, fd;
+	int err;
+
+	if (get_user(addr, input) != 0)
+		return -EFAULT;
+	input++;
+	if (get_user(fd, input) != 0)
+		return -EFAULT;
+
+	/*
+	 * Just make sure two callers don't add eventfds at once.  We really
+	 * only need to lock against callers adding to the same Guest, so using
+	 * the Big Lguest Lock is overkill.  But this is setup, not a fast path.
+	 */
+	mutex_lock(&lguest_lock);
+	err = add_eventfd(lg, addr, fd);
+	mutex_unlock(&lguest_lock);
+
+	return err;
+}
+
+/*L:050
+ * Sending an interrupt is done by writing LHREQ_IRQ and an interrupt
+ * number to /dev/lguest.
+ */
+static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input)
+{
+	unsigned long irq;
+
+	if (get_user(irq, input) != 0)
+		return -EFAULT;
+	if (irq >= LGUEST_IRQS)
+		return -EINVAL;
+
+	/*
+	 * Next time the Guest runs, the core code will see if it can deliver
+	 * this interrupt.
+	 */
+	set_interrupt(cpu, irq);
+	return 0;
+}
+
+/*L:040
+ * Once our Guest is initialized, the Launcher makes it run by reading
+ * from /dev/lguest.
+ */
+static ssize_t read(struct file *file, char __user *user,
+		    size_t size, loff_t *o)
+{
+	struct lguest *lg = file->private_data;
+	struct lg_cpu *cpu;
+	unsigned int cpu_id = *o;
+
+	/* You must write LHREQ_INITIALIZE first! */
+	if (!lg)
+		return -EINVAL;
+
+	/* Watch out for arbitrary vcpu indexes! */
+	if (cpu_id >= lg->nr_cpus)
+		return -EINVAL;
+
+	cpu = &lg->cpus[cpu_id];
+
+	/* If you're not the task which owns the Guest, go away. */
+	if (current != cpu->tsk)
+		return -EPERM;
+
+	/* If the Guest is already dead, we indicate why. */
+	if (lg->dead) {
+		size_t len;
+
+		/* lg->dead either contains an error code, or a string. */
+		if (IS_ERR(lg->dead))
+			return PTR_ERR(lg->dead);
+
+		/* We can only return as much as the buffer they read with. */
+		len = min(size, strlen(lg->dead)+1);
+		if (copy_to_user(user, lg->dead, len) != 0)
+			return -EFAULT;
+		return len;
+	}
+
+	/*
+	 * If we returned from read() last time because the Guest sent I/O,
+	 * clear the flag.
+	 */
+	if (cpu->pending_notify)
+		cpu->pending_notify = 0;
+
+	/* Run the Guest until something interesting happens. */
+	return run_guest(cpu, (unsigned long __user *)user);
+}
+
+/*L:025
+ * This actually initializes a CPU.  For the moment, a Guest is only
+ * uniprocessor, so "id" is always 0.
+ */
+static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip)
+{
+	/* We have a limited number of CPUs in the lguest struct. */
+	if (id >= ARRAY_SIZE(cpu->lg->cpus))
+		return -EINVAL;
+
+	/* Set up this CPU's id, and pointer back to the lguest struct. */
+	cpu->id = id;
+	cpu->lg = container_of((cpu - id), struct lguest, cpus[0]);
+	cpu->lg->nr_cpus++;
+
+	/* Each CPU has a timer it can set. */
+	init_clockdev(cpu);
+
+	/*
+	 * We need a complete page for the Guest registers: they are accessible
+	 * to the Guest and we can only grant it access to whole pages.
+ */ + cpu->regs_page = get_zeroed_page(GFP_KERNEL); + if (!cpu->regs_page) + return -ENOMEM; + + /* We actually put the registers at the bottom of the page. */ + cpu->regs = (void *)cpu->regs_page + PAGE_SIZE - sizeof(*cpu->regs); + + /* + * Now we initialize the Guest's registers, handing it the start + * address. + */ + lguest_arch_setup_regs(cpu, start_ip); + + /* + * We keep a pointer to the Launcher task (ie. current task) for when + * other Guests want to wake this one (eg. console input). + */ + cpu->tsk = current; + + /* + * We need to keep a pointer to the Launcher's memory map, because if + * the Launcher dies we need to clean it up. If we don't keep a + * reference, it is destroyed before close() is called. + */ + cpu->mm = get_task_mm(cpu->tsk); + + /* + * We remember which CPU's pages this Guest used last, for optimization + * when the same Guest runs on the same CPU twice. + */ + cpu->last_pages = NULL; + + /* No error == success. */ + return 0; +} + +/*L:020 + * The initialization write supplies 3 pointer sized (32 or 64 bit) values (in + * addition to the LHREQ_INITIALIZE value). These are: + * + * base: The start of the Guest-physical memory inside the Launcher memory. + * + * pfnlimit: The highest (Guest-physical) page number the Guest should be + * allowed to access. The Guest memory lives inside the Launcher, so it sets + * this to ensure the Guest can only reach its own memory. + * + * start: The first instruction to execute ("eip" in x86-speak). + */ +static int initialize(struct file *file, const unsigned long __user *input) +{ + /* "struct lguest" contains all we (the Host) know about a Guest. */ + struct lguest *lg; + int err; + unsigned long args[3]; + + /* + * We grab the Big Lguest lock, which protects against multiple + * simultaneous initializations. + */ + mutex_lock(&lguest_lock); + /* You can't initialize twice! Close the device and start again... */ + if (file->private_data) { + err = -EBUSY; + goto unlock; + } + + if (copy_from_user(args, input, sizeof(args)) != 0) { + err = -EFAULT; + goto unlock; + } + + lg = kzalloc(sizeof(*lg), GFP_KERNEL); + if (!lg) { + err = -ENOMEM; + goto unlock; + } + + lg->eventfds = kmalloc(sizeof(*lg->eventfds), GFP_KERNEL); + if (!lg->eventfds) { + err = -ENOMEM; + goto free_lg; + } + lg->eventfds->num = 0; + + /* Populate the easy fields of our "struct lguest" */ + lg->mem_base = (void __user *)args[0]; + lg->pfn_limit = args[1]; + + /* This is the first cpu (cpu 0) and it will start booting at args[2] */ + err = lg_cpu_start(&lg->cpus[0], 0, args[2]); + if (err) + goto free_eventfds; + + /* + * Initialize the Guest's shadow page tables. This allocates + * memory, so can fail. + */ + err = init_guest_pagetable(lg); + if (err) + goto free_regs; + + /* We keep our "struct lguest" in the file's private_data. */ + file->private_data = lg; + + mutex_unlock(&lguest_lock); + + /* And because this is a write() call, we return the length used. */ + return sizeof(args); + +free_regs: + /* FIXME: This should be in free_vcpu */ + free_page(lg->cpus[0].regs_page); +free_eventfds: + kfree(lg->eventfds); +free_lg: + kfree(lg); +unlock: + mutex_unlock(&lguest_lock); + return err; +} + +/*L:010 + * The first operation the Launcher does must be a write. All writes + * start with an unsigned long number: for the first write this must be + * LHREQ_INITIALIZE to set up the Guest. After that the Launcher can use + * writes of other values to send interrupts or set up receipt of notifications. 
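+ * (A Launcher's-eye sketch of this little protocol follows.)
+ */
+
+/*
+ * Aside: this sketch belongs in the Launcher (userspace), not in this
+ * file, and the fd and variable names are invented; it is only meant to
+ * make the wire format concrete.  LHREQ_INITIALIZE is followed by the
+ * three values initialize() documented above, LHREQ_IRQ by an interrupt
+ * number, all written as unsigned longs:
+ *
+ *	unsigned long init[] = { LHREQ_INITIALIZE, base, pfnlimit, start };
+ *	unsigned long irq[]  = { LHREQ_IRQ, irq_num };
+ *
+ *	if (write(lguest_fd, init, sizeof(init)) < 0)
+ *		err(1, "initializing guest");
+ *	...
+ *	if (write(lguest_fd, irq, sizeof(irq)) < 0)
+ *		err(1, "sending irq %lu", irq_num);
+ */
+
+/* (L:010, continued.)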
+ * + * Note that we overload the "offset" in the /dev/lguest file to indicate what + * CPU number we're dealing with. Currently this is always 0 since we only + * support uniprocessor Guests, but you can see the beginnings of SMP support + * here. + */ +static ssize_t write(struct file *file, const char __user *in, + size_t size, loff_t *off) +{ + /* + * Once the Guest is initialized, we hold the "struct lguest" in the + * file private data. + */ + struct lguest *lg = file->private_data; + const unsigned long __user *input = (const unsigned long __user *)in; + unsigned long req; + struct lg_cpu *uninitialized_var(cpu); + unsigned int cpu_id = *off; + + /* The first value tells us what this request is. */ + if (get_user(req, input) != 0) + return -EFAULT; + input++; + + /* If you haven't initialized, you must do that first. */ + if (req != LHREQ_INITIALIZE) { + if (!lg || (cpu_id >= lg->nr_cpus)) + return -EINVAL; + cpu = &lg->cpus[cpu_id]; + + /* Once the Guest is dead, you can only read() why it died. */ + if (lg->dead) + return -ENOENT; + } + + switch (req) { + case LHREQ_INITIALIZE: + return initialize(file, input); + case LHREQ_IRQ: + return user_send_irq(cpu, input); + case LHREQ_EVENTFD: + return attach_eventfd(lg, input); + default: + return -EINVAL; + } +} + +/*L:060 + * The final piece of interface code is the close() routine. It reverses + * everything done in initialize(). This is usually called because the + * Launcher exited. + * + * Note that the close routine returns 0 or a negative error number: it can't + * really fail, but it can whine. I blame Sun for this wart, and K&R C for + * letting them do it. +:*/ +static int close(struct inode *inode, struct file *file) +{ + struct lguest *lg = file->private_data; + unsigned int i; + + /* If we never successfully initialized, there's nothing to clean up */ + if (!lg) + return 0; + + /* + * We need the big lock, to protect from inter-guest I/O and other + * Launchers initializing guests. + */ + mutex_lock(&lguest_lock); + + /* Free up the shadow page tables for the Guest. */ + free_guest_pagetable(lg); + + for (i = 0; i < lg->nr_cpus; i++) { + /* Cancels the hrtimer set via LHCALL_SET_CLOCKEVENT. */ + hrtimer_cancel(&lg->cpus[i].hrt); + /* We can free up the register page we allocated. */ + free_page(lg->cpus[i].regs_page); + /* + * Now all the memory cleanups are done, it's safe to release + * the Launcher's memory management structure. + */ + mmput(lg->cpus[i].mm); + } + + /* Release any eventfds they registered. */ + for (i = 0; i < lg->eventfds->num; i++) + eventfd_ctx_put(lg->eventfds->map[i].event); + kfree(lg->eventfds); + + /* + * If lg->dead doesn't contain an error code it will be NULL or a + * kmalloc()ed string, either of which is ok to hand to kfree(). + */ + if (!IS_ERR(lg->dead)) + kfree(lg->dead); + /* Free the memory allocated to the lguest_struct */ + kfree(lg); + /* Release lock and exit. */ + mutex_unlock(&lguest_lock); + + return 0; +} + +/*L:000 + * Welcome to our journey through the Launcher! + * + * The Launcher is the Host userspace program which sets up, runs and services + * the Guest. In fact, many comments in the Drivers which refer to "the Host" + * doing things are inaccurate: the Launcher does all the device handling for + * the Guest, but the Guest can't know that. + * + * Just to confuse you: to the Host kernel, the Launcher *is* the Guest and we + * shall see more of that later. 
+ * + * We begin our understanding with the Host kernel interface which the Launcher + * uses: reading and writing a character device called /dev/lguest. All the + * work happens in the read(), write() and close() routines: + */ +static const struct file_operations lguest_fops = { + .owner = THIS_MODULE, + .release = close, + .write = write, + .read = read, + .llseek = default_llseek, +}; +/*:*/ + +/* + * This is a textbook example of a "misc" character device. Populate a "struct + * miscdevice" and register it with misc_register(). + */ +static struct miscdevice lguest_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "lguest", + .fops = &lguest_fops, +}; + +int __init lguest_device_init(void) +{ + return misc_register(&lguest_dev); +} + +void __exit lguest_device_remove(void) +{ + misc_deregister(&lguest_dev); +} diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c new file mode 100644 index 00000000..3b62be16 --- /dev/null +++ b/drivers/lguest/page_tables.c @@ -0,0 +1,1152 @@ +/*P:700 + * The pagetable code, on the other hand, still shows the scars of + * previous encounters. It's functional, and as neat as it can be in the + * circumstances, but be wary, for these things are subtle and break easily. + * The Guest provides a virtual to physical mapping, but we can neither trust + * it nor use it: we verify and convert it here then point the CPU to the + * converted Guest pages when running the Guest. +:*/ + +/* Copyright (C) Rusty Russell IBM Corporation 2006. + * GPL v2 and any later version */ +#include <linux/mm.h> +#include <linux/gfp.h> +#include <linux/types.h> +#include <linux/spinlock.h> +#include <linux/random.h> +#include <linux/percpu.h> +#include <asm/tlbflush.h> +#include <asm/uaccess.h> +#include "lg.h" + +/*M:008 + * We hold reference to pages, which prevents them from being swapped. + * It'd be nice to have a callback in the "struct mm_struct" when Linux wants + * to swap out. If we had this, and a shrinker callback to trim PTE pages, we + * could probably consider launching Guests as non-root. +:*/ + +/*H:300 + * The Page Table Code + * + * We use two-level page tables for the Guest, or three-level with PAE. If + * you're not entirely comfortable with virtual addresses, physical addresses + * and page tables then I recommend you review arch/x86/lguest/boot.c's "Page + * Table Handling" (with diagrams!). + * + * The Guest keeps page tables, but we maintain the actual ones here: these are + * called "shadow" page tables. Which is a very Guest-centric name: these are + * the real page tables the CPU uses, although we keep them up to date to + * reflect the Guest's. (See what I mean about weird naming? Since when do + * shadows reflect anything?) + * + * Anyway, this is the most complicated part of the Host code. There are seven + * parts to this: + * (i) Looking up a page table entry when the Guest faults, + * (ii) Making sure the Guest stack is mapped, + * (iii) Setting up a page table entry when the Guest tells us one has changed, + * (iv) Switching page tables, + * (v) Flushing (throwing away) page tables, + * (vi) Mapping the Switcher when the Guest is about to run, + * (vii) Setting up the page tables initially. +:*/ + +/* + * The Switcher uses the complete top PTE page. That's 1024 PTE entries (4MB) + * or 512 PTE entries with PAE (2MB). + */ +#define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1) + +/* + * For PAE we need the PMD index as well. We use the last 2MB, so we + * will need the last pmd entry of the last pmd page. 
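+ * (The arithmetic, spelled out below.)
+ */
+
+/*
+ * Aside: a sketch of the address sums, not code from the patch; the
+ * helper name is invented.  Without PAE, PTRS_PER_PGD is 1024 and each
+ * PGD entry spans 4MB, so the Switcher's slot begins at
+ * 1023 << 22 == 0xFFC00000: the top 4MB of virtual address space.
+ * With PAE, the last PMD entry (512 entries of 2MB each) covers the
+ * top 2MB, from 0xFFE00000 up.
+ */
+/* The virtual address where the Switcher's top-level slot begins. */
+static inline unsigned long example_switcher_slot_base(void)
+{
+	return (unsigned long)SWITCHER_PGD_INDEX << PGDIR_SHIFT;
+}
+
+/* So, for PAE: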
+ */ +#ifdef CONFIG_X86_PAE +#define SWITCHER_PMD_INDEX (PTRS_PER_PMD - 1) +#define RESERVE_MEM 2U +#define CHECK_GPGD_MASK _PAGE_PRESENT +#else +#define RESERVE_MEM 4U +#define CHECK_GPGD_MASK _PAGE_TABLE +#endif + +/* + * We actually need a separate PTE page for each CPU. Remember that after the + * Switcher code itself comes two pages for each CPU, and we don't want this + * CPU's guest to see the pages of any other CPU. + */ +static DEFINE_PER_CPU(pte_t *, switcher_pte_pages); +#define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu) + +/*H:320 + * The page table code is curly enough to need helper functions to keep it + * clear and clean. The kernel itself provides many of them; one advantage + * of insisting that the Guest and Host use the same CONFIG_PAE setting. + * + * There are two functions which return pointers to the shadow (aka "real") + * page tables. + * + * spgd_addr() takes the virtual address and returns a pointer to the top-level + * page directory entry (PGD) for that address. Since we keep track of several + * page tables, the "i" argument tells us which one we're interested in (it's + * usually the current one). + */ +static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr) +{ + unsigned int index = pgd_index(vaddr); + +#ifndef CONFIG_X86_PAE + /* We kill any Guest trying to touch the Switcher addresses. */ + if (index >= SWITCHER_PGD_INDEX) { + kill_guest(cpu, "attempt to access switcher pages"); + index = 0; + } +#endif + /* Return a pointer index'th pgd entry for the i'th page table. */ + return &cpu->lg->pgdirs[i].pgdir[index]; +} + +#ifdef CONFIG_X86_PAE +/* + * This routine then takes the PGD entry given above, which contains the + * address of the PMD page. It then returns a pointer to the PMD entry for the + * given address. + */ +static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr) +{ + unsigned int index = pmd_index(vaddr); + pmd_t *page; + + /* We kill any Guest trying to touch the Switcher addresses. */ + if (pgd_index(vaddr) == SWITCHER_PGD_INDEX && + index >= SWITCHER_PMD_INDEX) { + kill_guest(cpu, "attempt to access switcher pages"); + index = 0; + } + + /* You should never call this if the PGD entry wasn't valid */ + BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT)); + page = __va(pgd_pfn(spgd) << PAGE_SHIFT); + + return &page[index]; +} +#endif + +/* + * This routine then takes the page directory entry returned above, which + * contains the address of the page table entry (PTE) page. It then returns a + * pointer to the PTE entry for the given address. + */ +static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr) +{ +#ifdef CONFIG_X86_PAE + pmd_t *pmd = spmd_addr(cpu, spgd, vaddr); + pte_t *page = __va(pmd_pfn(*pmd) << PAGE_SHIFT); + + /* You should never call this if the PMD entry wasn't valid */ + BUG_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)); +#else + pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT); + /* You should never call this if the PGD entry wasn't valid */ + BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT)); +#endif + + return &page[pte_index(vaddr)]; +} + +/* + * These functions are just like the above, except they access the Guest + * page tables. Hence they return a Guest address. + */ +static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr) +{ + unsigned int index = vaddr >> (PGDIR_SHIFT); + return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t); +} + +#ifdef CONFIG_X86_PAE +/* Follow the PGD to the PMD. 
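+ * (One more aside before the walkers.)
+ */
+
+/*
+ * Aside: an illustrative sketch with an invented name.  Any virtual
+ * address splits into a PGD index, a PMD index (PAE), a PTE index and
+ * a byte offset; the kernel's own macros do the shifts and masks that
+ * the helpers above and below rely on:
+ */
+static inline void example_decompose(unsigned long vaddr)
+{
+	pr_debug("pgd %lu pmd %lu pte %lu offset %lu\n",
+		 pgd_index(vaddr), pmd_index(vaddr),
+		 pte_index(vaddr), vaddr & ~PAGE_MASK);
+}
+
+/* Following the PGD to the PMD, as promised: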
*/ +static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr) +{ + unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT; + BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT)); + return gpage + pmd_index(vaddr) * sizeof(pmd_t); +} + +/* Follow the PMD to the PTE. */ +static unsigned long gpte_addr(struct lg_cpu *cpu, + pmd_t gpmd, unsigned long vaddr) +{ + unsigned long gpage = pmd_pfn(gpmd) << PAGE_SHIFT; + + BUG_ON(!(pmd_flags(gpmd) & _PAGE_PRESENT)); + return gpage + pte_index(vaddr) * sizeof(pte_t); +} +#else +/* Follow the PGD to the PTE (no mid-level for !PAE). */ +static unsigned long gpte_addr(struct lg_cpu *cpu, + pgd_t gpgd, unsigned long vaddr) +{ + unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT; + + BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT)); + return gpage + pte_index(vaddr) * sizeof(pte_t); +} +#endif +/*:*/ + +/*M:007 + * get_pfn is slow: we could probably try to grab batches of pages here as + * an optimization (ie. pre-faulting). +:*/ + +/*H:350 + * This routine takes a page number given by the Guest and converts it to + * an actual, physical page number. It can fail for several reasons: the + * virtual address might not be mapped by the Launcher, the write flag is set + * and the page is read-only, or the write flag was set and the page was + * shared so had to be copied, but we ran out of memory. + * + * This holds a reference to the page, so release_pte() is careful to put that + * back. + */ +static unsigned long get_pfn(unsigned long virtpfn, int write) +{ + struct page *page; + + /* gup me one page at this address please! */ + if (get_user_pages_fast(virtpfn << PAGE_SHIFT, 1, write, &page) == 1) + return page_to_pfn(page); + + /* This value indicates failure. */ + return -1UL; +} + +/*H:340 + * Converting a Guest page table entry to a shadow (ie. real) page table + * entry can be a little tricky. The flags are (almost) the same, but the + * Guest PTE contains a virtual page number: the CPU needs the real page + * number. + */ +static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write) +{ + unsigned long pfn, base, flags; + + /* + * The Guest sets the global flag, because it thinks that it is using + * PGE. We only told it to use PGE so it would tell us whether it was + * flushing a kernel mapping or a userspace mapping. We don't actually + * use the global bit, so throw it away. + */ + flags = (pte_flags(gpte) & ~_PAGE_GLOBAL); + + /* The Guest's pages are offset inside the Launcher. */ + base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE; + + /* + * We need a temporary "unsigned long" variable to hold the answer from + * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't + * fit in spte.pfn. get_pfn() finds the real physical number of the + * page, given the virtual number. + */ + pfn = get_pfn(base + pte_pfn(gpte), write); + if (pfn == -1UL) { + kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte)); + /* + * When we destroy the Guest, we'll go through the shadow page + * tables and release_pte() them. Make sure we don't think + * this one is valid! + */ + flags = 0; + } + /* Now we assemble our shadow PTE from the page number and flags. */ + return pfn_pte(pfn, __pgprot(flags)); +} + +/*H:460 And to complete the chain, release_pte() looks like this: */ +static void release_pte(pte_t pte) +{ + /* + * Remember that get_user_pages_fast() took a reference to the page, in + * get_pfn()? We have to put it back now. 
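+	 * (One bookkeeping note first.)
+	 */
+
+	/*
+	 * Aside: the reference-counting invariant, spelled out (this is
+	 * commentary, not extra code).  Every shadow PTE installed via
+	 * gpte_to_spte() holds exactly one page reference, taken by
+	 * get_user_pages_fast() inside get_pfn(); put_page() here is the
+	 * matching release.  The _PAGE_PRESENT test below is what makes
+	 * this safe: gpte_to_spte() zeroes the flags when get_pfn() fails,
+	 * so a failed mapping is never "present" and never double-released.
+	 */
+
+	/* So: put that reference back now.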
+ */ + if (pte_flags(pte) & _PAGE_PRESENT) + put_page(pte_page(pte)); +} +/*:*/ + +static void check_gpte(struct lg_cpu *cpu, pte_t gpte) +{ + if ((pte_flags(gpte) & _PAGE_PSE) || + pte_pfn(gpte) >= cpu->lg->pfn_limit) + kill_guest(cpu, "bad page table entry"); +} + +static void check_gpgd(struct lg_cpu *cpu, pgd_t gpgd) +{ + if ((pgd_flags(gpgd) & ~CHECK_GPGD_MASK) || + (pgd_pfn(gpgd) >= cpu->lg->pfn_limit)) + kill_guest(cpu, "bad page directory entry"); +} + +#ifdef CONFIG_X86_PAE +static void check_gpmd(struct lg_cpu *cpu, pmd_t gpmd) +{ + if ((pmd_flags(gpmd) & ~_PAGE_TABLE) || + (pmd_pfn(gpmd) >= cpu->lg->pfn_limit)) + kill_guest(cpu, "bad page middle directory entry"); +} +#endif + +/*H:330 + * (i) Looking up a page table entry when the Guest faults. + * + * We saw this call in run_guest(): when we see a page fault in the Guest, we + * come here. That's because we only set up the shadow page tables lazily as + * they're needed, so we get page faults all the time and quietly fix them up + * and return to the Guest without it knowing. + * + * If we fixed up the fault (ie. we mapped the address), this routine returns + * true. Otherwise, it was a real fault and we need to tell the Guest. + */ +bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) +{ + pgd_t gpgd; + pgd_t *spgd; + unsigned long gpte_ptr; + pte_t gpte; + pte_t *spte; + + /* Mid level for PAE. */ +#ifdef CONFIG_X86_PAE + pmd_t *spmd; + pmd_t gpmd; +#endif + + /* First step: get the top-level Guest page table entry. */ + if (unlikely(cpu->linear_pages)) { + /* Faking up a linear mapping. */ + gpgd = __pgd(CHECK_GPGD_MASK); + } else { + gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t); + /* Toplevel not present? We can't map it in. */ + if (!(pgd_flags(gpgd) & _PAGE_PRESENT)) + return false; + } + + /* Now look at the matching shadow entry. */ + spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr); + if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) { + /* No shadow entry: allocate a new shadow PTE page. */ + unsigned long ptepage = get_zeroed_page(GFP_KERNEL); + /* + * This is not really the Guest's fault, but killing it is + * simple for this corner case. + */ + if (!ptepage) { + kill_guest(cpu, "out of memory allocating pte page"); + return false; + } + /* We check that the Guest pgd is OK. */ + check_gpgd(cpu, gpgd); + /* + * And we copy the flags to the shadow PGD entry. The page + * number in the shadow PGD is the page we just allocated. + */ + set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags(gpgd))); + } + +#ifdef CONFIG_X86_PAE + if (unlikely(cpu->linear_pages)) { + /* Faking up a linear mapping. */ + gpmd = __pmd(_PAGE_TABLE); + } else { + gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t); + /* Middle level not present? We can't map it in. */ + if (!(pmd_flags(gpmd) & _PAGE_PRESENT)) + return false; + } + + /* Now look at the matching shadow entry. */ + spmd = spmd_addr(cpu, *spgd, vaddr); + + if (!(pmd_flags(*spmd) & _PAGE_PRESENT)) { + /* No shadow entry: allocate a new shadow PTE page. */ + unsigned long ptepage = get_zeroed_page(GFP_KERNEL); + + /* + * This is not really the Guest's fault, but killing it is + * simple for this corner case. + */ + if (!ptepage) { + kill_guest(cpu, "out of memory allocating pte page"); + return false; + } + + /* We check that the Guest pmd is OK. */ + check_gpmd(cpu, gpmd); + + /* + * And we copy the flags to the shadow PMD entry. The page + * number in the shadow PMD is the page we just allocated. 
+ */ + set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags(gpmd))); + } + + /* + * OK, now we look at the lower level in the Guest page table: keep its + * address, because we might update it later. + */ + gpte_ptr = gpte_addr(cpu, gpmd, vaddr); +#else + /* + * OK, now we look at the lower level in the Guest page table: keep its + * address, because we might update it later. + */ + gpte_ptr = gpte_addr(cpu, gpgd, vaddr); +#endif + + if (unlikely(cpu->linear_pages)) { + /* Linear? Make up a PTE which points to same page. */ + gpte = __pte((vaddr & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT); + } else { + /* Read the actual PTE value. */ + gpte = lgread(cpu, gpte_ptr, pte_t); + } + + /* If this page isn't in the Guest page tables, we can't page it in. */ + if (!(pte_flags(gpte) & _PAGE_PRESENT)) + return false; + + /* + * Check they're not trying to write to a page the Guest wants + * read-only (bit 2 of errcode == write). + */ + if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW)) + return false; + + /* User access to a kernel-only page? (bit 3 == user access) */ + if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER)) + return false; + + /* + * Check that the Guest PTE flags are OK, and the page number is below + * the pfn_limit (ie. not mapping the Launcher binary). + */ + check_gpte(cpu, gpte); + + /* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */ + gpte = pte_mkyoung(gpte); + if (errcode & 2) + gpte = pte_mkdirty(gpte); + + /* Get the pointer to the shadow PTE entry we're going to set. */ + spte = spte_addr(cpu, *spgd, vaddr); + + /* + * If there was a valid shadow PTE entry here before, we release it. + * This can happen with a write to a previously read-only entry. + */ + release_pte(*spte); + + /* + * If this is a write, we insist that the Guest page is writable (the + * final arg to gpte_to_spte()). + */ + if (pte_dirty(gpte)) + *spte = gpte_to_spte(cpu, gpte, 1); + else + /* + * If this is a read, don't set the "writable" bit in the page + * table entry, even if the Guest says it's writable. That way + * we will come back here when a write does actually occur, so + * we can update the Guest's _PAGE_DIRTY flag. + */ + set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0)); + + /* + * Finally, we write the Guest PTE entry back: we've set the + * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. + */ + if (likely(!cpu->linear_pages)) + lgwrite(cpu, gpte_ptr, pte_t, gpte); + + /* + * The fault is fixed, the page table is populated, the mapping + * manipulated, the result returned and the code complete. A small + * delay and a trace of alliteration are the only indications the Guest + * has that a page fault occurred at all. + */ + return true; +} + +/*H:360 + * (ii) Making sure the Guest stack is mapped. + * + * Remember that direct traps into the Guest need a mapped Guest kernel stack. + * pin_stack_pages() calls us here: we could simply call demand_page(), but as + * we've seen that logic is quite long, and usually the stack pages are already + * mapped, so it's overkill. + * + * This is a quick version which answers the question: is this virtual address + * mapped by the shadow page tables, and is it writable? + */ +static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr) +{ + pgd_t *spgd; + unsigned long flags; + +#ifdef CONFIG_X86_PAE + pmd_t *spmd; +#endif + /* Look at the current top level entry: is it present? 
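+	 * (First, a note on the errcode bits demand_page() tested above.)
+	 */
+
+	/*
+	 * Aside: an x86 architectural fact rather than anything
+	 * lguest-specific.  The page fault error code the CPU pushes is a
+	 * bitmask:
+	 *
+	 *	bit 0 (1): protection violation (clear: page not present)
+	 *	bit 1 (2): the access was a write (clear: a read)
+	 *	bit 2 (4): the CPU was in user mode
+	 *
+	 * Hence demand_page()'s "(errcode & 2)" write check and
+	 * "(errcode & 4)" user check, and why pin_page() below passes 2 to
+	 * fault a stack page in as a write.
+	 */
+
+	/* Now, look at the current top level entry: is it present?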
*/ + spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr); + if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) + return false; + +#ifdef CONFIG_X86_PAE + spmd = spmd_addr(cpu, *spgd, vaddr); + if (!(pmd_flags(*spmd) & _PAGE_PRESENT)) + return false; +#endif + + /* + * Check the flags on the pte entry itself: it must be present and + * writable. + */ + flags = pte_flags(*(spte_addr(cpu, *spgd, vaddr))); + + return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW); +} + +/* + * So, when pin_stack_pages() asks us to pin a page, we check if it's already + * in the page tables, and if not, we call demand_page() with error code 2 + * (meaning "write"). + */ +void pin_page(struct lg_cpu *cpu, unsigned long vaddr) +{ + if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2)) + kill_guest(cpu, "bad stack page %#lx", vaddr); +} +/*:*/ + +#ifdef CONFIG_X86_PAE +static void release_pmd(pmd_t *spmd) +{ + /* If the entry's not present, there's nothing to release. */ + if (pmd_flags(*spmd) & _PAGE_PRESENT) { + unsigned int i; + pte_t *ptepage = __va(pmd_pfn(*spmd) << PAGE_SHIFT); + /* For each entry in the page, we might need to release it. */ + for (i = 0; i < PTRS_PER_PTE; i++) + release_pte(ptepage[i]); + /* Now we can free the page of PTEs */ + free_page((long)ptepage); + /* And zero out the PMD entry so we never release it twice. */ + set_pmd(spmd, __pmd(0)); + } +} + +static void release_pgd(pgd_t *spgd) +{ + /* If the entry's not present, there's nothing to release. */ + if (pgd_flags(*spgd) & _PAGE_PRESENT) { + unsigned int i; + pmd_t *pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT); + + for (i = 0; i < PTRS_PER_PMD; i++) + release_pmd(&pmdpage[i]); + + /* Now we can free the page of PMDs */ + free_page((long)pmdpage); + /* And zero out the PGD entry so we never release it twice. */ + set_pgd(spgd, __pgd(0)); + } +} + +#else /* !CONFIG_X86_PAE */ +/*H:450 + * If we chase down the release_pgd() code, the non-PAE version looks like + * this. The PAE version is almost identical, but instead of calling + * release_pte it calls release_pmd(), which looks much like this. + */ +static void release_pgd(pgd_t *spgd) +{ + /* If the entry's not present, there's nothing to release. */ + if (pgd_flags(*spgd) & _PAGE_PRESENT) { + unsigned int i; + /* + * Converting the pfn to find the actual PTE page is easy: turn + * the page number into a physical address, then convert to a + * virtual address (easy for kernel pages like this one). + */ + pte_t *ptepage = __va(pgd_pfn(*spgd) << PAGE_SHIFT); + /* For each entry in the page, we might need to release it. */ + for (i = 0; i < PTRS_PER_PTE; i++) + release_pte(ptepage[i]); + /* Now we can free the page of PTEs */ + free_page((long)ptepage); + /* And zero out the PGD entry so we never release it twice. */ + *spgd = __pgd(0); + } +} +#endif + +/*H:445 + * We saw flush_user_mappings() twice: once from the flush_user_mappings() + * hypercall and once in new_pgdir() when we re-used a top-level pgdir page. + * It simply releases every PTE page from 0 up to the Guest's kernel address. + */ +static void flush_user_mappings(struct lguest *lg, int idx) +{ + unsigned int i; + /* Release every pgd entry up to the kernel's address. */ + for (i = 0; i < pgd_index(lg->kernel_address); i++) + release_pgd(lg->pgdirs[idx].pgdir + i); +} + +/*H:440 + * (v) Flushing (throwing away) page tables, + * + * The Guest has a hypercall to throw away the page tables: it's used when a + * large number of mappings have been changed. 
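+ * (A short aside below, then the one-liner itself.)
+ */
+
+/*
+ * Aside: a sketch with an invented name, not lguest code.  It's worth
+ * keeping the full address chain straight: a Guest virtual address
+ * becomes Guest-physical via guest_pa() (defined a little below), and
+ * Guest-physical is simply an offset into the Launcher's memory:
+ */
+static inline void __user *example_guest_to_launcher(struct lg_cpu *cpu,
+						     unsigned long vaddr)
+{
+	return cpu->lg->mem_base + guest_pa(cpu, vaddr);
+}
+
+/*H:440, continued: the flush itself.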
+ */ +void guest_pagetable_flush_user(struct lg_cpu *cpu) +{ + /* Drop the userspace part of the current page table. */ + flush_user_mappings(cpu->lg, cpu->cpu_pgd); +} +/*:*/ + +/* We walk down the guest page tables to get a guest-physical address */ +unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr) +{ + pgd_t gpgd; + pte_t gpte; +#ifdef CONFIG_X86_PAE + pmd_t gpmd; +#endif + + /* Still not set up? Just map 1:1. */ + if (unlikely(cpu->linear_pages)) + return vaddr; + + /* First step: get the top-level Guest page table entry. */ + gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t); + /* Toplevel not present? We can't map it in. */ + if (!(pgd_flags(gpgd) & _PAGE_PRESENT)) { + kill_guest(cpu, "Bad address %#lx", vaddr); + return -1UL; + } + +#ifdef CONFIG_X86_PAE + gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t); + if (!(pmd_flags(gpmd) & _PAGE_PRESENT)) + kill_guest(cpu, "Bad address %#lx", vaddr); + gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t); +#else + gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t); +#endif + if (!(pte_flags(gpte) & _PAGE_PRESENT)) + kill_guest(cpu, "Bad address %#lx", vaddr); + + return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK); +} + +/* + * We keep several page tables. This is a simple routine to find the page + * table (if any) corresponding to this top-level address the Guest has given + * us. + */ +static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable) +{ + unsigned int i; + for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++) + if (lg->pgdirs[i].pgdir && lg->pgdirs[i].gpgdir == pgtable) + break; + return i; +} + +/*H:435 + * And this is us, creating the new page directory. If we really do + * allocate a new one (and so the kernel parts are not there), we set + * blank_pgdir. + */ +static unsigned int new_pgdir(struct lg_cpu *cpu, + unsigned long gpgdir, + int *blank_pgdir) +{ + unsigned int next; +#ifdef CONFIG_X86_PAE + pmd_t *pmd_table; +#endif + + /* + * We pick one entry at random to throw out. Choosing the Least + * Recently Used might be better, but this is easy. + */ + next = random32() % ARRAY_SIZE(cpu->lg->pgdirs); + /* If it's never been allocated at all before, try now. */ + if (!cpu->lg->pgdirs[next].pgdir) { + cpu->lg->pgdirs[next].pgdir = + (pgd_t *)get_zeroed_page(GFP_KERNEL); + /* If the allocation fails, just keep using the one we have */ + if (!cpu->lg->pgdirs[next].pgdir) + next = cpu->cpu_pgd; + else { +#ifdef CONFIG_X86_PAE + /* + * In PAE mode, allocate a pmd page and populate the + * last pgd entry. + */ + pmd_table = (pmd_t *)get_zeroed_page(GFP_KERNEL); + if (!pmd_table) { + free_page((long)cpu->lg->pgdirs[next].pgdir); + set_pgd(cpu->lg->pgdirs[next].pgdir, __pgd(0)); + next = cpu->cpu_pgd; + } else { + set_pgd(cpu->lg->pgdirs[next].pgdir + + SWITCHER_PGD_INDEX, + __pgd(__pa(pmd_table) | _PAGE_PRESENT)); + /* + * This is a blank page, so there are no kernel + * mappings: caller must map the stack! + */ + *blank_pgdir = 1; + } +#else + *blank_pgdir = 1; +#endif + } + } + /* Record which Guest toplevel this shadows. */ + cpu->lg->pgdirs[next].gpgdir = gpgdir; + /* Release all the non-kernel mappings. */ + flush_user_mappings(cpu->lg, next); + + return next; +} + +/*H:470 + * Finally, a routine which throws away everything: all PGD entries in all + * the shadow page tables, including the Guest's kernel mappings. This is used + * when we destroy the Guest. 
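+ * (But first, one aside about new_pgdir()'s eviction policy.)
+ */
+
+/*
+ * Aside: hypothetical, NOT lguest code.  new_pgdir() above evicts a
+ * random shadow pagetable, and its comment muses that Least Recently
+ * Used would be better.  Given a per-pgdir last-used stamp (a field
+ * struct pgdir does not actually have), the pick might look like this;
+ * both names are invented:
+ */
+static inline unsigned int example_lru_pick(const unsigned long *last_used,
+					    unsigned int n)
+{
+	unsigned int i, oldest = 0;
+
+	for (i = 1; i < n; i++)
+		if (last_used[i] < last_used[oldest])
+			oldest = i;
+	return oldest;
+}
+
+/*H:470, as promised: throwing everything away.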
+ */ +static void release_all_pagetables(struct lguest *lg) +{ + unsigned int i, j; + + /* Every shadow pagetable this Guest has */ + for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++) + if (lg->pgdirs[i].pgdir) { +#ifdef CONFIG_X86_PAE + pgd_t *spgd; + pmd_t *pmdpage; + unsigned int k; + + /* Get the last pmd page. */ + spgd = lg->pgdirs[i].pgdir + SWITCHER_PGD_INDEX; + pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT); + + /* + * And release the pmd entries of that pmd page, + * except for the switcher pmd. + */ + for (k = 0; k < SWITCHER_PMD_INDEX; k++) + release_pmd(&pmdpage[k]); +#endif + /* Every PGD entry except the Switcher at the top */ + for (j = 0; j < SWITCHER_PGD_INDEX; j++) + release_pgd(lg->pgdirs[i].pgdir + j); + } +} + +/* + * We also throw away everything when a Guest tells us it's changed a kernel + * mapping. Since kernel mappings are in every page table, it's easiest to + * throw them all away. This traps the Guest in amber for a while as + * everything faults back in, but it's rare. + */ +void guest_pagetable_clear_all(struct lg_cpu *cpu) +{ + release_all_pagetables(cpu->lg); + /* We need the Guest kernel stack mapped again. */ + pin_stack_pages(cpu); +} + +/*H:430 + * (iv) Switching page tables + * + * Now we've seen all the page table setting and manipulation, let's see + * what happens when the Guest changes page tables (ie. changes the top-level + * pgdir). This occurs on almost every context switch. + */ +void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable) +{ + int newpgdir, repin = 0; + + /* + * The very first time they call this, we're actually running without + * any page tables; we've been making it up. Throw them away now. + */ + if (unlikely(cpu->linear_pages)) { + release_all_pagetables(cpu->lg); + cpu->linear_pages = false; + /* Force allocation of a new pgdir. */ + newpgdir = ARRAY_SIZE(cpu->lg->pgdirs); + } else { + /* Look to see if we have this one already. */ + newpgdir = find_pgdir(cpu->lg, pgtable); + } + + /* + * If not, we allocate or mug an existing one: if it's a fresh one, + * repin gets set to 1. + */ + if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs)) + newpgdir = new_pgdir(cpu, pgtable, &repin); + /* Change the current pgd index to the new one. */ + cpu->cpu_pgd = newpgdir; + /* If it was completely blank, we map in the Guest kernel stack */ + if (repin) + pin_stack_pages(cpu); +} +/*:*/ + +/*M:009 + * Since we throw away all mappings when a kernel mapping changes, our + * performance sucks for guests using highmem. In fact, a guest with + * PAGE_OFFSET 0xc0000000 (the default) and more than about 700MB of RAM is + * usually slower than a Guest with less memory. + * + * This, of course, cannot be fixed. It would take some kind of... well, I + * don't know, but the term "puissant code-fu" comes to mind. +:*/ + +/*H:420 + * This is the routine which actually sets the page table entry for then + * "idx"'th shadow page table. + * + * Normally, we can just throw out the old entry and replace it with 0: if they + * use it demand_page() will put the new entry in. We need to do this anyway: + * The Guest expects _PAGE_ACCESSED to be set on its PTE the first time a page + * is read from, and _PAGE_DIRTY when it's written to. + * + * But Avi Kivity pointed out that most Operating Systems (Linux included) set + * these bits on PTEs immediately anyway. 
This is done to save the CPU from
+ * having to update them, but it helps us the same way: if they set
+ * _PAGE_ACCESSED then we can put a read-only PTE entry in immediately, and if
+ * they set _PAGE_DIRTY then we can put a writable PTE entry in immediately.
+ */
+static void do_set_pte(struct lg_cpu *cpu, int idx,
+		       unsigned long vaddr, pte_t gpte)
+{
+	/* Look up the matching shadow page directory entry. */
+	pgd_t *spgd = spgd_addr(cpu, idx, vaddr);
+#ifdef CONFIG_X86_PAE
+	pmd_t *spmd;
+#endif
+
+	/* If the top level isn't present, there's no entry to update. */
+	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
+#ifdef CONFIG_X86_PAE
+		spmd = spmd_addr(cpu, *spgd, vaddr);
+		if (pmd_flags(*spmd) & _PAGE_PRESENT) {
+#endif
+			/* Otherwise, start by releasing the existing entry. */
+			pte_t *spte = spte_addr(cpu, *spgd, vaddr);
+			release_pte(*spte);
+
+			/*
+			 * If they're setting this entry as dirty or accessed,
+			 * we might as well put that entry they've given us in
+			 * now. This shaves 10% off a copy-on-write
+			 * micro-benchmark.
+			 */
+			if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
+				check_gpte(cpu, gpte);
+				set_pte(spte,
+					gpte_to_spte(cpu, gpte,
+						pte_flags(gpte) & _PAGE_DIRTY));
+			} else {
+				/*
+				 * Otherwise kill it and we can demand_page()
+				 * it in later.
+				 */
+				set_pte(spte, __pte(0));
+			}
+#ifdef CONFIG_X86_PAE
+		}
+#endif
+	}
+}
+
+/*H:410
+ * Updating a PTE entry is a little trickier.
+ *
+ * We keep track of several different page tables (the Guest uses one for each
+ * process, so it makes sense to cache at least a few). Each of these has
+ * identical kernel parts: ie. every mapping above PAGE_OFFSET is the same for
+ * all processes. So when the page table above that address changes, we update
+ * all the page tables, not just the current one. This is rare.
+ *
+ * The benefit is that when we have to track a new page table, we can keep all
+ * the kernel mappings. This speeds up context switch immensely.
+ */
+void guest_set_pte(struct lg_cpu *cpu,
+		   unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
+{
+	/*
+	 * Kernel mappings must be changed on all top levels. Slow, but doesn't
+	 * happen often.
+	 */
+	if (vaddr >= cpu->lg->kernel_address) {
+		unsigned int i;
+		for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++)
+			if (cpu->lg->pgdirs[i].pgdir)
+				do_set_pte(cpu, i, vaddr, gpte);
+	} else {
+		/* Is this page table one we have a shadow for? */
+		int pgdir = find_pgdir(cpu->lg, gpgdir);
+		if (pgdir != ARRAY_SIZE(cpu->lg->pgdirs))
+			/* If so, do the update. */
+			do_set_pte(cpu, pgdir, vaddr, gpte);
+	}
+}
+
+/*H:400
+ * (iii) Setting up a page table entry when the Guest tells us one has changed.
+ *
+ * Just like we did in interrupts_and_traps.c, it makes sense for us to deal
+ * with the other side of page tables while we're here: what happens when the
+ * Guest asks for a page table to be updated?
+ *
+ * We already saw that demand_page() will fill in the shadow page tables when
+ * needed, so we can simply remove shadow page table entries whenever the Guest
+ * tells us they've changed. When the Guest tries to use the new entry it will
+ * fault and demand_page() will fix it up.
+ *
+ * So with that in mind here's our code to update a (top-level) PGD entry:
+ */
+void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx)
+{
+	int pgdir;
+
+	if (idx >= SWITCHER_PGD_INDEX)
+		return;
+
+	/* If they're talking about a page table we have a shadow for... */
+	pgdir = find_pgdir(lg, gpgdir);
+	if (pgdir < ARRAY_SIZE(lg->pgdirs))
+		/* ... throw it away.
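+		 * (Just this one shadow entry: demand_page() will rebuild it,
+		 * and everything beneath it, the next time it's touched.)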
 */
+		release_pgd(lg->pgdirs[pgdir].pgdir + idx);
+}
+
+#ifdef CONFIG_X86_PAE
+/* For setting a mid-level, we just throw everything away. It's easy. */
+void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx)
+{
+	guest_pagetable_clear_all(&lg->cpus[0]);
+}
+#endif
+
+/*H:500
+ * (vii) Setting up the page tables initially.
+ *
+ * When a Guest is first created, we initialize a shadow page table which
+ * we will populate on future faults. The Guest doesn't have any actual
+ * pagetables yet, so we set linear_pages to tell demand_page() to fake it
+ * for the moment.
+ */
+int init_guest_pagetable(struct lguest *lg)
+{
+	struct lg_cpu *cpu = &lg->cpus[0];
+	int allocated = 0;
+
+	/* lg (and lg->cpus[]) starts zeroed: this allocates a new pgdir */
+	cpu->cpu_pgd = new_pgdir(cpu, 0, &allocated);
+	if (!allocated)
+		return -ENOMEM;
+
+	/* We start with a linear mapping until the Guest provides real
+	 * page tables of its own. */
+	cpu->linear_pages = true;
+	return 0;
+}
+
+/*H:508 When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
+void page_table_guest_data_init(struct lg_cpu *cpu)
+{
+	/* We get the kernel address: above this is all kernel memory. */
+	if (get_user(cpu->lg->kernel_address,
+		     &cpu->lg->lguest_data->kernel_address)
+		/*
+		 * We tell the Guest that it can't use the top 2 or 4 MB
+		 * of virtual addresses used by the Switcher.
+		 */
+		|| put_user(RESERVE_MEM * 1024 * 1024,
+			    &cpu->lg->lguest_data->reserve_mem)) {
+		kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
+		return;
+	}
+
+	/*
+	 * In flush_user_mappings() we loop from 0 to
+	 * "pgd_index(lg->kernel_address)". This assumes it won't hit the
+	 * Switcher mappings, so check that now.
+	 */
+#ifdef CONFIG_X86_PAE
+	if (pgd_index(cpu->lg->kernel_address) == SWITCHER_PGD_INDEX &&
+	    pmd_index(cpu->lg->kernel_address) == SWITCHER_PMD_INDEX)
+#else
+	if (pgd_index(cpu->lg->kernel_address) >= SWITCHER_PGD_INDEX)
+#endif
+		kill_guest(cpu, "bad kernel address %#lx",
+			   cpu->lg->kernel_address);
+}
+
+/* When a Guest dies, our cleanup is fairly simple. */
+void free_guest_pagetable(struct lguest *lg)
+{
+	unsigned int i;
+
+	/* Throw away all page table pages. */
+	release_all_pagetables(lg);
+	/* Now free the top levels: free_page() can handle 0 just fine. */
+	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
+		free_page((long)lg->pgdirs[i].pgdir);
+}
+
+/*H:480
+ * (vi) Mapping the Switcher when the Guest is about to run.
+ *
+ * The Switcher and the two pages for this CPU need to be visible in the
+ * Guest (and not the pages for other CPUs). We have the appropriate PTE pages
+ * for each CPU already set up, we just need to hook them in now that we know
+ * which Guest is about to run on this CPU.
+ */
+void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
+{
+	pte_t *switcher_pte_page = __this_cpu_read(switcher_pte_pages);
+	pte_t regs_pte;
+
+#ifdef CONFIG_X86_PAE
+	pmd_t switcher_pmd;
+	pmd_t *pmd_table;
+
+	switcher_pmd = pfn_pmd(__pa(switcher_pte_page) >> PAGE_SHIFT,
+			       PAGE_KERNEL_EXEC);
+
+	/* Figure out where the pmd page is, by reading the PGD, and converting
+	 * it to a virtual address. */
+	pmd_table = __va(pgd_pfn(cpu->lg->
+			pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX])
+								<< PAGE_SHIFT);
+	/* Now write it into the shadow page table. */
+	set_pmd(&pmd_table[SWITCHER_PMD_INDEX], switcher_pmd);
+#else
+	pgd_t switcher_pgd;
+
+	/*
+	 * Make the last PGD entry for this Guest point to the Switcher's PTE
+	 * page for this CPU (with appropriate flags).
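+	 * (For instance: without PAE, each PGD entry covers 4MB, so the
+	 * Switcher at 0xFFC00000 lands in entry 0xFFC00000 >> 22 = 1023,
+	 * the very last slot -- which is what SWITCHER_PGD_INDEX names.)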
+ */ + switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL_EXEC); + + cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd; + +#endif + /* + * We also change the Switcher PTE page. When we're running the Guest, + * we want the Guest's "regs" page to appear where the first Switcher + * page for this CPU is. This is an optimization: when the Switcher + * saves the Guest registers, it saves them into the first page of this + * CPU's "struct lguest_pages": if we make sure the Guest's register + * page is already mapped there, we don't have to copy them out + * again. + */ + regs_pte = pfn_pte(__pa(cpu->regs_page) >> PAGE_SHIFT, PAGE_KERNEL); + set_pte(&switcher_pte_page[pte_index((unsigned long)pages)], regs_pte); +} +/*:*/ + +static void free_switcher_pte_pages(void) +{ + unsigned int i; + + for_each_possible_cpu(i) + free_page((long)switcher_pte_page(i)); +} + +/*H:520 + * Setting up the Switcher PTE page for given CPU is fairly easy, given + * the CPU number and the "struct page"s for the Switcher code itself. + * + * Currently the Switcher is less than a page long, so "pages" is always 1. + */ +static __init void populate_switcher_pte_page(unsigned int cpu, + struct page *switcher_page[], + unsigned int pages) +{ + unsigned int i; + pte_t *pte = switcher_pte_page(cpu); + + /* The first entries are easy: they map the Switcher code. */ + for (i = 0; i < pages; i++) { + set_pte(&pte[i], mk_pte(switcher_page[i], + __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED))); + } + + /* The only other thing we map is this CPU's pair of pages. */ + i = pages + cpu*2; + + /* First page (Guest registers) is writable from the Guest */ + set_pte(&pte[i], pfn_pte(page_to_pfn(switcher_page[i]), + __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW))); + + /* + * The second page contains the "struct lguest_ro_state", and is + * read-only. + */ + set_pte(&pte[i+1], pfn_pte(page_to_pfn(switcher_page[i+1]), + __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED))); +} + +/* + * We've made it through the page table code. Perhaps our tired brains are + * still processing the details, or perhaps we're simply glad it's over. + * + * If nothing else, note that all this complexity in juggling shadow page tables + * in sync with the Guest's page tables is for one reason: for most Guests this + * page table dance determines how bad performance will be. This is why Xen + * uses exotic direct Guest pagetable manipulation, and why both Intel and AMD + * have implemented shadow page table support directly into hardware. + * + * There is just one file remaining in the Host. + */ + +/*H:510 + * At boot or module load time, init_pagetables() allocates and populates + * the Switcher PTE page for each CPU. + */ +__init int init_pagetables(struct page **switcher_page, unsigned int pages) +{ + unsigned int i; + + for_each_possible_cpu(i) { + switcher_pte_page(i) = (pte_t *)get_zeroed_page(GFP_KERNEL); + if (!switcher_pte_page(i)) { + free_switcher_pte_pages(); + return -ENOMEM; + } + populate_switcher_pte_page(i, switcher_page, pages); + } + return 0; +} +/*:*/ + +/* Cleaning up simply involves freeing the PTE page for each CPU. */ +void free_pagetables(void) +{ + free_switcher_pte_pages(); +} diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c new file mode 100644 index 00000000..c4fb424d --- /dev/null +++ b/drivers/lguest/segments.c @@ -0,0 +1,228 @@ +/*P:600 + * The x86 architecture has segments, which involve a table of descriptors + * which can be used to do funky things with virtual address interpretation. 
+ * We originally used to use segments so the Guest couldn't alter the
+ * Guest<->Host Switcher, and then we had to trim Guest segments, and restore
+ * for userspace per-thread segments, but trim again on userspace->kernel
+ * transitions... This nightmarish creation was contained within this file,
+ * where we knew not to tread without heavy armament and a change of underwear.
+ *
+ * In these modern times, the segment handling code consists of simple sanity
+ * checks, and the worst you'll experience reading this code is butterfly-rash
+ * from frolicking through its parklike serenity.
+:*/
+#include "lg.h"
+
+/*H:600
+ * Segments & The Global Descriptor Table
+ *
+ * (That title sounds like a bad Nerdcore group. Not to suggest that there are
+ * any good Nerdcore groups, but in high school a friend of mine had a band
+ * called Joe Fish and the Chips, so there are definitely worse band names).
+ *
+ * To refresh: the GDT is a table of 8-byte values describing segments. Once
+ * set up, these segments can be loaded into one of the 6 "segment registers".
+ *
+ * GDT entries are passed around as "struct desc_struct"s, which like IDT
+ * entries are split into two 32-bit members, "a" and "b". One day, someone
+ * will clean that up, and be declared a Hero. (No pressure, I'm just saying).
+ *
+ * Anyway, the GDT entry contains a base (the start address of the segment), a
+ * limit (the size of the segment - 1), and some flags. Sounds simple, and it
+ * would be, except those zany Intel engineers decided that it was too boring
+ * to put the base at one end, the limit at the other, and the flags in
+ * between. They decided to shotgun the bits at random throughout the 8 bytes,
+ * like so:
+ *
+ * 0               16                     40       48  52  56     63
+ * [ limit part 1 ][          base part 1 ][ flags ][li][fl][base ]
+ *                                                  mit ags part 2
+ *                                                  part 2
+ *
+ * As a result, this file contains a certain amount of magic numeracy. Let's
+ * begin.
+ */
+
+/*
+ * There are several entries we don't let the Guest set. The TSS entry is the
+ * "Task State Segment" which controls all kinds of delicate things. The
+ * LGUEST_CS and LGUEST_DS entries are reserved for the Switcher, and the
+ * Guest can't be trusted to deal with double faults (so
+ * GDT_ENTRY_DOUBLEFAULT_TSS is off-limits too).
+ */
+static bool ignored_gdt(unsigned int num)
+{
+	return (num == GDT_ENTRY_TSS
+		|| num == GDT_ENTRY_LGUEST_CS
+		|| num == GDT_ENTRY_LGUEST_DS
+		|| num == GDT_ENTRY_DOUBLEFAULT_TSS);
+}
+
+/*H:630
+ * Once the Guest gives us new GDT entries, we fix them up a little. We
+ * don't care if they're invalid: the worst that can happen is a General
+ * Protection Fault in the Switcher when it restores a Guest segment register
+ * which tries to use that entry. Then we kill the Guest for causing such a
+ * mess: the message will be "unhandled trap 256".
+ */
+static void fixup_gdt_table(struct lg_cpu *cpu, unsigned start, unsigned end)
+{
+	unsigned int i;
+
+	for (i = start; i < end; i++) {
+		/*
+		 * We never copy these ones to the real GDT, so we don't care
+		 * what they say
+		 */
+		if (ignored_gdt(i))
+			continue;
+
+		/*
+		 * Segment descriptors contain a privilege level: the Guest is
+		 * sometimes careless and leaves this as 0, even though it's
+		 * running at privilege level 1. If so, we fix it here.
+		 */
+		if (cpu->arch.gdt[i].dpl == 0)
+			cpu->arch.gdt[i].dpl |= GUEST_PL;
+
+		/*
+		 * Each descriptor has an "accessed" bit. If we don't set it
+		 * now, the CPU will try to set it when the Guest first loads
+		 * that entry into a segment register. But the GDT isn't
+		 * writable by the Guest, so bad things can happen.
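+		 * (Bit 0 of a code/data descriptor's type field is that
+		 * "accessed" bit, hence the "|= 0x1" below.)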
+ */
+		cpu->arch.gdt[i].type |= 0x1;
+	}
+}
+
+/*H:610
+ * Like the IDT, we never simply use the GDT the Guest gives us. We keep
+ * a GDT for each CPU, and copy across the Guest's entries each time we want to
+ * run the Guest on that CPU.
+ *
+ * This routine is called at boot or modprobe time for each CPU to set up the
+ * constant GDT entries: the ones which are the same no matter what Guest we're
+ * running.
+ */
+void setup_default_gdt_entries(struct lguest_ro_state *state)
+{
+	struct desc_struct *gdt = state->guest_gdt;
+	unsigned long tss = (unsigned long)&state->guest_tss;
+
+	/* The Switcher segments are full 0-4G segments, privilege level 0 */
+	gdt[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT;
+	gdt[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT;
+
+	/*
+	 * The TSS segment refers to the TSS entry for this particular CPU.
+	 */
+	gdt[GDT_ENTRY_TSS].a = 0;
+	gdt[GDT_ENTRY_TSS].b = 0;
+
+	gdt[GDT_ENTRY_TSS].limit0 = 0x67;
+	gdt[GDT_ENTRY_TSS].base0 = tss & 0xFFFF;
+	gdt[GDT_ENTRY_TSS].base1 = (tss >> 16) & 0xFF;
+	gdt[GDT_ENTRY_TSS].base2 = tss >> 24;
+	gdt[GDT_ENTRY_TSS].type = 0x9; /* 32-bit TSS (available) */
+	gdt[GDT_ENTRY_TSS].p = 0x1; /* Entry is present */
+	gdt[GDT_ENTRY_TSS].dpl = 0x0; /* Privilege level 0 */
+	gdt[GDT_ENTRY_TSS].s = 0x0; /* system segment */
+
+}
+
+/*
+ * This routine sets up the initial Guest GDT for booting. All entries start
+ * as 0 (unusable).
+ */
+void setup_guest_gdt(struct lg_cpu *cpu)
+{
+	/*
+	 * Start with full 0-4G segments...except the Guest is allowed to use
+	 * them, so set the privilege level appropriately in the flags.
+	 */
+	cpu->arch.gdt[GDT_ENTRY_KERNEL_CS] = FULL_EXEC_SEGMENT;
+	cpu->arch.gdt[GDT_ENTRY_KERNEL_DS] = FULL_SEGMENT;
+	cpu->arch.gdt[GDT_ENTRY_KERNEL_CS].dpl |= GUEST_PL;
+	cpu->arch.gdt[GDT_ENTRY_KERNEL_DS].dpl |= GUEST_PL;
+}
+
+/*H:650
+ * An optimization of copy_gdt(), for just the three "thread-local storage"
+ * entries.
+ */
+void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt)
+{
+	unsigned int i;
+
+	for (i = GDT_ENTRY_TLS_MIN; i <= GDT_ENTRY_TLS_MAX; i++)
+		gdt[i] = cpu->arch.gdt[i];
+}
+
+/*H:640
+ * When the Guest is run on a different CPU, or the GDT entries have changed,
+ * copy_gdt() is called to copy the Guest's GDT entries across to this CPU's
+ * GDT.
+ */
+void copy_gdt(const struct lg_cpu *cpu, struct desc_struct *gdt)
+{
+	unsigned int i;
+
+	/*
+	 * The default entries from setup_default_gdt_entries() are not
+	 * replaced. See ignored_gdt() above.
+	 */
+	for (i = 0; i < GDT_ENTRIES; i++)
+		if (!ignored_gdt(i))
+			gdt[i] = cpu->arch.gdt[i];
+}
+
+/*H:620
+ * This is where the Guest asks us to load a new GDT entry
+ * (LHCALL_LOAD_GDT_ENTRY). We tweak the entry and copy it in.
+ */
+void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi)
+{
+	/*
+	 * We assume the Guest has the same number of GDT entries as the
+	 * Host, otherwise we'd have to dynamically allocate the Guest GDT.
+	 */
+	if (num >= ARRAY_SIZE(cpu->arch.gdt)) {
+		kill_guest(cpu, "too many gdt entries %i", num);
+		return;
+	}
+
+	/* Set it up, then fix it. */
+	cpu->arch.gdt[num].a = lo;
+	cpu->arch.gdt[num].b = hi;
+	fixup_gdt_table(cpu, num, num+1);
+	/*
+	 * Mark that the GDT changed so the core knows it has to copy it again,
+	 * even if the Guest is run on the same CPU.
+	 */
+	cpu->changed |= CHANGED_GDT;
+}
+
+/*
+ * This is the fast-track version for just changing the three TLS entries.
+ * Remember that this happens on every context switch, so it's worth
+ * optimizing.
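+ * (It moves just the descriptors from GDT_ENTRY_TLS_MIN through
+ * GDT_ENTRY_TLS_MAX, slurped from Guest memory in a single __lgread().)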
But wouldn't it be neater to have a single hypercall to cover + * both cases? + */ +void guest_load_tls(struct lg_cpu *cpu, unsigned long gtls) +{ + struct desc_struct *tls = &cpu->arch.gdt[GDT_ENTRY_TLS_MIN]; + + __lgread(cpu, tls, gtls, sizeof(*tls)*GDT_ENTRY_TLS_ENTRIES); + fixup_gdt_table(cpu, GDT_ENTRY_TLS_MIN, GDT_ENTRY_TLS_MAX+1); + /* Note that just the TLS entries have changed. */ + cpu->changed |= CHANGED_GDT_TLS; +} + +/*H:660 + * With this, we have finished the Host. + * + * Five of the seven parts of our task are complete. You have made it through + * the Bit of Despair (I think that's somewhere in the page table code, + * myself). + * + * Next, we examine "make Switcher". It's short, but intense. + */ diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c new file mode 100644 index 00000000..39809035 --- /dev/null +++ b/drivers/lguest/x86/core.c @@ -0,0 +1,715 @@ +/* + * Copyright (C) 2006, Rusty Russell <rusty@rustcorp.com.au> IBM Corporation. + * Copyright (C) 2007, Jes Sorensen <jes@sgi.com> SGI. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +/*P:450 + * This file contains the x86-specific lguest code. It used to be all + * mixed in with drivers/lguest/core.c but several foolhardy code slashers + * wrestled most of the dependencies out to here in preparation for porting + * lguest to other architectures (see what I mean by foolhardy?). + * + * This also contains a couple of non-obvious setup and teardown pieces which + * were implemented after days of debugging pain. +:*/ +#include <linux/kernel.h> +#include <linux/start_kernel.h> +#include <linux/string.h> +#include <linux/console.h> +#include <linux/screen_info.h> +#include <linux/irq.h> +#include <linux/interrupt.h> +#include <linux/clocksource.h> +#include <linux/clockchips.h> +#include <linux/cpu.h> +#include <linux/lguest.h> +#include <linux/lguest_launcher.h> +#include <asm/paravirt.h> +#include <asm/param.h> +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/desc.h> +#include <asm/setup.h> +#include <asm/lguest.h> +#include <asm/uaccess.h> +#include <asm/i387.h> +#include "../lg.h" + +static int cpu_had_pge; + +static struct { + unsigned long offset; + unsigned short segment; +} lguest_entry; + +/* Offset from where switcher.S was compiled to where we've copied it */ +static unsigned long switcher_offset(void) +{ + return SWITCHER_ADDR - (unsigned long)start_switcher_text; +} + +/* This cpu's struct lguest_pages. */ +static struct lguest_pages *lguest_pages(unsigned int cpu) +{ + return &(((struct lguest_pages *) + (SWITCHER_ADDR + SHARED_SWITCHER_PAGES*PAGE_SIZE))[cpu]); +} + +static DEFINE_PER_CPU(struct lg_cpu *, lg_last_cpu); + +/*S:010 + * We approach the Switcher. + * + * Remember that each CPU has two pages which are visible to the Guest when it + * runs on that CPU. 
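+ * (Roughly, from lg.h:
+ *
+ *	struct lguest_pages {
+ *		char spare[PAGE_SIZE - sizeof(struct lguest_regs)];
+ *		struct lguest_regs regs;	// tail of page 1, rw in Guest
+ *		struct lguest_ro_state state;	// page 2: GDT, IDT, TSS...
+ *	} __attribute__((aligned(PAGE_SIZE)));
+ * )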
This has to contain the state for that Guest: we copy the
+ * state in just before we run the Guest.
+ *
+ * Each Guest has "changed" flags which indicate what has changed in the Guest
+ * since it last ran. We saw this set in interrupts_and_traps.c and
+ * segments.c.
+ */
+static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
+{
+	/*
+	 * Copying all this data can be quite expensive. We usually run the
+	 * same Guest we ran last time (and that Guest hasn't run anywhere else
+	 * meanwhile). If that's not the case, we pretend everything in the
+	 * Guest has changed.
+	 */
+	if (__this_cpu_read(lg_last_cpu) != cpu || cpu->last_pages != pages) {
+		__this_cpu_write(lg_last_cpu, cpu);
+		cpu->last_pages = pages;
+		cpu->changed = CHANGED_ALL;
+	}
+
+	/* These copies are pretty cheap, so we do them unconditionally. */
+
+	/* Save the current Host top-level page directory. */
+	pages->state.host_cr3 = __pa(current->mm->pgd);
+	/*
+	 * Set up the Guest's page tables to see this CPU's pages (and no
+	 * other CPU's pages).
+	 */
+	map_switcher_in_guest(cpu, pages);
+	/*
+	 * Set up the two "TSS" members which tell the CPU what stack to use
+	 * for traps which go directly into the Guest (ie. traps at privilege
+	 * level 1).
+	 */
+	pages->state.guest_tss.sp1 = cpu->esp1;
+	pages->state.guest_tss.ss1 = cpu->ss1;
+
+	/* Copy direct-to-Guest trap entries. */
+	if (cpu->changed & CHANGED_IDT)
+		copy_traps(cpu, pages->state.guest_idt, default_idt_entries);
+
+	/* Copy all GDT entries which the Guest can change. */
+	if (cpu->changed & CHANGED_GDT)
+		copy_gdt(cpu, pages->state.guest_gdt);
+	/* If only the TLS entries have changed, copy them. */
+	else if (cpu->changed & CHANGED_GDT_TLS)
+		copy_gdt_tls(cpu, pages->state.guest_gdt);
+
+	/* Mark the Guest as unchanged for next time. */
+	cpu->changed = 0;
+}
+
+/* Finally: the code to actually call into the Switcher to run the Guest. */
+static void run_guest_once(struct lg_cpu *cpu, struct lguest_pages *pages)
+{
+	/* This is a dummy value we need for GCC's sake. */
+	unsigned int clobber;
+
+	/*
+	 * Copy the guest-specific information into this CPU's "struct
+	 * lguest_pages".
+	 */
+	copy_in_guest_info(cpu, pages);
+
+	/*
+	 * Set the trap number to 256 (impossible value). If we fault while
+	 * switching to the Guest (bad segment registers or bug), this will
+	 * cause us to abort the Guest.
+	 */
+	cpu->regs->trapnum = 256;
+
+	/*
+	 * Now: we push the "eflags" register on the stack, then do an "lcall".
+	 * This is how we change from using the kernel code segment to using
+	 * the dedicated lguest code segment, as well as jumping into the
+	 * Switcher.
+	 *
+	 * The lcall also pushes the old code segment (KERNEL_CS) onto the
+	 * stack, then the address of this call. This stack layout happens to
+	 * exactly match the stack layout created by an interrupt...
+	 */
+	asm volatile("pushf; lcall *lguest_entry"
+		     /*
+		      * This is how we tell GCC that %eax ("a") and %ebx ("b")
+		      * are changed by this routine. The "=" means output.
+		      */
+		     : "=a"(clobber), "=b"(clobber)
+		     /*
+		      * %eax contains the pages pointer. ("0" refers to the
+		      * 0-th argument above, ie "a"). %ebx contains the
+		      * physical address of the Guest's top-level page
+		      * directory.
+		      */
+		     : "0"(pages), "1"(__pa(cpu->lg->pgdirs[cpu->cpu_pgd].pgdir))
+		     /*
+		      * We tell gcc that all these registers could change,
+		      * which means we don't have to save and restore them in
+		      * the Switcher.
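+		      * ("memory" is in the list too, because the Switcher
+		      * reads and writes our struct lguest_pages behind
+		      * GCC's back.)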
+ */ + : "memory", "%edx", "%ecx", "%edi", "%esi"); +} +/*:*/ + +/*M:002 + * There are hooks in the scheduler which we can register to tell when we + * get kicked off the CPU (preempt_notifier_register()). This would allow us + * to lazily disable SYSENTER which would regain some performance, and should + * also simplify copy_in_guest_info(). Note that we'd still need to restore + * things when we exit to Launcher userspace, but that's fairly easy. + * + * We could also try using these hooks for PGE, but that might be too expensive. + * + * The hooks were designed for KVM, but we can also put them to good use. +:*/ + +/*H:040 + * This is the i386-specific code to setup and run the Guest. Interrupts + * are disabled: we own the CPU. + */ +void lguest_arch_run_guest(struct lg_cpu *cpu) +{ + /* + * Remember the awfully-named TS bit? If the Guest has asked to set it + * we set it now, so we can trap and pass that trap to the Guest if it + * uses the FPU. + */ + if (cpu->ts) + unlazy_fpu(current); + + /* + * SYSENTER is an optimized way of doing system calls. We can't allow + * it because it always jumps to privilege level 0. A normal Guest + * won't try it because we don't advertise it in CPUID, but a malicious + * Guest (or malicious Guest userspace program) could, so we tell the + * CPU to disable it before running the Guest. + */ + if (boot_cpu_has(X86_FEATURE_SEP)) + wrmsr(MSR_IA32_SYSENTER_CS, 0, 0); + + /* + * Now we actually run the Guest. It will return when something + * interesting happens, and we can examine its registers to see what it + * was doing. + */ + run_guest_once(cpu, lguest_pages(raw_smp_processor_id())); + + /* + * Note that the "regs" structure contains two extra entries which are + * not really registers: a trap number which says what interrupt or + * trap made the switcher code come back, and an error code which some + * traps set. + */ + + /* Restore SYSENTER if it's supposed to be on. */ + if (boot_cpu_has(X86_FEATURE_SEP)) + wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0); + + /* + * If the Guest page faulted, then the cr2 register will tell us the + * bad virtual address. We have to grab this now, because once we + * re-enable interrupts an interrupt could fault and thus overwrite + * cr2, or we could even move off to a different CPU. + */ + if (cpu->regs->trapnum == 14) + cpu->arch.last_pagefault = read_cr2(); + /* + * Similarly, if we took a trap because the Guest used the FPU, + * we have to restore the FPU it expects to see. + * math_state_restore() may sleep and we may even move off to + * a different CPU. So all the critical stuff should be done + * before this. + */ + else if (cpu->regs->trapnum == 7) + math_state_restore(); +} + +/*H:130 + * Now we've examined the hypercall code; our Guest can make requests. + * Our Guest is usually so well behaved; it never tries to do things it isn't + * allowed to, and uses hypercalls instead. Unfortunately, Linux's paravirtual + * infrastructure isn't quite complete, because it doesn't contain replacements + * for the Intel I/O instructions. As a result, the Guest sometimes fumbles + * across one during the boot process as it probes for various things which are + * usually attached to a PC. + * + * When the Guest uses one of these instructions, we get a trap (General + * Protection Fault) and come here. We see if it's one of those troublesome + * instructions and skip over it. We return true if we did. 
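+ *
+ * (A typical victim is the Guest probing legacy hardware with "inb": the
+ * emulation below answers all-ones, the traditional "nothing here".)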
+ */
+static int emulate_insn(struct lg_cpu *cpu)
+{
+	u8 insn;
+	unsigned int insnlen = 0, in = 0, small_operand = 0;
+	/*
+	 * The eip contains the *virtual* address of the Guest's instruction:
+	 * walk the Guest's page tables to find the "physical" address.
+	 */
+	unsigned long physaddr = guest_pa(cpu, cpu->regs->eip);
+
+	/*
+	 * This must be the Guest kernel trying to do something, not userspace!
+	 * The bottom two bits of the CS segment register are the privilege
+	 * level.
+	 */
+	if ((cpu->regs->cs & 3) != GUEST_PL)
+		return 0;
+
+	/* Decoding x86 instructions is icky. */
+	insn = lgread(cpu, physaddr, u8);
+
+	/*
+	 * Around 2.6.33, the kernel started using an emulation for the
+	 * cmpxchg8b instruction in early boot on many configurations. This
+	 * code isn't paravirtualized, and it tries to disable interrupts.
+	 * Ignore it, which will Mostly Work.
+	 */
+	if (insn == 0xfa) {
+		/* "cli", or Clear Interrupt Enable instruction. Skip it. */
+		cpu->regs->eip++;
+		return 1;
+	}
+
+	/*
+	 * 0x66 is an "operand prefix". It means a 16-bit, not 32-bit, in/out.
+	 */
+	if (insn == 0x66) {
+		small_operand = 1;
+		/* The instruction is 1 byte so far, read the next byte. */
+		insnlen = 1;
+		insn = lgread(cpu, physaddr + insnlen, u8);
+	}
+
+	/*
+	 * We can ignore the lower bit for the moment and decode the 4 opcodes
+	 * we need to emulate.
+	 */
+	switch (insn & 0xFE) {
+	case 0xE4: /* in     <next byte>,%al */
+		insnlen += 2;
+		in = 1;
+		break;
+	case 0xEC: /* in     (%dx),%al */
+		insnlen += 1;
+		in = 1;
+		break;
+	case 0xE6: /* out    %al,<next byte> */
+		insnlen += 2;
+		break;
+	case 0xEE: /* out    %al,(%dx) */
+		insnlen += 1;
+		break;
+	default:
+		/* OK, we don't know what this is, can't emulate. */
+		return 0;
+	}
+
+	/*
+	 * If it was an "IN" instruction, they expect the result to be read
+	 * into %eax, so we change %eax. We always return all-ones, which
+	 * traditionally means "there's nothing there".
+	 */
+	if (in) {
+		/* The lower bit tells us whether it's a 16- or 32-bit access */
+		if (insn & 0x1) {
+			if (small_operand)
+				cpu->regs->eax |= 0xFFFF;
+			else
+				cpu->regs->eax = 0xFFFFFFFF;
+		} else
+			cpu->regs->eax |= 0xFF;
+	}
+	/* Finally, we've "done" the instruction, so move past it. */
+	cpu->regs->eip += insnlen;
+	/* Success! */
+	return 1;
+}
+
+/*H:050 Once we've re-enabled interrupts, we look at why the Guest exited. */
+void lguest_arch_handle_trap(struct lg_cpu *cpu)
+{
+	switch (cpu->regs->trapnum) {
+	case 13: /* We've intercepted a General Protection Fault. */
+		/*
+		 * Check if this was one of those annoying IN or OUT
+		 * instructions which we need to emulate. If so, we just go
+		 * back into the Guest after we've done it.
+		 */
+		if (cpu->regs->errcode == 0) {
+			if (emulate_insn(cpu))
+				return;
+		}
+		break;
+	case 14: /* We've intercepted a Page Fault. */
+		/*
+		 * The Guest accessed a virtual address that wasn't mapped.
+		 * This happens a lot: we don't actually set up most of the page
+		 * tables for the Guest at all when we start: as it runs it asks
+		 * for more and more, and we set them up as required. In this
+		 * case, we don't even tell the Guest that the fault happened.
+		 *
+		 * The errcode tells whether this was a read or a write, and
+		 * whether kernel or userspace code.
+		 */
+		if (demand_page(cpu, cpu->arch.last_pagefault,
+				cpu->regs->errcode))
+			return;
+
+		/*
+		 * OK, it's really not there (or not OK): the Guest needs to
+		 * know. We write out the cr2 value so it knows where the
+		 * fault occurred.
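+		 * (The Guest's paravirt code reads it back from
+		 * lguest_data->cr2, since the real cr2 is long gone by the
+		 * time the Guest runs again.)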
+		 *
+		 * Note that if the Guest were really messed up, this could
+		 * happen before it's done the LHCALL_LGUEST_INIT hypercall, so
+		 * lg->lguest_data could be NULL.
+		 */
+		if (cpu->lg->lguest_data &&
+		    put_user(cpu->arch.last_pagefault,
+			     &cpu->lg->lguest_data->cr2))
+			kill_guest(cpu, "Writing cr2");
+		break;
+	case 7: /* We've intercepted a Device Not Available fault. */
+		/*
+		 * If the Guest doesn't want to know, we already restored the
+		 * Floating Point Unit, so we just continue without telling it.
+		 */
+		if (!cpu->ts)
+			return;
+		break;
+	case 32 ... 255:
+		/*
+		 * These values mean a real interrupt occurred, in which case
+		 * the Host handler has already been run. We just do a
+		 * friendly check if another process should now be run, then
+		 * return to run the Guest again.
+		 */
+		cond_resched();
+		return;
+	case LGUEST_TRAP_ENTRY:
+		/*
+		 * Our 'struct hcall_args' maps directly over our regs: we set
+		 * up the pointer now to indicate a hypercall is pending.
+		 */
+		cpu->hcall = (struct hcall_args *)cpu->regs;
+		return;
+	}
+
+	/* We didn't handle the trap, so it needs to go to the Guest. */
+	if (!deliver_trap(cpu, cpu->regs->trapnum))
+		/*
+		 * If the Guest doesn't have a handler (either it hasn't
+		 * registered any yet, or it's one of the faults we don't let
+		 * it handle), it dies with this cryptic error message.
+		 */
+		kill_guest(cpu, "unhandled trap %li at %#lx (%#lx)",
+			   cpu->regs->trapnum, cpu->regs->eip,
+			   cpu->regs->trapnum == 14 ? cpu->arch.last_pagefault
+			   : cpu->regs->errcode);
+}
+
+/*
+ * Now we can look at each of the routines this calls, in increasing order of
+ * complexity: do_hypercalls(), emulate_insn(), maybe_do_interrupt(),
+ * deliver_trap() and demand_page(). After all those, we'll be ready to
+ * examine the Switcher, and our philosophical understanding of the Host/Guest
+ * duality will be complete.
+:*/
+static void adjust_pge(void *on)
+{
+	if (on)
+		write_cr4(read_cr4() | X86_CR4_PGE);
+	else
+		write_cr4(read_cr4() & ~X86_CR4_PGE);
+}
+
+/*H:020
+ * Now that the Switcher is mapped and everything else is ready, we need to do
+ * some more i386-specific initialization.
+ */
+void __init lguest_arch_host_init(void)
+{
+	int i;
+
+	/*
+	 * Most of the x86/switcher_32.S doesn't care that it's been moved; on
+	 * Intel, jumps are relative, and it doesn't access any references to
+	 * external code or data.
+	 *
+	 * The only exception is the interrupt handlers in switcher.S: their
+	 * addresses are placed in a table (default_idt_entries), so we need to
+	 * update the table with the new addresses. switcher_offset() is a
+	 * convenience function which returns the distance between the
+	 * compiled-in switcher code and the high-mapped copy we just made.
+	 */
+	for (i = 0; i < IDT_ENTRIES; i++)
+		default_idt_entries[i] += switcher_offset();
+
+	/*
+	 * Set up the Switcher's per-cpu areas.
+	 *
+	 * Each CPU gets two pages of its own within the high-mapped region
+	 * (aka. "struct lguest_pages"). Much of this can be initialized now,
+	 * but some depends on what Guest we are running (which is set up in
+	 * copy_in_guest_info()).
+	 */
+	for_each_possible_cpu(i) {
+		/* lguest_pages() returns this CPU's two pages. */
+		struct lguest_pages *pages = lguest_pages(i);
+		/* This is a convenience pointer to make the code neater. */
+		struct lguest_ro_state *state = &pages->state;
+
+		/*
+		 * The Global Descriptor Table: the Host has a different one
+		 * for each CPU.
We keep a descriptor for the GDT which says
+		 * where it is and how big it is (the size is actually the last
+		 * byte, not the size, hence the "-1").
+		 */
+		state->host_gdt_desc.size = GDT_SIZE-1;
+		state->host_gdt_desc.address = (long)get_cpu_gdt_table(i);
+
+		/*
+		 * All CPUs on the Host use the same Interrupt Descriptor
+		 * Table, so we just use store_idt(), which gets this CPU's IDT
+		 * descriptor.
+		 */
+		store_idt(&state->host_idt_desc);
+
+		/*
+		 * The descriptors for the Guest's GDT and IDT can be filled
+		 * out now, too. We copy the GDT & IDT into ->guest_gdt and
+		 * ->guest_idt before actually running the Guest.
+		 */
+		state->guest_idt_desc.size = sizeof(state->guest_idt)-1;
+		state->guest_idt_desc.address = (long)&state->guest_idt;
+		state->guest_gdt_desc.size = sizeof(state->guest_gdt)-1;
+		state->guest_gdt_desc.address = (long)&state->guest_gdt;
+
+		/*
+		 * We know where we want the stack to be when the Guest enters
+		 * the Switcher: in pages->regs. The stack grows upwards, so
+		 * we start it at the end of that structure.
+		 */
+		state->guest_tss.sp0 = (long)(&pages->regs + 1);
+		/*
+		 * And this is the GDT entry to use for the stack: we keep a
+		 * couple of special LGUEST entries.
+		 */
+		state->guest_tss.ss0 = LGUEST_DS;
+
+		/*
+		 * x86 can have a fine-grained bitmap which indicates what I/O
+		 * ports the process can use. We set it to the end of our
+		 * structure, meaning "none".
+		 */
+		state->guest_tss.io_bitmap_base = sizeof(state->guest_tss);
+
+		/*
+		 * Some GDT entries are the same across all Guests, so we can
+		 * set them up now.
+		 */
+		setup_default_gdt_entries(state);
+		/* Most IDT entries are the same for all Guests, too. */
+		setup_default_idt_entries(state, default_idt_entries);
+
+		/*
+		 * The Host needs to be able to use the LGUEST segments on this
+		 * CPU, too, so put them in the Host GDT.
+		 */
+		get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT;
+		get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT;
+	}
+
+	/*
+	 * In the Switcher, we want the %cs segment register to use the
+	 * LGUEST_CS GDT entry: we've put that in the Host and Guest GDTs, so
+	 * it will be undisturbed when we switch. To change %cs and jump we
+	 * need this structure to feed to Intel's "lcall" instruction.
+	 */
+	lguest_entry.offset = (long)switch_to_guest + switcher_offset();
+	lguest_entry.segment = LGUEST_CS;
+
+	/*
+	 * Finally, we need to turn off "Page Global Enable". PGE is an
+	 * optimization where page table entries are specially marked to show
+	 * they never change. The Host kernel marks all the kernel pages this
+	 * way because it's always present, even when userspace is running.
+	 *
+	 * Lguest breaks this: unbeknownst to the rest of the Host kernel, we
+	 * switch to the Guest kernel. If you don't disable this on all CPUs,
+	 * you'll get really weird bugs that you'll chase for two days.
+	 *
+	 * I used to turn PGE off every time we switched to the Guest and back
+	 * on when we return, but that slowed the Switcher down noticeably.
+	 */
+
+	/*
+	 * We don't need the complexity of CPUs coming and going while we're
+	 * doing this.
+	 */
+	get_online_cpus();
+	if (cpu_has_pge) { /* We have a broader idea of "global". */
+		/* Remember that this was originally set (for cleanup). */
+		cpu_had_pge = 1;
+		/*
+		 * adjust_pge is a helper function which sets or unsets the PGE
+		 * bit on its CPU, depending on the argument (0 == unset).
+		 */
+		on_each_cpu(adjust_pge, (void *)0, 1);
+		/* Turn off the feature in the global feature set.
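+		 * (With the bit gone from boot_cpu_data, cpu_has_pge reads
+		 * false from here on, so nothing re-enables it behind us.)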
*/ + clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE); + } + put_online_cpus(); +} +/*:*/ + +void __exit lguest_arch_host_fini(void) +{ + /* If we had PGE before we started, turn it back on now. */ + get_online_cpus(); + if (cpu_had_pge) { + set_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE); + /* adjust_pge's argument "1" means set PGE. */ + on_each_cpu(adjust_pge, (void *)1, 1); + } + put_online_cpus(); +} + + +/*H:122 The i386-specific hypercalls simply farm out to the right functions. */ +int lguest_arch_do_hcall(struct lg_cpu *cpu, struct hcall_args *args) +{ + switch (args->arg0) { + case LHCALL_LOAD_GDT_ENTRY: + load_guest_gdt_entry(cpu, args->arg1, args->arg2, args->arg3); + break; + case LHCALL_LOAD_IDT_ENTRY: + load_guest_idt_entry(cpu, args->arg1, args->arg2, args->arg3); + break; + case LHCALL_LOAD_TLS: + guest_load_tls(cpu, args->arg1); + break; + default: + /* Bad Guest. Bad! */ + return -EIO; + } + return 0; +} + +/*H:126 i386-specific hypercall initialization: */ +int lguest_arch_init_hypercalls(struct lg_cpu *cpu) +{ + u32 tsc_speed; + + /* + * The pointer to the Guest's "struct lguest_data" is the only argument. + * We check that address now. + */ + if (!lguest_address_ok(cpu->lg, cpu->hcall->arg1, + sizeof(*cpu->lg->lguest_data))) + return -EFAULT; + + /* + * Having checked it, we simply set lg->lguest_data to point straight + * into the Launcher's memory at the right place and then use + * copy_to_user/from_user from now on, instead of lgread/write. I put + * this in to show that I'm not immune to writing stupid + * optimizations. + */ + cpu->lg->lguest_data = cpu->lg->mem_base + cpu->hcall->arg1; + + /* + * We insist that the Time Stamp Counter exist and doesn't change with + * cpu frequency. Some devious chip manufacturers decided that TSC + * changes could be handled in software. I decided that time going + * backwards might be good for benchmarks, but it's bad for users. + * + * We also insist that the TSC be stable: the kernel detects unreliable + * TSCs for its own purposes, and we use that here. + */ + if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) && !check_tsc_unstable()) + tsc_speed = tsc_khz; + else + tsc_speed = 0; + if (put_user(tsc_speed, &cpu->lg->lguest_data->tsc_khz)) + return -EFAULT; + + /* The interrupt code might not like the system call vector. */ + if (!check_syscall_vector(cpu->lg)) + kill_guest(cpu, "bad syscall vector"); + + return 0; +} +/*:*/ + +/*L:030 + * Most of the Guest's registers are left alone: we used get_zeroed_page() to + * allocate the structure, so they will be 0. + */ +void lguest_arch_setup_regs(struct lg_cpu *cpu, unsigned long start) +{ + struct lguest_regs *regs = cpu->regs; + + /* + * There are four "segment" registers which the Guest needs to boot: + * The "code segment" register (cs) refers to the kernel code segment + * __KERNEL_CS, and the "data", "extra" and "stack" segment registers + * refer to the kernel data segment __KERNEL_DS. + * + * The privilege level is packed into the lower bits. The Guest runs + * at privilege level 1 (GUEST_PL). + */ + regs->ds = regs->es = regs->ss = __KERNEL_DS|GUEST_PL; + regs->cs = __KERNEL_CS|GUEST_PL; + + /* + * The "eflags" register contains miscellaneous flags. Bit 1 (0x002) + * is supposed to always be "1". Bit 9 (0x200) controls whether + * interrupts are enabled. We always leave interrupts enabled while + * running the Guest. + */ + regs->eflags = X86_EFLAGS_IF | X86_EFLAGS_BIT1; + + /* + * The "Extended Instruction Pointer" register says where the Guest is + * running. 
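+	 * (Here "start" is the Guest kernel's entry point, which the
+	 * Launcher gave us when it created the Guest.)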
+	 */
+	regs->eip = start;
+
+	/*
+	 * %esi points to our boot information, at physical address 0, so don't
+	 * touch it.
+	 */
+
+	/* There are a couple of GDT entries the Guest expects at boot. */
+	setup_guest_gdt(cpu);
+}
diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
new file mode 100644
index 00000000..40634b0d
--- /dev/null
+++ b/drivers/lguest/x86/switcher_32.S
@@ -0,0 +1,388 @@
+/*P:900
+ * This is the Switcher: code which sits at 0xFFC00000 (or 0xFFE00000) astride
+ * both the Host and Guest to do the low-level Guest<->Host switch. It is as
+ * simple as it can be made, but it's naturally very specific to x86.
+ *
+ * You have now completed Preparation. If this has whetted your appetite; if
+ * you are feeling invigorated and refreshed then the next, more challenging
+ * stage can be found in "make Guest".
+ :*/
+
+/*M:012
+ * Lguest is meant to be simple: my rule of thumb is that 1% more LOC must
+ * gain at least 1% more performance. Since neither LOC nor performance can be
+ * measured beforehand, it generally means implementing a feature then deciding
+ * if it's worth it. And once it's implemented, who can say no?
+ *
+ * This is why I haven't implemented this idea myself. I want to, but I
+ * haven't. You could, though.
+ *
+ * The main place where lguest performance sucks is Guest page faulting. When
+ * a Guest userspace process hits an unmapped page we switch back to the Host,
+ * walk the page tables, find it's not mapped, switch back to the Guest page
+ * fault handler, which calls a hypercall to set the page table entry, then
+ * finally returns to userspace. That's two round-trips.
+ *
+ * If we had a small walker in the Switcher, we could quickly check the Guest
+ * page table and if the page isn't mapped, immediately reflect the fault back
+ * into the Guest. This means the Switcher would have to know the top of the
+ * Guest page table and the page fault handler address.
+ *
+ * For simplicity, the Guest should only handle the case where the privilege
+ * level of the fault is 3 and probably only not present or write faults. It
+ * should also detect recursive faults, and hand the original fault to the
+ * Host (which is actually really easy).
+ *
+ * Two questions remain. Would the performance gain outweigh the complexity?
+ * And who would write the verse documenting it?
+:*/
+
+/*M:011
+ * Lguest64 handles NMI. This gave me NMI envy (until I looked at their
+ * code). It's worth doing though, since it would let us use oprofile in the
+ * Host when a Guest is running.
+:*/
+
+/*S:100
+ * Welcome to the Switcher itself!
+ *
+ * This file contains the low-level code which changes the CPU to run the Guest
+ * code, and returns to the Host when something happens. Understand this, and
+ * you understand the heart of our journey.
+ *
+ * Because this is in assembler rather than C, our tale switches from prose to
+ * verse. First I tried limericks:
+ *
+ *	There once was an eax reg,
+ *	To which our pointer was fed,
+ *	It needed an add,
+ *	Which asm-offsets.h had
+ *	But this limerick is hurting my head.
+ *
+ * Next I tried haikus, but fitting the required reference to the seasons in
+ * every stanza was quickly becoming tiresome:
+ *
+ *	The %eax reg
+ *	Holds "struct lguest_pages" now:
+ *	Cherry blossoms fall.
+ * + * Then I started with Heroic Verse, but the rhyming requirement leeched away + * the content density and led to some uniquely awful oblique rhymes: + * + * These constants are coming from struct offsets + * For use within the asm switcher text. + * + * Finally, I settled for something between heroic hexameter, and normal prose + * with inappropriate linebreaks. Anyway, it aint no Shakespeare. + */ + +// Not all kernel headers work from assembler +// But these ones are needed: the ENTRY() define +// And constants extracted from struct offsets +// To avoid magic numbers and breakage: +// Should they change the compiler can't save us +// Down here in the depths of assembler code. +#include <linux/linkage.h> +#include <asm/asm-offsets.h> +#include <asm/page.h> +#include <asm/segment.h> +#include <asm/lguest.h> + +// We mark the start of the code to copy +// It's placed in .text tho it's never run here +// You'll see the trick macro at the end +// Which interleaves data and text to effect. +.text +ENTRY(start_switcher_text) + +// When we reach switch_to_guest we have just left +// The safe and comforting shores of C code +// %eax has the "struct lguest_pages" to use +// Where we save state and still see it from the Guest +// And %ebx holds the Guest shadow pagetable: +// Once set we have truly left Host behind. +ENTRY(switch_to_guest) + // We told gcc all its regs could fade, + // Clobbered by our journey into the Guest + // We could have saved them, if we tried + // But time is our master and cycles count. + + // Segment registers must be saved for the Host + // We push them on the Host stack for later + pushl %es + pushl %ds + pushl %gs + pushl %fs + // But the compiler is fickle, and heeds + // No warning of %ebp clobbers + // When frame pointers are used. That register + // Must be saved and restored or chaos strikes. + pushl %ebp + // The Host's stack is done, now save it away + // In our "struct lguest_pages" at offset + // Distilled into asm-offsets.h + movl %esp, LGUEST_PAGES_host_sp(%eax) + + // All saved and there's now five steps before us: + // Stack, GDT, IDT, TSS + // Then last of all the page tables are flipped. + + // Yet beware that our stack pointer must be + // Always valid lest an NMI hits + // %edx does the duty here as we juggle + // %eax is lguest_pages: our stack lies within. + movl %eax, %edx + addl $LGUEST_PAGES_regs, %edx + movl %edx, %esp + + // The Guest's GDT we so carefully + // Placed in the "struct lguest_pages" before + lgdt LGUEST_PAGES_guest_gdt_desc(%eax) + + // The Guest's IDT we did partially + // Copy to "struct lguest_pages" as well. + lidt LGUEST_PAGES_guest_idt_desc(%eax) + + // The TSS entry which controls traps + // Must be loaded up with "ltr" now: + // The GDT entry that TSS uses + // Changes type when we load it: damn Intel! + // For after we switch over our page tables + // That entry will be read-only: we'd crash. + movl $(GDT_ENTRY_TSS*8), %edx + ltr %dx + + // Look back now, before we take this last step! + // The Host's TSS entry was also marked used; + // Let's clear it again for our return. + // The GDT descriptor of the Host + // Points to the table after two "size" bytes + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx + // Clear "used" from type field (byte 5, bit 2) + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx) + + // Once our page table's switched, the Guest is live! + // The Host fades as we run this final step. + // Our "struct lguest_pages" is now read-only. 
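+	// (%ebx has held __pa() of the shadow toplevel ever since
+	// run_guest_once() passed it in as an asm input operand.)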
+ movl %ebx, %cr3 + + // The page table change did one tricky thing: + // The Guest's register page has been mapped + // Writable under our %esp (stack) -- + // We can simply pop off all Guest regs. + popl %eax + popl %ebx + popl %ecx + popl %edx + popl %esi + popl %edi + popl %ebp + popl %gs + popl %fs + popl %ds + popl %es + + // Near the base of the stack lurk two strange fields + // Which we fill as we exit the Guest + // These are the trap number and its error + // We can simply step past them on our way. + addl $8, %esp + + // The last five stack slots hold return address + // And everything needed to switch privilege + // From Switcher's level 0 to Guest's 1, + // And the stack where the Guest had last left it. + // Interrupts are turned back on: we are Guest. + iret + +// We tread two paths to switch back to the Host +// Yet both must save Guest state and restore Host +// So we put the routine in a macro. +#define SWITCH_TO_HOST \ + /* We save the Guest state: all registers first \ + * Laid out just as "struct lguest_regs" defines */ \ + pushl %es; \ + pushl %ds; \ + pushl %fs; \ + pushl %gs; \ + pushl %ebp; \ + pushl %edi; \ + pushl %esi; \ + pushl %edx; \ + pushl %ecx; \ + pushl %ebx; \ + pushl %eax; \ + /* Our stack and our code are using segments \ + * Set in the TSS and IDT \ + * Yet if we were to touch data we'd use \ + * Whatever data segment the Guest had. \ + * Load the lguest ds segment for now. */ \ + movl $(LGUEST_DS), %eax; \ + movl %eax, %ds; \ + /* So where are we? Which CPU, which struct? \ + * The stack is our clue: our TSS starts \ + * It at the end of "struct lguest_pages". \ + * Or we may have stumbled while restoring \ + * Our Guest segment regs while in switch_to_guest, \ + * The fault pushed atop that part-unwound stack. \ + * If we round the stack down to the page start \ + * We're at the start of "struct lguest_pages". */ \ + movl %esp, %eax; \ + andl $(~(1 << PAGE_SHIFT - 1)), %eax; \ + /* Save our trap number: the switch will obscure it \ + * (In the Host the Guest regs are not mapped here) \ + * %ebx holds it safe for deliver_to_host */ \ + movl LGUEST_PAGES_regs_trapnum(%eax), %ebx; \ + /* The Host GDT, IDT and stack! \ + * All these lie safely hidden from the Guest: \ + * We must return to the Host page tables \ + * (Hence that was saved in struct lguest_pages) */ \ + movl LGUEST_PAGES_host_cr3(%eax), %edx; \ + movl %edx, %cr3; \ + /* As before, when we looked back at the Host \ + * As we left and marked TSS unused \ + * So must we now for the Guest left behind. */ \ + andb $0xFD, (LGUEST_PAGES_guest_gdt+GDT_ENTRY_TSS*8+5)(%eax); \ + /* Switch to Host's GDT, IDT. */ \ + lgdt LGUEST_PAGES_host_gdt_desc(%eax); \ + lidt LGUEST_PAGES_host_idt_desc(%eax); \ + /* Restore the Host's stack where its saved regs lie */ \ + movl LGUEST_PAGES_host_sp(%eax), %esp; \ + /* Last the TSS: our Host is returned */ \ + movl $(GDT_ENTRY_TSS*8), %edx; \ + ltr %dx; \ + /* Restore now the regs saved right at the first. */ \ + popl %ebp; \ + popl %fs; \ + popl %gs; \ + popl %ds; \ + popl %es + +// The first path is trod when the Guest has trapped: +// (Which trap it was has been pushed on the stack). +// We need only switch back, and the Host will decode +// Why we came home, and what needs to be done. 
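+// (The "iret" below pops the EIP, CS and EFLAGS which run_guest_once()'s
+// "pushf; lcall" left on the Host stack: we surface in C just past the
+// lcall, as though running the Guest had been a mere function call.)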
+return_to_host:
+	SWITCH_TO_HOST
+	iret
+
+// We are led to the second path like so:
+// An interrupt, with some cause external
+// Has a-jerked us rudely from the Guest's code
+// Again we must return home to the Host
+deliver_to_host:
+	SWITCH_TO_HOST
+	// But now we must go home via that place
+	// Where that interrupt was supposed to go
+	// Had we not been ensconced, running the Guest.
+	// Here we see the trickiness of run_guest_once():
+	// The Host stack is formed like an interrupt
+	// With EIP, CS and EFLAGS layered.
+	// Interrupt handlers end with "iret"
+	// And that will take us home at long long last.
+
+	// But first we must find the handler to call!
+	// The IDT descriptor for the Host
+	// Has two bytes for size, and four for address:
+	// %edx will hold it for us for now.
+	movl	(LGUEST_PAGES_host_idt_desc+2)(%eax), %edx
+	// We now know the table address we need,
+	// And saved the trap's number inside %ebx.
+	// Yet the pointer to the handler is smeared
+	// Across the bits of the table entry.
+	// What oracle can tell us how to extract
+	// From such a convoluted encoding?
+	// I consulted gcc, and it gave
+	// These instructions, which I gladly credit:
+	leal	(%edx,%ebx,8), %eax
+	movzwl	(%eax),%edx
+	movl	4(%eax), %eax
+	xorw	%ax, %ax
+	orl	%eax, %edx
+	// Now the address of the handler's in %edx
+	// We call it now: its "iret" drops us home.
+	jmp	*%edx
+
+// Every interrupt can come to us here
+// But we must truly tell each apart.
+// They number two hundred and fifty six
+// And each must land in a different spot,
+// Push its number on stack, and join the stream.
+
+// And worse, a mere six of the traps stand apart
+// And push on their stack an addition:
+// An error number, thirty two bits long
+// So we punish the other two fifty
+// And make them push a zero so they match.
+
+// Yet two fifty six entries is long
+// And all will look most the same as the last
+// So we create a macro which can make
+// As many entries as we need to fill.
+
+// Note the change to .data then .text:
+// We plant the address of each entry
+// Into a (data) table for the Host
+// To know where each Guest interrupt should go.
+.macro IRQ_STUB N TARGET
+	.data; .long 1f; .text; 1:
+ // Trap eight, ten through fourteen and seventeen
+ // Supply an error number. Else zero.
+ .if (\N <> 8) && (\N < 10 || \N > 14) && (\N <> 17)
+	pushl	$0
+ .endif
+	pushl	$\N
+	jmp	\TARGET
+	ALIGN
+.endm
+
+// This macro creates numerous entries
+// Using GAS macros which out-power C's.
+.macro IRQ_STUBS FIRST LAST TARGET
+	irq=\FIRST
+	.rept \LAST-\FIRST+1
+		IRQ_STUB irq \TARGET
+		irq=irq+1
+	.endr
+.endm
+
+// Here's the marker for our pointer table
+// Laid in the data section just before
+// Each macro places the address of code
+// Forming an array: each one points to text
+// Which handles interrupt in its turn.
+.data
+.global default_idt_entries
+default_idt_entries:
+.text
+	// The first two traps go straight back to the Host
+	IRQ_STUBS 0 1 return_to_host
+	// We'll say nothing, yet, about NMI
+	IRQ_STUB 2 handle_nmi
+	// Other traps also return to the Host
+	IRQ_STUBS 3 31 return_to_host
+	// All interrupts go via their handlers
+	IRQ_STUBS 32 127 deliver_to_host
+	// 'Cept system calls coming from userspace
+	// Are to go to the Guest, never the Host.
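+	// (128 is 0x80, the classic Linux "int $0x80" system call vector:
+	// it takes the return_to_host path so deliver_trap() can reflect
+	// it into the Guest's own handler.)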
	IRQ_STUB 128 return_to_host
+	IRQ_STUBS 129 255 deliver_to_host
+
+// The NMI, what a fabulous beast
+// Which swoops in and stops us no matter that
+// We're suspended between heaven and hell,
+// (Or more likely between the Host and Guest)
+// When in it comes! We are dazed and confused
+// So we do the simplest thing which one can.
+// Though we've pushed the trap number and zero
+// We discard them, return, and hope we live.
+handle_nmi:
+	addl	$8, %esp
+	iret
+
+// We are done; all that's left is Mastery
+// And "make Mastery" is a journey long
+// Designed to make your fingers itch to code.
+
+// Here ends the text, the file and poem.
+ENTRY(end_switcher_text)