author    Bastian Blank <waldi@debian.org>   2006-03-20 10:44:58 +0000
committer Bastian Blank <waldi@debian.org>   2006-03-20 10:44:58 +0000
commit    5d02d7b39829f5b215bddbd58400d8b027e214f7 (patch)
tree      9a74c8ed6f5f7684b28444ae5fb9b636dc7ec920
parent    dcb3ff6aa0a5d013264bfc43762d4179b44a3ecd (diff)
* debian/arch/amd64/defines, debian/arch/i386/defines:
  Disable xen and xen-vserver subarches.
* debian/patches/series/1-extra: Remove xen patches.
* debian/patches/vserver-xen-clash.patch,
  debian/patches/xen-tree-merge-21966.patch: Remove.

svn path=/dists/sid/linux-2.6/; revision=6229
-rw-r--r--  debian/arch/amd64/defines                 |     2 -
-rw-r--r--  debian/arch/i386/defines                  |     1 -
-rw-r--r--  debian/patches/series/1-extra             |     2 -
-rw-r--r--  debian/patches/vserver-xen-clash.patch    |    60 -
-rw-r--r--  debian/patches/xen-tree-merge-21966.patch | 77857 -
5 files changed, 0 insertions(+), 77922 deletions(-)
diff --git a/debian/arch/amd64/defines b/debian/arch/amd64/defines
index 0957b328789d..c288bf6aa08b 100644
--- a/debian/arch/amd64/defines
+++ b/debian/arch/amd64/defines
@@ -9,8 +9,6 @@ kernel-arch: x86_64
kernel-header-dirs: x86_64
subarches:
vserver
- xen
- xen-vserver
[image]
depends: e2fsprogs (>= 1.35-7)
diff --git a/debian/arch/i386/defines b/debian/arch/i386/defines
index cdfe6e3cfa93..1ddd0cd999fc 100644
--- a/debian/arch/i386/defines
+++ b/debian/arch/i386/defines
@@ -9,7 +9,6 @@ kernel-arch: i386
kernel-header-dirs: i386
subarches:
vserver
- xen
[image]
suggests: grub | lilo (>= 19.1)
diff --git a/debian/patches/series/1-extra b/debian/patches/series/1-extra
index 4053ee3802de..35a499df84de 100644
--- a/debian/patches/series/1-extra
+++ b/debian/patches/series/1-extra
@@ -2,7 +2,5 @@
#+ arm-nslu2-maclist.patch arm armeb
+ vserver-version.patch *_vserver *_xen-vserver
+ vserver-vs2.0.2-rc13.patch *_vserver *_xen-vserver
-+ vserver-xen-clash.patch *_xen-vserver
-+ xen-tree-merge-21966.patch *_xen *_xen-vserver
#+ mips-tulip.patch mipsel
#+ mips-tulip_dc21143.patch mipsel
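In this series-file format, a line beginning with "+" enables a patch, and the trailing words restrict it to the listed arch/subarch combinations: "mipsel" above pins a patch to one architecture, while a glob such as "*_xen-vserver" selects the xen-vserver subarch on every architecture. With the xen subarches removed from the defines files, the two xen entries are dropped from every build.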
diff --git a/debian/patches/vserver-xen-clash.patch b/debian/patches/vserver-xen-clash.patch
deleted file mode 100644
index 14d044832ed0..000000000000
--- a/debian/patches/vserver-xen-clash.patch
+++ /dev/null
@@ -1,60 +0,0 @@
---- a/arch/i386/boot/compressed/misc.c
-+++ b/arch/i386/boot/compressed/misc.c
-@@ -309,7 +309,7 @@ static void setup_normal_output_buffer(v
- #else
- if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < 1024) error("Less than 2MB of memory");
- #endif
-- output_data = (char *)PHYSICAL_START; /* Normally Points to 1M */
-+ output_data = (char *)__PHYSICAL_START; /* Normally Points to 1M */
- free_mem_end_ptr = (long)real_mode;
- }
-
-@@ -334,8 +334,8 @@ static void setup_output_buffer_if_we_ru
- low_buffer_size = low_buffer_end - LOW_BUFFER_START;
- high_loaded = 1;
- free_mem_end_ptr = (long)high_buffer_start;
-- if ((PHYSICAL_START + low_buffer_size) > ((ulg)high_buffer_start)) {
-- high_buffer_start = (uch *)(PHYSICAL_START + low_buffer_size);
-+ if ( (__PHYSICAL_START + low_buffer_size) > ((ulg)high_buffer_start)) {
-+ high_buffer_start = (uch *)(__PHYSICAL_START + low_buffer_size);
- mv->hcount = 0; /* say: we need not to move high_buffer */
- }
- else mv->hcount = -1;
---- a/arch/i386/kernel/setup.c
-+++ b/arch/i386/kernel/setup.c
-@@ -1192,8 +1192,8 @@ void __init setup_bootmem_allocator(void
- * the (very unlikely) case of us accidentally initializing the
- * bootmem allocator with an invalid RAM area.
- */
-- reserve_bootmem(PHYSICAL_START, (PFN_PHYS(min_low_pfn) +
-- bootmap_size + PAGE_SIZE-1) - (PHYSICAL_START));
-+ reserve_bootmem(__PHYSICAL_START, (PFN_PHYS(min_low_pfn) +
-+ bootmap_size + PAGE_SIZE-1) - (__PHYSICAL_START));
-
- /*
- * reserve physical page 0 - it's a special BIOS page on many boxes,
---- a/include/asm-i386/page.h
-+++ b/include/asm-i386/page.h
-@@ -109,15 +109,19 @@ extern int page_is_ram(unsigned long pag
-
- #endif /* __ASSEMBLY__ */
-
-+#ifdef __ASSEMBLY__
- #define __PAGE_OFFSET CONFIG_PAGE_OFFSET
- #define __PHYSICAL_START CONFIG_PHYSICAL_START
-+#else
-+#define __PAGE_OFFSET ((unsigned long)CONFIG_PAGE_OFFSET)
-+#define __PHYSICAL_START ((unsigned long)CONFIG_PHYSICAL_START)
-+#endif
- #define __KERNEL_START (__PAGE_OFFSET + __PHYSICAL_START)
--#define __MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE)
-+
-
- #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
--#define PHYSICAL_START ((unsigned long)__PHYSICAL_START)
- #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
--#define MAXMEM ((unsigned long)__MAXMEM)
-+#define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE)
- #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
- #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
- #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
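This deleted patch is the heart of the vserver/xen clash: the Xen tree renames PHYSICAL_START to __PHYSICAL_START and hides the (unsigned long) casts behind an __ASSEMBLY__ guard, since the casts are C-only syntax that the assembler would reject, while the vserver patch still expected the old names. A minimal userspace sketch of how the page.h macros above fit together (the CONFIG_ values are hand-picked for illustration, not taken from any real config):

#include <stdio.h>

/* Illustrative values; a real kernel takes these from its .config. */
#define CONFIG_PAGE_OFFSET    0xC0000000UL   /* 3 GiB user/kernel split */
#define CONFIG_PHYSICAL_START 0x00100000UL   /* kernel loaded at 1 MiB */

#define __PAGE_OFFSET    ((unsigned long)CONFIG_PAGE_OFFSET)
#define __PHYSICAL_START ((unsigned long)CONFIG_PHYSICAL_START)
#define __KERNEL_START   (__PAGE_OFFSET + __PHYSICAL_START)
#define __pa(x) ((unsigned long)(x) - __PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long)(x) + __PAGE_OFFSET))

int main(void)
{
    /* The kernel's virtual start address maps back to its physical
     * load address; the decompressor and bootmem hunks above rely on
     * exactly this relationship. */
    printf("__KERNEL_START = %#lx, __pa(__KERNEL_START) = %#lx\n",
           __KERNEL_START, __pa(__KERNEL_START));
    return 0;
}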
diff --git a/debian/patches/xen-tree-merge-21966.patch b/debian/patches/xen-tree-merge-21966.patch
deleted file mode 100644
index 48414aabfd23..000000000000
--- a/debian/patches/xen-tree-merge-21966.patch
+++ /dev/null
@@ -1,77857 +0,0 @@
-diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
-index 5b1a7d4..6a4988c 100644
---- a/arch/i386/Kconfig
-+++ b/arch/i386/Kconfig
-@@ -58,6 +58,15 @@ config X86_PC
- help
- Choose this option if your computer is a standard PC or compatible.
-
-+config X86_XEN
-+ bool "Xen-compatible"
-+ select X86_UP_APIC if !SMP && XEN_PRIVILEGED_GUEST
-+ select X86_UP_IOAPIC if !SMP && XEN_PRIVILEGED_GUEST
-+ select SWIOTLB
-+ help
-+ Choose this option if you plan to run this kernel on top of the
-+ Xen Hypervisor.
-+
- config X86_ELAN
- bool "AMD Elan"
- help
-@@ -159,6 +168,7 @@ source "arch/i386/Kconfig.cpu"
-
- config HPET_TIMER
- bool "HPET Timer Support"
-+ depends on !X86_XEN
- help
- This enables the use of the HPET for the kernel's internal timer.
- HPET is the next generation timer replacing legacy 8254s.
-@@ -202,6 +212,19 @@ config SMP
-
- If you don't know what to do here, say N.
-
-+config SMP_ALTERNATIVES
-+ bool "SMP alternatives support (EXPERIMENTAL)"
-+ depends on SMP && EXPERIMENTAL
-+ help
-+ Try to reduce the overhead of running an SMP kernel on a uniprocessor
-+ host slightly by replacing certain key instruction sequences
-+ according to whether we currently have more than one CPU available.
-+ This should provide a noticeable boost to performance when
-+ running SMP kernels on UP machines, and have negligible impact
-+ when running on a true SMP host.
-+
-+ If unsure, say N.
-+
- config NR_CPUS
- int "Maximum number of CPUs (2-255)"
- range 2 255
-@@ -218,7 +241,7 @@ config NR_CPUS
-
- config SCHED_SMT
- bool "SMT (Hyperthreading) scheduler support"
-- depends on SMP
-+ depends on SMP && !X86_XEN
- default off
- help
- SMT scheduler support improves the CPU scheduler's decision making
-@@ -230,7 +253,7 @@ source "kernel/Kconfig.preempt"
-
- config X86_UP_APIC
- bool "Local APIC support on uniprocessors"
-- depends on !SMP && !(X86_VISWS || X86_VOYAGER)
-+ depends on !SMP && !(X86_VISWS || X86_VOYAGER || XEN_UNPRIVILEGED_GUEST)
- help
- A local APIC (Advanced Programmable Interrupt Controller) is an
- integrated interrupt controller in the CPU. If you have a single-CPU
-@@ -255,12 +278,12 @@ config X86_UP_IOAPIC
-
- config X86_LOCAL_APIC
- bool
-- depends on X86_UP_APIC || ((X86_VISWS || SMP) && !X86_VOYAGER)
-+ depends on X86_UP_APIC || ((X86_VISWS || SMP) && !(X86_VOYAGER || XEN_UNPRIVILEGED_GUEST))
- default y
-
- config X86_IO_APIC
- bool
-- depends on X86_UP_IOAPIC || (SMP && !(X86_VISWS || X86_VOYAGER))
-+ depends on X86_UP_IOAPIC || (SMP && !(X86_VISWS || X86_VOYAGER || XEN_UNPRIVILEGED_GUEST))
- default y
-
- config X86_VISWS_APIC
-@@ -268,9 +291,14 @@ config X86_VISWS_APIC
- depends on X86_VISWS
- default y
-
-+config X86_TSC
-+ bool
-+ depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MGEODEGX1) && !X86_NUMAQ && !X86_XEN
-+ default y
-+
- config X86_MCE
- bool "Machine Check Exception"
-- depends on !X86_VOYAGER
-+ depends on !(X86_VOYAGER || X86_XEN)
- ---help---
- Machine Check Exception support allows the processor to notify the
- kernel if it detects a problem (e.g. overheating, component failure).
-@@ -360,6 +388,7 @@ config X86_REBOOTFIXUPS
-
- config MICROCODE
- tristate "/dev/cpu/microcode - Intel IA32 CPU microcode support"
-+ depends on !XEN_UNPRIVILEGED_GUEST
- ---help---
- If you say Y here and also to "/dev file system support" in the
- 'File systems' section, you will be able to update the microcode on
-@@ -377,6 +406,7 @@ config MICROCODE
-
- config X86_MSR
- tristate "/dev/cpu/*/msr - Model-specific register support"
-+ depends on !X86_XEN
- help
- This device gives privileged processes access to the x86
- Model-Specific Registers (MSRs). It is a character device with
-@@ -392,6 +422,10 @@ config X86_CPUID
- with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
- /dev/cpu/31/cpuid.
-
-+config SWIOTLB
-+ bool
-+ default n
-+
- source "drivers/firmware/Kconfig"
-
- choice
-@@ -560,7 +594,7 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
-
- config HIGHPTE
- bool "Allocate 3rd-level pagetables from highmem"
-- depends on HIGHMEM4G || HIGHMEM64G
-+ depends on (HIGHMEM4G || HIGHMEM64G) && !X86_XEN
- help
- The VM uses one page table entry for each page of physical memory.
- For systems with a lot of RAM, this can be wasteful of precious
-@@ -569,6 +603,7 @@ config HIGHPTE
-
- config MATH_EMULATION
- bool "Math emulation"
-+ depends on !X86_XEN
- ---help---
- Linux can emulate a math coprocessor (used for floating point
- operations) if you don't have one. 486DX and Pentium processors have
-@@ -594,6 +629,8 @@ config MATH_EMULATION
-
- config MTRR
- bool "MTRR (Memory Type Range Register) support"
-+ depends on !XEN_UNPRIVILEGED_GUEST
-+ default y if X86_XEN
- ---help---
- On Intel P6 family processors (Pentium Pro, Pentium II and later)
- the Memory Type Range Registers (MTRRs) may be used to control
-@@ -628,7 +665,7 @@ config MTRR
-
- config EFI
- bool "Boot from EFI support (EXPERIMENTAL)"
-- depends on ACPI
-+ depends on ACPI && !X86_XEN
- default n
- ---help---
- This enables the kernel to boot on EFI platforms using
-@@ -646,7 +683,7 @@ config EFI
-
- config IRQBALANCE
- bool "Enable kernel irq balancing"
-- depends on SMP && X86_IO_APIC
-+ depends on SMP && X86_IO_APIC && !X86_XEN
- default y
- help
- The default yes will allow the kernel to do irq load balancing.
-@@ -689,7 +726,7 @@ source kernel/Kconfig.hz
-
- config KEXEC
- bool "kexec system call (EXPERIMENTAL)"
-- depends on EXPERIMENTAL
-+ depends on EXPERIMENTAL && !X86_XEN
- help
- kexec is a system call that implements the ability to shutdown your
- current kernel, and to start another kernel. It is like a reboot
-@@ -743,6 +780,7 @@ config HOTPLUG_CPU
- config DOUBLEFAULT
- default y
- bool "Enable doublefault exception handler" if EMBEDDED
-+ depends on !X86_NO_TSS
- help
- This option allows trapping of rare doublefault exceptions that
- would otherwise cause a system to silently reboot. Disabling this
-@@ -753,18 +791,20 @@ endmenu
-
-
- menu "Power management options (ACPI, APM)"
-- depends on !X86_VOYAGER
-+ depends on !(X86_VOYAGER || XEN_UNPRIVILEGED_GUEST)
-
-+if !X86_XEN
- source kernel/power/Kconfig
-+endif
-
- source "drivers/acpi/Kconfig"
-
- menu "APM (Advanced Power Management) BIOS Support"
--depends on PM && !X86_VISWS
-+depends on PM && !(X86_VISWS || X86_XEN)
-
- config APM
- tristate "APM (Advanced Power Management) BIOS support"
-- depends on PM
-+ depends on PM && PM_LEGACY
- ---help---
- APM is a BIOS specification for saving power using several different
- techniques. This is mostly useful for battery powered laptops with
-@@ -949,6 +989,7 @@ choice
-
- config PCI_GOBIOS
- bool "BIOS"
-+ depends on !X86_XEN
-
- config PCI_GOMMCONFIG
- bool "MMConfig"
-@@ -956,6 +997,13 @@ config PCI_GOMMCONFIG
- config PCI_GODIRECT
- bool "Direct"
-
-+config PCI_GOXEN_FE
-+ bool "Xen PCI Frontend"
-+ depends on X86_XEN
-+ help
-+ The PCI device frontend driver allows the kernel to import arbitrary
-+ PCI devices from a PCI backend to support PCI driver domains.
-+
- config PCI_GOANY
- bool "Any"
-
-@@ -963,7 +1011,7 @@ endchoice
-
- config PCI_BIOS
- bool
-- depends on !X86_VISWS && PCI && (PCI_GOBIOS || PCI_GOANY)
-+ depends on !(X86_VISWS || X86_XEN) && PCI && (PCI_GOBIOS || PCI_GOANY)
- default y
-
- config PCI_DIRECT
-@@ -976,6 +1024,18 @@ config PCI_MMCONFIG
- depends on PCI && ACPI && (PCI_GOMMCONFIG || PCI_GOANY)
- default y
-
-+config XEN_PCIDEV_FRONTEND
-+ bool
-+ depends on PCI && X86_XEN && (PCI_GOXEN_FE || PCI_GOANY)
-+ default y
-+
-+config XEN_PCIDEV_FE_DEBUG
-+ bool "Xen PCI Frontend Debugging"
-+ depends on XEN_PCIDEV_FRONTEND
-+ default n
-+ help
-+ Enables some debug statements within the PCI Frontend.
-+
- source "drivers/pci/pcie/Kconfig"
-
- source "drivers/pci/Kconfig"
-@@ -986,7 +1046,7 @@ config ISA_DMA_API
-
- config ISA
- bool "ISA support"
-- depends on !(X86_VOYAGER || X86_VISWS)
-+ depends on !(X86_VOYAGER || X86_VISWS || X86_XEN)
- help
- Find out whether you have ISA slots on your motherboard. ISA is the
- name of a bus system, i.e. the way the CPU talks to the other stuff
-@@ -1013,7 +1073,7 @@ config EISA
- source "drivers/eisa/Kconfig"
-
- config MCA
-- bool "MCA support" if !(X86_VISWS || X86_VOYAGER)
-+ bool "MCA support" if !(X86_VISWS || X86_VOYAGER || X86_XEN)
- default y if X86_VOYAGER
- help
- MicroChannel Architecture is found in some IBM PS/2 machines and
-@@ -1056,7 +1116,9 @@ source "fs/Kconfig"
- menu "Instrumentation Support"
- depends on EXPERIMENTAL
-
-+if !X86_XEN
- source "arch/i386/oprofile/Kconfig"
-+endif
-
- config KPROBES
- bool "Kprobes (EXPERIMENTAL)"
-@@ -1075,6 +1137,8 @@ source "security/Kconfig"
-
- source "crypto/Kconfig"
-
-+source "drivers/xen/Kconfig"
-+
- source "lib/Kconfig"
-
- #
-@@ -1100,7 +1164,7 @@ config X86_SMP
-
- config X86_HT
- bool
-- depends on SMP && !(X86_VISWS || X86_VOYAGER)
-+ depends on SMP && !(X86_VISWS || X86_VOYAGER || X86_XEN)
- default y
-
- config X86_BIOS_REBOOT
-@@ -1113,6 +1177,21 @@ config X86_TRAMPOLINE
- depends on X86_SMP || (X86_VOYAGER && SMP)
- default y
-
-+config X86_NO_TSS
-+ bool
-+ depends on X86_XEN
-+ default y
-+
-+config X86_SYSENTER
-+ bool
-+ depends on !X86_NO_TSS
-+ default y
-+
-+config X86_NO_IDT
-+ bool
-+ depends on X86_XEN
-+ default y
-+
- config KTIME_SCALAR
- bool
- default y
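The Kconfig changes above all follow one pattern: options that touch native hardware (HPET, MCE, MSR, math emulation, EFI, kexec, and so on) gain a !X86_XEN or !XEN_UNPRIVILEGED_GUEST dependency, and new always-on symbols such as X86_NO_TSS and X86_NO_IDT give C code something to test. Below is a hedged sketch of how such a symbol reaches C code; in the 2.6 era kbuild emitted these defines into include/linux/autoconf.h, and the value here is hand-set for illustration:

#include <stdio.h>

/* Pretend this came from include/linux/autoconf.h, generated by kbuild
 * from "config X86_NO_TSS ... default y" when X86_XEN is selected. */
#define CONFIG_X86_NO_TSS 1

int main(void)
{
#ifdef CONFIG_X86_NO_TSS
    /* Under Xen the hypervisor owns the TSS, so TSS-dependent code
     * such as the doublefault handler (now "depends on !X86_NO_TSS"
     * above) is left out of the build entirely. */
    puts("TSS-dependent code compiled out");
#else
    puts("native TSS path");
#endif
    return 0;
}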
-diff --git a/arch/i386/Kconfig.cpu b/arch/i386/Kconfig.cpu
-index 79603b3..a52d7d4 100644
---- a/arch/i386/Kconfig.cpu
-+++ b/arch/i386/Kconfig.cpu
-@@ -251,7 +251,7 @@ config X86_PPRO_FENCE
-
- config X86_F00F_BUG
- bool
-- depends on M586MMX || M586TSC || M586 || M486 || M386
-+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !X86_NO_IDT
- default y
-
- config X86_WP_WORKS_OK
-diff --git a/arch/i386/Makefile b/arch/i386/Makefile
-index 36bef65..757acb8 100644
---- a/arch/i386/Makefile
-+++ b/arch/i386/Makefile
-@@ -68,6 +68,10 @@ mcore-$(CONFIG_X86_BIGSMP) := mach-defau
- mflags-$(CONFIG_X86_SUMMIT) := -Iinclude/asm-i386/mach-summit
- mcore-$(CONFIG_X86_SUMMIT) := mach-default
-
-+# Xen subarch support
-+mflags-$(CONFIG_X86_XEN) := -Iinclude/asm-i386/mach-xen
-+mcore-$(CONFIG_X86_XEN) := mach-xen
-+
- # generic subarchitecture
- mflags-$(CONFIG_X86_GENERICARCH) := -Iinclude/asm-i386/mach-generic
- mcore-$(CONFIG_X86_GENERICARCH) := mach-default
-@@ -96,12 +100,25 @@ drivers-$(CONFIG_PM) += arch/i386/powe
-
- CFLAGS += $(mflags-y)
- AFLAGS += $(mflags-y)
-+CPPFLAGS += $(mflags-y)
-
- boot := arch/i386/boot
-
- .PHONY: zImage bzImage compressed zlilo bzlilo \
- zdisk bzdisk fdimage fdimage144 fdimage288 install
-
-+ifdef CONFIG_XEN
-+head-y := arch/i386/kernel/head-xen.o arch/i386/kernel/init_task-xen.o
-+boot := arch/i386/boot-xen
-+.PHONY: vmlinuz
-+all: vmlinuz
-+
-+vmlinuz: vmlinux
-+ $(Q)$(MAKE) $(build)=$(boot) $@
-+
-+install:
-+ $(Q)$(MAKE) $(build)=$(boot) XENGUEST=$(XENGUEST) $@
-+else
- all: bzImage
-
- # KBUILD_IMAGE specify target image being built
-@@ -124,6 +141,7 @@ fdimage fdimage144 fdimage288: vmlinux
-
- install:
- $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) install
-+endif
-
- archclean:
- $(Q)$(MAKE) $(clean)=arch/i386/boot
-diff --git a/arch/i386/boot-xen/Makefile b/arch/i386/boot-xen/Makefile
-new file mode 100644
-index 0000000..bf15a77
---- /dev/null
-+++ b/arch/i386/boot-xen/Makefile
-@@ -0,0 +1,21 @@
-+
-+OBJCOPYFLAGS := -g --strip-unneeded
-+
-+vmlinuz: vmlinux-stripped FORCE
-+ $(call if_changed,gzip)
-+
-+vmlinux-stripped: vmlinux FORCE
-+ $(call if_changed,objcopy)
-+
-+INSTALL_ROOT := $(patsubst %/boot,%,$(INSTALL_PATH))
-+
-+XINSTALL_NAME ?= $(KERNELRELEASE)
-+install:
-+ mkdir -p $(INSTALL_ROOT)/boot
-+ ln -f -s vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX) $(INSTALL_ROOT)/boot/vmlinuz-$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(XENGUEST)$(INSTALL_SUFFIX)
-+ rm -f $(INSTALL_ROOT)/boot/vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
-+ install -m0644 vmlinuz $(INSTALL_ROOT)/boot/vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
-+ install -m0644 vmlinux $(INSTALL_ROOT)/boot/vmlinux-syms-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
-+ install -m0664 .config $(INSTALL_ROOT)/boot/config-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
-+ install -m0664 System.map $(INSTALL_ROOT)/boot/System.map-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
-+ ln -f -s vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX) $(INSTALL_ROOT)/boot/vmlinuz-$(VERSION).$(PATCHLEVEL)$(XENGUEST)$(INSTALL_SUFFIX)
-diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
-index 65656c0..5661a9b 100644
---- a/arch/i386/kernel/Makefile
-+++ b/arch/i386/kernel/Makefile
-@@ -37,17 +37,26 @@ obj-$(CONFIG_EFI) += efi.o efi_stub.o
- obj-$(CONFIG_DOUBLEFAULT) += doublefault.o
- obj-$(CONFIG_VM86) += vm86.o
- obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
-+obj-$(CONFIG_SMP_ALTERNATIVES) += smpalts.o
-
- EXTRA_AFLAGS := -traditional
-
- obj-$(CONFIG_SCx200) += scx200.o
-
-+ifdef CONFIG_XEN
-+vsyscall_note := vsyscall-note-xen.o
-+else
-+vsyscall_note := vsyscall-note.o
-+endif
-+
-+VSYSCALL_TYPES-y := int80
-+VSYSCALL_TYPES-$(CONFIG_X86_SYSENTER) += sysenter
- # vsyscall.o contains the vsyscall DSO images as __initdata.
- # We must build both images before we can assemble it.
- # Note: kbuild does not track this dependency due to usage of .incbin
--$(obj)/vsyscall.o: $(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so
--targets += $(foreach F,int80 sysenter,vsyscall-$F.o vsyscall-$F.so)
--targets += vsyscall-note.o vsyscall.lds
-+$(obj)/vsyscall.o: $(foreach F,$(VSYSCALL_TYPES-y),$(obj)/vsyscall-$F.so)
-+targets += $(foreach F,$(VSYSCALL_TYPES-y),vsyscall-$F.o vsyscall-$F.so)
-+targets += $(vsyscall_note) vsyscall.lds
-
- # The DSO images are built using a special linker script.
- quiet_cmd_syscall = SYSCALL $@
-@@ -62,7 +71,7 @@ SYSCFLAGS_vsyscall-int80.so = $(vsyscall
-
- $(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so: \
- $(obj)/vsyscall-%.so: $(src)/vsyscall.lds \
-- $(obj)/vsyscall-%.o $(obj)/vsyscall-note.o FORCE
-+ $(obj)/vsyscall-%.o $(obj)/$(vsyscall_note) FORCE
- $(call if_changed,syscall)
-
- # We also create a special relocatable object that should mirror the symbol
-@@ -74,5 +83,18 @@ $(obj)/built-in.o: ld_flags += -R $(obj)
-
- SYSCFLAGS_vsyscall-syms.o = -r
- $(obj)/vsyscall-syms.o: $(src)/vsyscall.lds \
-- $(obj)/vsyscall-sysenter.o $(obj)/vsyscall-note.o FORCE
-+ $(foreach F,$(VSYSCALL_TYPES-y),$(obj)/vsyscall-$F.o) \
-+ $(obj)/$(vsyscall_note) FORCE
- $(call if_changed,syscall)
-+
-+ifdef CONFIG_XEN
-+include $(srctree)/scripts/Makefile.xen
-+
-+obj-y += fixup.o
-+microcode-$(subst m,y,$(CONFIG_MICROCODE)) := microcode-xen.o
-+n-obj-xen := i8259.o timers/ reboot.o smpboot.o trampoline.o
-+
-+obj-y := $(call filterxen, $(obj-y), $(n-obj-xen))
-+obj-y := $(call cherrypickxen, $(obj-y))
-+extra-y := $(call cherrypickxen, $(extra-y))
-+endif
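The filterxen and cherrypickxen helpers come from the new scripts/Makefile.xen, which this excerpt does not show. Judging by their use here, filterxen drops the objects listed in n-obj-xen from obj-y (native PIC, timer, reboot, and SMP-boot code that Xen replaces), while cherrypickxen swaps each foo.o for foo-xen.o where a Xen-specific source exists, which is how the *-xen.c files added elsewhere in this patch get built in place of their native counterparts.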
-diff --git a/arch/i386/kernel/acpi/Makefile b/arch/i386/kernel/acpi/Makefile
-index 7e9ac99..fa783e6 100644
---- a/arch/i386/kernel/acpi/Makefile
-+++ b/arch/i386/kernel/acpi/Makefile
-@@ -6,3 +6,7 @@ ifneq ($(CONFIG_ACPI_PROCESSOR),)
- obj-y += cstate.o processor.o
- endif
-
-+ifdef CONFIG_XEN
-+include $(srctree)/scripts/Makefile.xen
-+obj-y := $(call cherrypickxen, $(obj-y), $(src))
-+endif
-diff --git a/arch/i386/kernel/acpi/boot-xen.c b/arch/i386/kernel/acpi/boot-xen.c
-new file mode 100644
-index 0000000..29b491e
---- /dev/null
-+++ b/arch/i386/kernel/acpi/boot-xen.c
-@@ -0,0 +1,1163 @@
-+/*
-+ * boot.c - Architecture-Specific Low-Level ACPI Boot Support
-+ *
-+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
-+ * Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
-+ *
-+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+ *
-+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-+ */
-+
-+#include <linux/init.h>
-+#include <linux/config.h>
-+#include <linux/acpi.h>
-+#include <linux/efi.h>
-+#include <linux/module.h>
-+#include <linux/dmi.h>
-+#include <linux/irq.h>
-+
-+#include <asm/pgtable.h>
-+#include <asm/io_apic.h>
-+#include <asm/apic.h>
-+#include <asm/io.h>
-+#include <asm/mpspec.h>
-+
-+#ifdef CONFIG_X86_64
-+
-+extern void __init clustered_apic_check(void);
-+
-+extern int gsi_irq_sharing(int gsi);
-+#include <asm/proto.h>
-+
-+static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 0; }
-+
-+#else /* X86 */
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+#include <mach_apic.h>
-+#include <mach_mpparse.h>
-+#endif /* CONFIG_X86_LOCAL_APIC */
-+
-+static inline int gsi_irq_sharing(int gsi) { return gsi; }
-+
-+#endif /* X86 */
-+
-+#define BAD_MADT_ENTRY(entry, end) ( \
-+ (!entry) || (unsigned long)entry + sizeof(*entry) > end || \
-+ ((acpi_table_entry_header *)entry)->length != sizeof(*entry))
-+
-+#define PREFIX "ACPI: "
-+
-+int acpi_noirq __initdata; /* skip ACPI IRQ initialization */
-+int acpi_pci_disabled __initdata; /* skip ACPI PCI scan and IRQ initialization */
-+int acpi_ht __initdata = 1; /* enable HT */
-+
-+int acpi_lapic;
-+int acpi_ioapic;
-+int acpi_strict;
-+EXPORT_SYMBOL(acpi_strict);
-+
-+acpi_interrupt_flags acpi_sci_flags __initdata;
-+int acpi_sci_override_gsi __initdata;
-+int acpi_skip_timer_override __initdata;
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
-+#endif
-+
-+#ifndef __HAVE_ARCH_CMPXCHG
-+#warning ACPI uses CMPXCHG, i486 and later hardware
-+#endif
-+
-+#define MAX_MADT_ENTRIES 256
-+u8 x86_acpiid_to_apicid[MAX_MADT_ENTRIES] =
-+ {[0 ... MAX_MADT_ENTRIES - 1] = 0xff };
-+EXPORT_SYMBOL(x86_acpiid_to_apicid);
-+
-+/* --------------------------------------------------------------------------
-+ Boot-time Configuration
-+ -------------------------------------------------------------------------- */
-+
-+/*
-+ * The default interrupt routing model is PIC (8259). This gets
-+ * overridden if IOAPICs are enumerated (below).
-+ */
-+enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
-+
-+#if defined(CONFIG_X86_64) && !defined(CONFIG_XEN)
-+
-+/* rely on all ACPI tables being in the direct mapping */
-+char *__acpi_map_table(unsigned long phys_addr, unsigned long size)
-+{
-+ if (!phys_addr || !size)
-+ return NULL;
-+
-+ if (phys_addr+size <= (end_pfn_map << PAGE_SHIFT) + PAGE_SIZE)
-+ return __va(phys_addr);
-+
-+ return NULL;
-+}
-+
-+#else
-+
-+/*
-+ * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
-+ * to map the target physical address. The problem is that set_fixmap()
-+ * provides a single page, and it is possible that the page is not
-+ * sufficient.
-+ * By using this area, we can map up to MAX_IO_APICS pages temporarily,
-+ * i.e. until the next __va_range() call.
-+ *
-+ * Important Safety Note: The fixed I/O APIC page numbers are *subtracted*
-+ * from the fixed base. That's why we start at FIX_IO_APIC_BASE_END and
-+ * count idx down while incrementing the phys address.
-+ */
-+char *__acpi_map_table(unsigned long phys, unsigned long size)
-+{
-+ unsigned long base, offset, mapped_size;
-+ int idx;
-+
-+#ifndef CONFIG_XEN
-+ if (phys + size < 8 * 1024 * 1024)
-+ return __va(phys);
-+#endif
-+
-+ offset = phys & (PAGE_SIZE - 1);
-+ mapped_size = PAGE_SIZE - offset;
-+ set_fixmap(FIX_ACPI_END, phys);
-+ base = fix_to_virt(FIX_ACPI_END);
-+
-+ /*
-+ * Most cases can be covered by the below.
-+ */
-+ idx = FIX_ACPI_END;
-+ while (mapped_size < size) {
-+ if (--idx < FIX_ACPI_BEGIN)
-+ return NULL; /* cannot handle this */
-+ phys += PAGE_SIZE;
-+ set_fixmap(idx, phys);
-+ mapped_size += PAGE_SIZE;
-+ }
-+
-+ return ((unsigned char *)base + offset);
-+}
-+#endif
-+
-+#ifdef CONFIG_PCI_MMCONFIG
-+/* The physical address of the MMCONFIG aperture. Set from ACPI tables. */
-+struct acpi_table_mcfg_config *pci_mmcfg_config;
-+int pci_mmcfg_config_num;
-+
-+int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
-+{
-+ struct acpi_table_mcfg *mcfg;
-+ unsigned long i;
-+ int config_size;
-+
-+ if (!phys_addr || !size)
-+ return -EINVAL;
-+
-+ mcfg = (struct acpi_table_mcfg *)__acpi_map_table(phys_addr, size);
-+ if (!mcfg) {
-+ printk(KERN_WARNING PREFIX "Unable to map MCFG\n");
-+ return -ENODEV;
-+ }
-+
-+ /* how many config structures do we have */
-+ pci_mmcfg_config_num = 0;
-+ i = size - sizeof(struct acpi_table_mcfg);
-+ while (i >= sizeof(struct acpi_table_mcfg_config)) {
-+ ++pci_mmcfg_config_num;
-+ i -= sizeof(struct acpi_table_mcfg_config);
-+ };
-+ if (pci_mmcfg_config_num == 0) {
-+ printk(KERN_ERR PREFIX "MMCONFIG has no entries\n");
-+ return -ENODEV;
-+ }
-+
-+ config_size = pci_mmcfg_config_num * sizeof(*pci_mmcfg_config);
-+ pci_mmcfg_config = kmalloc(config_size, GFP_KERNEL);
-+ if (!pci_mmcfg_config) {
-+ printk(KERN_WARNING PREFIX
-+ "No memory for MCFG config tables\n");
-+ return -ENOMEM;
-+ }
-+
-+ memcpy(pci_mmcfg_config, &mcfg->config, config_size);
-+ for (i = 0; i < pci_mmcfg_config_num; ++i) {
-+ if (mcfg->config[i].base_reserved) {
-+ printk(KERN_ERR PREFIX
-+ "MMCONFIG not in low 4GB of memory\n");
-+ return -ENODEV;
-+ }
-+ }
-+
-+ return 0;
-+}
-+#endif /* CONFIG_PCI_MMCONFIG */
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
-+{
-+ struct acpi_table_madt *madt = NULL;
-+
-+ if (!phys_addr || !size)
-+ return -EINVAL;
-+
-+ madt = (struct acpi_table_madt *)__acpi_map_table(phys_addr, size);
-+ if (!madt) {
-+ printk(KERN_WARNING PREFIX "Unable to map MADT\n");
-+ return -ENODEV;
-+ }
-+
-+ if (madt->lapic_address) {
-+ acpi_lapic_addr = (u64) madt->lapic_address;
-+
-+ printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
-+ madt->lapic_address);
-+ }
-+
-+ acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
-+
-+ return 0;
-+}
-+
-+static int __init
-+acpi_parse_lapic(acpi_table_entry_header * header, const unsigned long end)
-+{
-+ struct acpi_table_lapic *processor = NULL;
-+
-+ processor = (struct acpi_table_lapic *)header;
-+
-+ if (BAD_MADT_ENTRY(processor, end))
-+ return -EINVAL;
-+
-+ acpi_table_print_madt_entry(header);
-+
-+ /* Record local apic id only when enabled */
-+ if (processor->flags.enabled)
-+ x86_acpiid_to_apicid[processor->acpi_id] = processor->id;
-+
-+ /*
-+ * We need to register disabled CPUs as well to permit
-+ * counting them. This allows us to size
-+ * cpus_possible_map more accurately, so that we need
-+ * not preallocate memory for all NR_CPUS
-+ * when we use CPU hotplug.
-+ */
-+ mp_register_lapic(processor->id, /* APIC ID */
-+ processor->flags.enabled); /* Enabled? */
-+
-+ return 0;
-+}
-+
-+static int __init
-+acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
-+ const unsigned long end)
-+{
-+ struct acpi_table_lapic_addr_ovr *lapic_addr_ovr = NULL;
-+
-+ lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr *)header;
-+
-+ if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
-+ return -EINVAL;
-+
-+ acpi_lapic_addr = lapic_addr_ovr->address;
-+
-+ return 0;
-+}
-+
-+static int __init
-+acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
-+{
-+ struct acpi_table_lapic_nmi *lapic_nmi = NULL;
-+
-+ lapic_nmi = (struct acpi_table_lapic_nmi *)header;
-+
-+ if (BAD_MADT_ENTRY(lapic_nmi, end))
-+ return -EINVAL;
-+
-+ acpi_table_print_madt_entry(header);
-+
-+ if (lapic_nmi->lint != 1)
-+ printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
-+
-+ return 0;
-+}
-+
-+#endif /*CONFIG_X86_LOCAL_APIC */
-+
-+#ifdef CONFIG_X86_IO_APIC
-+
-+static int __init
-+acpi_parse_ioapic(acpi_table_entry_header * header, const unsigned long end)
-+{
-+ struct acpi_table_ioapic *ioapic = NULL;
-+
-+ ioapic = (struct acpi_table_ioapic *)header;
-+
-+ if (BAD_MADT_ENTRY(ioapic, end))
-+ return -EINVAL;
-+
-+ acpi_table_print_madt_entry(header);
-+
-+ mp_register_ioapic(ioapic->id,
-+ ioapic->address, ioapic->global_irq_base);
-+
-+ return 0;
-+}
-+
-+/*
-+ * Parse Interrupt Source Override for the ACPI SCI
-+ */
-+static void acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
-+{
-+ if (trigger == 0) /* compatible SCI trigger is level */
-+ trigger = 3;
-+
-+ if (polarity == 0) /* compatible SCI polarity is low */
-+ polarity = 3;
-+
-+ /* Command-line over-ride via acpi_sci= */
-+ if (acpi_sci_flags.trigger)
-+ trigger = acpi_sci_flags.trigger;
-+
-+ if (acpi_sci_flags.polarity)
-+ polarity = acpi_sci_flags.polarity;
-+
-+ /*
-+ * mp_config_acpi_legacy_irqs() already setup IRQs < 16
-+ * If GSI is < 16, this will update its flags,
-+ * else it will create a new mp_irqs[] entry.
-+ */
-+ mp_override_legacy_irq(gsi, polarity, trigger, gsi);
-+
-+ /*
-+ * stash over-ride to indicate we've been here
-+ * and for later update of acpi_fadt
-+ */
-+ acpi_sci_override_gsi = gsi;
-+ return;
-+}
-+
-+static int __init
-+acpi_parse_int_src_ovr(acpi_table_entry_header * header,
-+ const unsigned long end)
-+{
-+ struct acpi_table_int_src_ovr *intsrc = NULL;
-+
-+ intsrc = (struct acpi_table_int_src_ovr *)header;
-+
-+ if (BAD_MADT_ENTRY(intsrc, end))
-+ return -EINVAL;
-+
-+ acpi_table_print_madt_entry(header);
-+
-+ if (intsrc->bus_irq == acpi_fadt.sci_int) {
-+ acpi_sci_ioapic_setup(intsrc->global_irq,
-+ intsrc->flags.polarity,
-+ intsrc->flags.trigger);
-+ return 0;
-+ }
-+
-+ if (acpi_skip_timer_override &&
-+ intsrc->bus_irq == 0 && intsrc->global_irq == 2) {
-+ printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
-+ return 0;
-+ }
-+
-+ mp_override_legacy_irq(intsrc->bus_irq,
-+ intsrc->flags.polarity,
-+ intsrc->flags.trigger, intsrc->global_irq);
-+
-+ return 0;
-+}
-+
-+static int __init
-+acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end)
-+{
-+ struct acpi_table_nmi_src *nmi_src = NULL;
-+
-+ nmi_src = (struct acpi_table_nmi_src *)header;
-+
-+ if (BAD_MADT_ENTRY(nmi_src, end))
-+ return -EINVAL;
-+
-+ acpi_table_print_madt_entry(header);
-+
-+ /* TBD: Support nmisrc entries? */
-+
-+ return 0;
-+}
-+
-+#endif /* CONFIG_X86_IO_APIC */
-+
-+/*
-+ * acpi_pic_sci_set_trigger()
-+ *
-+ * use ELCR to set PIC-mode trigger type for SCI
-+ *
-+ * If a PIC-mode SCI is not recognized or gives spurious IRQ7's
-+ * it may require Edge Trigger -- use "acpi_sci=edge"
-+ *
-+ * Port 0x4d0-4d1 are ECLR1 and ECLR2, the Edge/Level Control Registers
-+ * for the 8259 PIC. bit[n] = 1 means irq[n] is Level, otherwise Edge.
-+ * ECLR1 is IRQ's 0-7 (IRQ 0, 1, 2 must be 0)
-+ * ECLR2 is IRQ's 8-15 (IRQ 8, 13 must be 0)
-+ */
-+
-+void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
-+{
-+ unsigned int mask = 1 << irq;
-+ unsigned int old, new;
-+
-+ /* Real old ELCR mask */
-+ old = inb(0x4d0) | (inb(0x4d1) << 8);
-+
-+ /*
-+ * If we use ACPI to set PCI irq's, then we should clear ELCR
-+ * since we will set it correctly as we enable the PCI irq
-+ * routing.
-+ */
-+ new = acpi_noirq ? old : 0;
-+
-+ /*
-+ * Update SCI information in the ELCR, it isn't in the PCI
-+ * routing tables..
-+ */
-+ switch (trigger) {
-+ case 1: /* Edge - clear */
-+ new &= ~mask;
-+ break;
-+ case 3: /* Level - set */
-+ new |= mask;
-+ break;
-+ }
-+
-+ if (old == new)
-+ return;
-+
-+ printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old);
-+ outb(new, 0x4d0);
-+ outb(new >> 8, 0x4d1);
-+}
-+
-+int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
-+{
-+#ifdef CONFIG_X86_IO_APIC
-+ if (use_pci_vector() && !platform_legacy_irq(gsi))
-+ *irq = IO_APIC_VECTOR(gsi);
-+ else
-+#endif
-+ *irq = gsi_irq_sharing(gsi);
-+ return 0;
-+}
-+
-+/*
-+ * success: return IRQ number (>=0)
-+ * failure: return < 0
-+ */
-+int acpi_register_gsi(u32 gsi, int triggering, int polarity)
-+{
-+ unsigned int irq;
-+ unsigned int plat_gsi = gsi;
-+
-+#ifdef CONFIG_PCI
-+ /*
-+ * Make sure all (legacy) PCI IRQs are set as level-triggered.
-+ */
-+ if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
-+ extern void eisa_set_level_irq(unsigned int irq);
-+
-+ if (triggering == ACPI_LEVEL_SENSITIVE)
-+ eisa_set_level_irq(gsi);
-+ }
-+#endif
-+
-+#ifdef CONFIG_X86_IO_APIC
-+ if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) {
-+ plat_gsi = mp_register_gsi(gsi, triggering, polarity);
-+ }
-+#endif
-+ acpi_gsi_to_irq(plat_gsi, &irq);
-+ return irq;
-+}
-+
-+EXPORT_SYMBOL(acpi_register_gsi);
-+
-+/*
-+ * ACPI based hotplug support for CPU
-+ */
-+#ifdef CONFIG_ACPI_HOTPLUG_CPU
-+int acpi_map_lsapic(acpi_handle handle, int *pcpu)
-+{
-+ /* TBD */
-+ return -EINVAL;
-+}
-+
-+EXPORT_SYMBOL(acpi_map_lsapic);
-+
-+int acpi_unmap_lsapic(int cpu)
-+{
-+ /* TBD */
-+ return -EINVAL;
-+}
-+
-+EXPORT_SYMBOL(acpi_unmap_lsapic);
-+#endif /* CONFIG_ACPI_HOTPLUG_CPU */
-+
-+int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
-+{
-+ /* TBD */
-+ return -EINVAL;
-+}
-+
-+EXPORT_SYMBOL(acpi_register_ioapic);
-+
-+int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
-+{
-+ /* TBD */
-+ return -EINVAL;
-+}
-+
-+EXPORT_SYMBOL(acpi_unregister_ioapic);
-+
-+static unsigned long __init
-+acpi_scan_rsdp(unsigned long start, unsigned long length)
-+{
-+ unsigned long offset = 0;
-+ unsigned long sig_len = sizeof("RSD PTR ") - 1;
-+ unsigned long vstart = (unsigned long)isa_bus_to_virt(start);
-+
-+ /*
-+ * Scan all 16-byte boundaries of the physical memory region for the
-+ * RSDP signature.
-+ */
-+ for (offset = 0; offset < length; offset += 16) {
-+ if (strncmp((char *)(vstart + offset), "RSD PTR ", sig_len))
-+ continue;
-+ return (start + offset);
-+ }
-+
-+ return 0;
-+}
-+
-+static int __init acpi_parse_sbf(unsigned long phys_addr, unsigned long size)
-+{
-+ struct acpi_table_sbf *sb;
-+
-+ if (!phys_addr || !size)
-+ return -EINVAL;
-+
-+ sb = (struct acpi_table_sbf *)__acpi_map_table(phys_addr, size);
-+ if (!sb) {
-+ printk(KERN_WARNING PREFIX "Unable to map SBF\n");
-+ return -ENODEV;
-+ }
-+
-+ sbf_port = sb->sbf_cmos; /* Save CMOS port */
-+
-+ return 0;
-+}
-+
-+#ifdef CONFIG_HPET_TIMER
-+
-+static int __init acpi_parse_hpet(unsigned long phys, unsigned long size)
-+{
-+ struct acpi_table_hpet *hpet_tbl;
-+
-+ if (!phys || !size)
-+ return -EINVAL;
-+
-+ hpet_tbl = (struct acpi_table_hpet *)__acpi_map_table(phys, size);
-+ if (!hpet_tbl) {
-+ printk(KERN_WARNING PREFIX "Unable to map HPET\n");
-+ return -ENODEV;
-+ }
-+
-+ if (hpet_tbl->addr.space_id != ACPI_SPACE_MEM) {
-+ printk(KERN_WARNING PREFIX "HPET timers must be located in "
-+ "memory.\n");
-+ return -1;
-+ }
-+#ifdef CONFIG_X86_64
-+ vxtime.hpet_address = hpet_tbl->addr.addrl |
-+ ((long)hpet_tbl->addr.addrh << 32);
-+
-+ printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
-+ hpet_tbl->id, vxtime.hpet_address);
-+#else /* X86 */
-+ {
-+ extern unsigned long hpet_address;
-+
-+ hpet_address = hpet_tbl->addr.addrl;
-+ printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
-+ hpet_tbl->id, hpet_address);
-+ }
-+#endif /* X86 */
-+
-+ return 0;
-+}
-+#else
-+#define acpi_parse_hpet NULL
-+#endif
-+
-+#ifdef CONFIG_X86_PM_TIMER
-+extern u32 pmtmr_ioport;
-+#endif
-+
-+static int __init acpi_parse_fadt(unsigned long phys, unsigned long size)
-+{
-+ struct fadt_descriptor_rev2 *fadt = NULL;
-+
-+ fadt = (struct fadt_descriptor_rev2 *)__acpi_map_table(phys, size);
-+ if (!fadt) {
-+ printk(KERN_WARNING PREFIX "Unable to map FADT\n");
-+ return 0;
-+ }
-+ /* initialize sci_int early for INT_SRC_OVR MADT parsing */
-+ acpi_fadt.sci_int = fadt->sci_int;
-+
-+ /* initialize rev and apic_phys_dest_mode for x86_64 genapic */
-+ acpi_fadt.revision = fadt->revision;
-+ acpi_fadt.force_apic_physical_destination_mode =
-+ fadt->force_apic_physical_destination_mode;
-+
-+#if defined(CONFIG_X86_PM_TIMER) && !defined(CONFIG_XEN)
-+ /* detect the location of the ACPI PM Timer */
-+ if (fadt->revision >= FADT2_REVISION_ID) {
-+ /* FADT rev. 2 */
-+ if (fadt->xpm_tmr_blk.address_space_id !=
-+ ACPI_ADR_SPACE_SYSTEM_IO)
-+ return 0;
-+
-+ pmtmr_ioport = fadt->xpm_tmr_blk.address;
-+ /*
-+ * "X" fields are optional extensions to the original V1.0
-+ * fields, so we must selectively expand V1.0 fields if the
-+ * corresponding X field is zero.
-+ */
-+ if (!pmtmr_ioport)
-+ pmtmr_ioport = fadt->V1_pm_tmr_blk;
-+ } else {
-+ /* FADT rev. 1 */
-+ pmtmr_ioport = fadt->V1_pm_tmr_blk;
-+ }
-+ if (pmtmr_ioport)
-+ printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n",
-+ pmtmr_ioport);
-+#endif
-+ return 0;
-+}
-+
-+unsigned long __init acpi_find_rsdp(void)
-+{
-+ unsigned long rsdp_phys = 0;
-+
-+ if (efi_enabled) {
-+ if (efi.acpi20)
-+ return __pa(efi.acpi20);
-+ else if (efi.acpi)
-+ return __pa(efi.acpi);
-+ }
-+ /*
-+ * Scan memory looking for the RSDP signature. First search EBDA (low
-+ * memory) paragraphs and then search upper memory (E0000-FFFFF).
-+ */
-+ rsdp_phys = acpi_scan_rsdp(0, 0x400);
-+ if (!rsdp_phys)
-+ rsdp_phys = acpi_scan_rsdp(0xE0000, 0x20000);
-+
-+ return rsdp_phys;
-+}
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+/*
-+ * Parse LAPIC entries in MADT
-+ * returns 0 on success, < 0 on error
-+ */
-+static int __init acpi_parse_madt_lapic_entries(void)
-+{
-+ int count;
-+
-+ /*
-+ * Note that the LAPIC address is obtained from the MADT (32-bit value)
-+ * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
-+ */
-+
-+ count =
-+ acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR,
-+ acpi_parse_lapic_addr_ovr, 0);
-+ if (count < 0) {
-+ printk(KERN_ERR PREFIX
-+ "Error parsing LAPIC address override entry\n");
-+ return count;
-+ }
-+
-+ mp_register_lapic_address(acpi_lapic_addr);
-+
-+ count = acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic,
-+ MAX_APICS);
-+ if (!count) {
-+ printk(KERN_ERR PREFIX "No LAPIC entries present\n");
-+ /* TBD: Cleanup to allow fallback to MPS */
-+ return -ENODEV;
-+ } else if (count < 0) {
-+ printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
-+ /* TBD: Cleanup to allow fallback to MPS */
-+ return count;
-+ }
-+
-+ count =
-+ acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0);
-+ if (count < 0) {
-+ printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
-+ /* TBD: Cleanup to allow fallback to MPS */
-+ return count;
-+ }
-+ return 0;
-+}
-+#endif /* CONFIG_X86_LOCAL_APIC */
-+
-+#ifdef CONFIG_X86_IO_APIC
-+/*
-+ * Parse IOAPIC related entries in MADT
-+ * returns 0 on success, < 0 on error
-+ */
-+static int __init acpi_parse_madt_ioapic_entries(void)
-+{
-+ int count;
-+
-+ /*
-+ * ACPI interpreter is required to complete interrupt setup,
-+ * so if it is off, don't enumerate the io-apics with ACPI.
-+ * If MPS is present, it will handle them,
-+ * otherwise the system will stay in PIC mode
-+ */
-+ if (acpi_disabled || acpi_noirq) {
-+ return -ENODEV;
-+ }
-+
-+ /*
-+ * if "noapic" boot option, don't look for IO-APICs
-+ */
-+ if (skip_ioapic_setup) {
-+ printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
-+ "due to 'noapic' option.\n");
-+ return -ENODEV;
-+ }
-+
-+ count =
-+ acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic,
-+ MAX_IO_APICS);
-+ if (!count) {
-+ printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
-+ return -ENODEV;
-+ } else if (count < 0) {
-+ printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
-+ return count;
-+ }
-+
-+ count =
-+ acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr,
-+ NR_IRQ_VECTORS);
-+ if (count < 0) {
-+ printk(KERN_ERR PREFIX
-+ "Error parsing interrupt source overrides entry\n");
-+ /* TBD: Cleanup to allow fallback to MPS */
-+ return count;
-+ }
-+
-+ /*
-+ * If BIOS did not supply an INT_SRC_OVR for the SCI
-+ * pretend we got one so we can set the SCI flags.
-+ */
-+ if (!acpi_sci_override_gsi)
-+ acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0);
-+
-+ /* Fill in identity legacy mappings where no override */
-+ mp_config_acpi_legacy_irqs();
-+
-+ count =
-+ acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src,
-+ NR_IRQ_VECTORS);
-+ if (count < 0) {
-+ printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
-+ /* TBD: Cleanup to allow fallback to MPS */
-+ return count;
-+ }
-+
-+ return 0;
-+}
-+#else
-+static inline int acpi_parse_madt_ioapic_entries(void)
-+{
-+ return -1;
-+}
-+#endif /* !CONFIG_X86_IO_APIC */
-+
-+static void __init acpi_process_madt(void)
-+{
-+#ifdef CONFIG_X86_LOCAL_APIC
-+ int count, error;
-+
-+ count = acpi_table_parse(ACPI_APIC, acpi_parse_madt);
-+ if (count >= 1) {
-+
-+ /*
-+ * Parse MADT LAPIC entries
-+ */
-+ error = acpi_parse_madt_lapic_entries();
-+ if (!error) {
-+ acpi_lapic = 1;
-+
-+#ifdef CONFIG_X86_GENERICARCH
-+ generic_bigsmp_probe();
-+#endif
-+ /*
-+ * Parse MADT IO-APIC entries
-+ */
-+ error = acpi_parse_madt_ioapic_entries();
-+ if (!error) {
-+ acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
-+ acpi_irq_balance_set(NULL);
-+ acpi_ioapic = 1;
-+
-+ smp_found_config = 1;
-+ clustered_apic_check();
-+ }
-+ }
-+ if (error == -EINVAL) {
-+ /*
-+ * Dell Precision Workstation 410, 610 come here.
-+ */
-+ printk(KERN_ERR PREFIX
-+ "Invalid BIOS MADT, disabling ACPI\n");
-+ disable_acpi();
-+ }
-+ }
-+#endif
-+ return;
-+}
-+
-+extern int acpi_force;
-+
-+#ifdef __i386__
-+
-+static int __init disable_acpi_irq(struct dmi_system_id *d)
-+{
-+ if (!acpi_force) {
-+ printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n",
-+ d->ident);
-+ acpi_noirq_set();
-+ }
-+ return 0;
-+}
-+
-+static int __init disable_acpi_pci(struct dmi_system_id *d)
-+{
-+ if (!acpi_force) {
-+ printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n",
-+ d->ident);
-+ acpi_disable_pci();
-+ }
-+ return 0;
-+}
-+
-+static int __init dmi_disable_acpi(struct dmi_system_id *d)
-+{
-+ if (!acpi_force) {
-+ printk(KERN_NOTICE "%s detected: acpi off\n", d->ident);
-+ disable_acpi();
-+ } else {
-+ printk(KERN_NOTICE
-+ "Warning: DMI blacklist says broken, but acpi forced\n");
-+ }
-+ return 0;
-+}
-+
-+/*
-+ * Limit ACPI to CPU enumeration for HT
-+ */
-+static int __init force_acpi_ht(struct dmi_system_id *d)
-+{
-+ if (!acpi_force) {
-+ printk(KERN_NOTICE "%s detected: force use of acpi=ht\n",
-+ d->ident);
-+ disable_acpi();
-+ acpi_ht = 1;
-+ } else {
-+ printk(KERN_NOTICE
-+ "Warning: acpi=force overrules DMI blacklist: acpi=ht\n");
-+ }
-+ return 0;
-+}
-+
-+/*
-+ * If your system is blacklisted here, but you find that acpi=force
-+ * works for you, please contact acpi-devel@sourceforge.net
-+ */
-+static struct dmi_system_id __initdata acpi_dmi_table[] = {
-+ /*
-+ * Boxes that need ACPI disabled
-+ */
-+ {
-+ .callback = dmi_disable_acpi,
-+ .ident = "IBM Thinkpad",
-+ .matches = {
-+ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
-+ DMI_MATCH(DMI_BOARD_NAME, "2629H1G"),
-+ },
-+ },
-+
-+ /*
-+ * Boxes that need acpi=ht
-+ */
-+ {
-+ .callback = force_acpi_ht,
-+ .ident = "FSC Primergy T850",
-+ .matches = {
-+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
-+ DMI_MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"),
-+ },
-+ },
-+ {
-+ .callback = force_acpi_ht,
-+ .ident = "DELL GX240",
-+ .matches = {
-+ DMI_MATCH(DMI_BOARD_VENDOR, "Dell Computer Corporation"),
-+ DMI_MATCH(DMI_BOARD_NAME, "OptiPlex GX240"),
-+ },
-+ },
-+ {
-+ .callback = force_acpi_ht,
-+ .ident = "HP VISUALIZE NT Workstation",
-+ .matches = {
-+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
-+ DMI_MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"),
-+ },
-+ },
-+ {
-+ .callback = force_acpi_ht,
-+ .ident = "Compaq Workstation W8000",
-+ .matches = {
-+ DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
-+ DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"),
-+ },
-+ },
-+ {
-+ .callback = force_acpi_ht,
-+ .ident = "ASUS P4B266",
-+ .matches = {
-+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
-+ DMI_MATCH(DMI_BOARD_NAME, "P4B266"),
-+ },
-+ },
-+ {
-+ .callback = force_acpi_ht,
-+ .ident = "ASUS P2B-DS",
-+ .matches = {
-+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
-+ DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"),
-+ },
-+ },
-+ {
-+ .callback = force_acpi_ht,
-+ .ident = "ASUS CUR-DLS",
-+ .matches = {
-+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
-+ DMI_MATCH(DMI_BOARD_NAME, "CUR-DLS"),
-+ },
-+ },
-+ {
-+ .callback = force_acpi_ht,
-+ .ident = "ABIT i440BX-W83977",
-+ .matches = {
-+ DMI_MATCH(DMI_BOARD_VENDOR, "ABIT <http://www.abit.com>"),
-+ DMI_MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"),
-+ },
-+ },
-+ {
-+ .callback = force_acpi_ht,
-+ .ident = "IBM Bladecenter",
-+ .matches = {
-+ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
-+ DMI_MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"),
-+ },
-+ },
-+ {
-+ .callback = force_acpi_ht,
-+ .ident = "IBM eServer xSeries 360",
-+ .matches = {
-+ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
-+ DMI_MATCH(DMI_BOARD_NAME, "eServer xSeries 360"),
-+ },
-+ },
-+ {
-+ .callback = force_acpi_ht,
-+ .ident = "IBM eserver xSeries 330",
-+ .matches = {
-+ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
-+ DMI_MATCH(DMI_BOARD_NAME, "eserver xSeries 330"),
-+ },
-+ },
-+ {
-+ .callback = force_acpi_ht,
-+ .ident = "IBM eserver xSeries 440",
-+ .matches = {
-+ DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
-+ DMI_MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"),
-+ },
-+ },
-+
-+ /*
-+ * Boxes that need ACPI PCI IRQ routing disabled
-+ */
-+ {
-+ .callback = disable_acpi_irq,
-+ .ident = "ASUS A7V",
-+ .matches = {
-+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"),
-+ DMI_MATCH(DMI_BOARD_NAME, "<A7V>"),
-+ /* newer BIOS, Revision 1011, does work */
-+ DMI_MATCH(DMI_BIOS_VERSION,
-+ "ASUS A7V ACPI BIOS Revision 1007"),
-+ },
-+ },
-+
-+ /*
-+ * Boxes that need ACPI PCI IRQ routing and PCI scan disabled
-+ */
-+ { /* _BBN 0 bug */
-+ .callback = disable_acpi_pci,
-+ .ident = "ASUS PR-DLS",
-+ .matches = {
-+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
-+ DMI_MATCH(DMI_BOARD_NAME, "PR-DLS"),
-+ DMI_MATCH(DMI_BIOS_VERSION,
-+ "ASUS PR-DLS ACPI BIOS Revision 1010"),
-+ DMI_MATCH(DMI_BIOS_DATE, "03/21/2003")
-+ },
-+ },
-+ {
-+ .callback = disable_acpi_pci,
-+ .ident = "Acer TravelMate 36x Laptop",
-+ .matches = {
-+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
-+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
-+ },
-+ },
-+ {}
-+};
-+
-+#endif /* __i386__ */
-+
-+/*
-+ * acpi_boot_table_init() and acpi_boot_init()
-+ * called from setup_arch(), always.
-+ * 1. checksums all tables
-+ * 2. enumerates lapics
-+ * 3. enumerates io-apics
-+ *
-+ * acpi_table_init() is separate to allow reading SRAT without
-+ * other side effects.
-+ *
-+ * side effects of acpi_boot_init:
-+ * acpi_lapic = 1 if LAPIC found
-+ * acpi_ioapic = 1 if IOAPIC found
-+ * if (acpi_lapic && acpi_ioapic) smp_found_config = 1;
-+ * if acpi_blacklisted() acpi_disabled = 1;
-+ * acpi_irq_model=...
-+ * ...
-+ *
-+ * return value: (currently ignored)
-+ * 0: success
-+ * !0: failure
-+ */
-+
-+int __init acpi_boot_table_init(void)
-+{
-+ int error;
-+
-+#ifdef __i386__
-+ dmi_check_system(acpi_dmi_table);
-+#endif
-+
-+ /*
-+ * If acpi_disabled, bail out
-+ * One exception: acpi=ht continues far enough to enumerate LAPICs
-+ */
-+ if (acpi_disabled && !acpi_ht)
-+ return 1;
-+
-+ /*
-+ * Initialize the ACPI boot-time table parser.
-+ */
-+ error = acpi_table_init();
-+ if (error) {
-+ disable_acpi();
-+ return error;
-+ }
-+#ifdef __i386__
-+ check_acpi_pci();
-+#endif
-+
-+ acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
-+
-+ /*
-+ * blacklist may disable ACPI entirely
-+ */
-+ error = acpi_blacklisted();
-+ if (error) {
-+ if (acpi_force) {
-+ printk(KERN_WARNING PREFIX "acpi=force override\n");
-+ } else {
-+ printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
-+ disable_acpi();
-+ return error;
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+int __init acpi_boot_init(void)
-+{
-+ /*
-+ * If acpi_disabled, bail out
-+ * One exception: acpi=ht continues far enough to enumerate LAPICs
-+ */
-+ if (acpi_disabled && !acpi_ht)
-+ return 1;
-+
-+ acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
-+
-+ /*
-+ * set sci_int and PM timer address
-+ */
-+ acpi_table_parse(ACPI_FADT, acpi_parse_fadt);
-+
-+ /*
-+ * Process the Multiple APIC Description Table (MADT), if present
-+ */
-+ acpi_process_madt();
-+
-+ acpi_table_parse(ACPI_HPET, acpi_parse_hpet);
-+
-+ return 0;
-+}
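Most of boot-xen.c is a copy of the native i386 ACPI boot code with CONFIG_XEN guards around the direct physical-memory shortcuts (the __va() fast path in __acpi_map_table and the PM-timer probe). The RSDP scan survives unchanged; here is a self-contained sketch of that loop, with a fabricated buffer and planted signature for illustration:

#include <stdio.h>
#include <string.h>

/* Walk 16-byte boundaries of a memory area looking for the ACPI
 * "RSD PTR " signature, as acpi_scan_rsdp() above does over the
 * EBDA and the E0000-FFFFF BIOS area. */
static long scan_rsdp(const char *buf, unsigned long length)
{
    const unsigned long sig_len = sizeof("RSD PTR ") - 1;
    unsigned long off;

    for (off = 0; off + sig_len <= length; off += 16)
        if (!strncmp(buf + off, "RSD PTR ", sig_len))
            return (long)off;
    return -1;
}

int main(void)
{
    char area[64] = { 0 };

    memcpy(area + 32, "RSD PTR ", sizeof("RSD PTR ") - 1); /* plant one */
    printf("signature found at offset %ld\n", scan_rsdp(area, sizeof(area)));
    return 0;
}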
-diff --git a/arch/i386/kernel/apic-xen.c b/arch/i386/kernel/apic-xen.c
-new file mode 100644
-index 0000000..07a0994
---- /dev/null
-+++ b/arch/i386/kernel/apic-xen.c
-@@ -0,0 +1,140 @@
-+/*
-+ * Local APIC handling, local APIC timers
-+ *
-+ * (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
-+ *
-+ * Fixes
-+ * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
-+ * thanks to Eric Gilmore
-+ * and Rolf G. Tews
-+ * for testing these extensively.
-+ * Maciej W. Rozycki : Various updates and fixes.
-+ * Mikael Pettersson : Power Management for UP-APIC.
-+ * Pavel Machek and
-+ * Mikael Pettersson : PM converted to driver model.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/init.h>
-+
-+#include <linux/mm.h>
-+#include <linux/delay.h>
-+#include <linux/bootmem.h>
-+#include <linux/smp_lock.h>
-+#include <linux/interrupt.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/sysdev.h>
-+#include <linux/cpu.h>
-+#include <linux/module.h>
-+
-+#include <asm/atomic.h>
-+#include <asm/smp.h>
-+#include <asm/mtrr.h>
-+#include <asm/mpspec.h>
-+#include <asm/desc.h>
-+#include <asm/arch_hooks.h>
-+#include <asm/hpet.h>
-+#include <asm/i8253.h>
-+
-+#include <mach_apic.h>
-+#include <mach_ipi.h>
-+
-+#include "io_ports.h"
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * cpu_mask that denotes the CPUs that needs timer interrupt coming in as
-+ * IPIs in place of local APIC timers
-+ */
-+static cpumask_t timer_bcast_ipi;
-+#endif
-+
-+/*
-+ * Knob to control our willingness to enable the local APIC.
-+ */
-+int enable_local_apic __initdata = 0; /* -1=force-disable, +1=force-enable */
-+
-+/*
-+ * Debug level
-+ */
-+int apic_verbosity;
-+
-+/*
-+ * 'what should we do if we get a hw irq event on an illegal vector'.
-+ * Each architecture has to answer this itself.
-+ */
-+void ack_bad_irq(unsigned int irq)
-+{
-+ printk("unexpected IRQ trap at vector %02x\n", irq);
-+ /*
-+ * Currently unexpected vectors happen only on SMP and APIC.
-+ * We _must_ ack these because every local APIC has only N
-+ * irq slots per priority level, and a 'hanging, unacked' IRQ
-+ * holds up an irq slot - in excessive cases (when multiple
-+ * unexpected vectors occur) that might lock up the APIC
-+ * completely.
-+ * But only ack when the APIC is enabled -AK
-+ */
-+ if (cpu_has_apic)
-+ ack_APIC_irq();
-+}
-+
-+int get_physical_broadcast(void)
-+{
-+ return 0xff;
-+}
-+
-+#ifndef CONFIG_XEN
-+#ifndef CONFIG_SMP
-+static void up_apic_timer_interrupt_call(struct pt_regs *regs)
-+{
-+ int cpu = smp_processor_id();
-+
-+ /*
-+ * the NMI deadlock-detector uses this.
-+ */
-+ per_cpu(irq_stat, cpu).apic_timer_irqs++;
-+
-+ smp_local_timer_interrupt(regs);
-+}
-+#endif
-+
-+void smp_send_timer_broadcast_ipi(struct pt_regs *regs)
-+{
-+ cpumask_t mask;
-+
-+ cpus_and(mask, cpu_online_map, timer_bcast_ipi);
-+ if (!cpus_empty(mask)) {
-+#ifdef CONFIG_SMP
-+ send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
-+#else
-+ /*
-+ * We can directly call the apic timer interrupt handler
-+ * in UP case. Minus all irq related functions
-+ */
-+ up_apic_timer_interrupt_call(regs);
-+#endif
-+ }
-+}
-+#endif
-+
-+int setup_profiling_timer(unsigned int multiplier)
-+{
-+ return -EINVAL;
-+}
-+
-+/*
-+ * This initializes the IO-APIC and APIC hardware if this is
-+ * a UP kernel.
-+ */
-+int __init APIC_init_uniprocessor (void)
-+{
-+#ifdef CONFIG_X86_IO_APIC
-+ if (smp_found_config)
-+ if (!skip_ioapic_setup && nr_ioapics)
-+ setup_IO_APIC();
-+#endif
-+
-+ return 0;
-+}
-diff --git a/arch/i386/kernel/asm-offsets.c b/arch/i386/kernel/asm-offsets.c
-index 36d66e2..3c4a0f4 100644
---- a/arch/i386/kernel/asm-offsets.c
-+++ b/arch/i386/kernel/asm-offsets.c
-@@ -13,6 +13,7 @@
- #include <asm/fixmap.h>
- #include <asm/processor.h>
- #include <asm/thread_info.h>
-+#include <asm/elf.h>
-
- #define DEFINE(sym, val) \
- asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-@@ -63,10 +64,12 @@ void foo(void)
- OFFSET(pbe_orig_address, pbe, orig_address);
- OFFSET(pbe_next, pbe, next);
-
-+#ifdef CONFIG_X86_SYSENTER
- /* Offset from the sysenter stack to tss.esp0 */
- DEFINE(TSS_sysenter_esp0, offsetof(struct tss_struct, esp0) -
- sizeof(struct tss_struct));
-+#endif
-
- DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
-- DEFINE(VSYSCALL_BASE, __fix_to_virt(FIX_VSYSCALL));
-+ DEFINE(VSYSCALL_BASE, VSYSCALL_BASE);
- }
-diff --git a/arch/i386/kernel/cpu/Makefile b/arch/i386/kernel/cpu/Makefile
-index 010aecf..753f1d7 100644
---- a/arch/i386/kernel/cpu/Makefile
-+++ b/arch/i386/kernel/cpu/Makefile
-@@ -17,3 +17,8 @@ obj-$(CONFIG_X86_MCE) += mcheck/
-
- obj-$(CONFIG_MTRR) += mtrr/
- obj-$(CONFIG_CPU_FREQ) += cpufreq/
-+
-+ifdef CONFIG_XEN
-+include $(srctree)/scripts/Makefile.xen
-+obj-y := $(call cherrypickxen, $(obj-y), $(src))
-+endif
-diff --git a/arch/i386/kernel/cpu/common-xen.c b/arch/i386/kernel/cpu/common-xen.c
-new file mode 100644
-index 0000000..2fc25e8
---- /dev/null
-+++ b/arch/i386/kernel/cpu/common-xen.c
-@@ -0,0 +1,723 @@
-+#include <linux/init.h>
-+#include <linux/string.h>
-+#include <linux/delay.h>
-+#include <linux/smp.h>
-+#include <linux/module.h>
-+#include <linux/percpu.h>
-+#include <linux/bootmem.h>
-+#include <asm/semaphore.h>
-+#include <asm/processor.h>
-+#include <asm/i387.h>
-+#include <asm/msr.h>
-+#include <asm/io.h>
-+#include <asm/mmu_context.h>
-+#ifdef CONFIG_X86_LOCAL_APIC
-+#include <asm/mpspec.h>
-+#include <asm/apic.h>
-+#include <mach_apic.h>
-+#endif
-+#include <asm/hypervisor.h>
-+
-+#include "cpu.h"
-+
-+DEFINE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
-+EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr);
-+
-+#ifndef CONFIG_XEN
-+DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
-+EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);
-+#endif
-+
-+static int cachesize_override __devinitdata = -1;
-+static int disable_x86_fxsr __devinitdata = 0;
-+static int disable_x86_serial_nr __devinitdata = 1;
-+
-+struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
-+
-+extern void machine_specific_modify_cpu_capabilities(struct cpuinfo_x86 *c);
-+
-+extern int disable_pse;
-+
-+static void default_init(struct cpuinfo_x86 * c)
-+{
-+ /* Not much we can do here... */
-+ /* Check if at least it has cpuid */
-+ if (c->cpuid_level == -1) {
-+ /* No cpuid. It must be an ancient CPU */
-+ if (c->x86 == 4)
-+ strcpy(c->x86_model_id, "486");
-+ else if (c->x86 == 3)
-+ strcpy(c->x86_model_id, "386");
-+ }
-+}
-+
-+static struct cpu_dev default_cpu = {
-+ .c_init = default_init,
-+ .c_vendor = "Unknown",
-+};
-+static struct cpu_dev * this_cpu = &default_cpu;
-+
-+static int __init cachesize_setup(char *str)
-+{
-+ get_option (&str, &cachesize_override);
-+ return 1;
-+}
-+__setup("cachesize=", cachesize_setup);
-+
-+int __devinit get_model_name(struct cpuinfo_x86 *c)
-+{
-+ unsigned int *v;
-+ char *p, *q;
-+
-+ if (cpuid_eax(0x80000000) < 0x80000004)
-+ return 0;
-+
-+ v = (unsigned int *) c->x86_model_id;
-+ cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
-+ cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
-+ cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
-+ c->x86_model_id[48] = 0;
-+
-+ /* Intel chips right-justify this string for some dumb reason;
-+ undo that brain damage */
-+ p = q = &c->x86_model_id[0];
-+ while ( *p == ' ' )
-+ p++;
-+ if ( p != q ) {
-+ while ( *p )
-+ *q++ = *p++;
-+ while ( q <= &c->x86_model_id[48] )
-+ *q++ = '\0'; /* Zero-pad the rest */
-+ }
-+
-+ return 1;
-+}
-+
-+
-+void __devinit display_cacheinfo(struct cpuinfo_x86 *c)
-+{
-+ unsigned int n, dummy, ecx, edx, l2size;
-+
-+ n = cpuid_eax(0x80000000);
-+
-+ if (n >= 0x80000005) {
-+ cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
-+ printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
-+ edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
-+ c->x86_cache_size=(ecx>>24)+(edx>>24);
-+ }
-+
-+	if (n < 0x80000006)	/* Some chips just have a large L1. */
-+ return;
-+
-+ ecx = cpuid_ecx(0x80000006);
-+ l2size = ecx >> 16;
-+
-+ /* do processor-specific cache resizing */
-+ if (this_cpu->c_size_cache)
-+ l2size = this_cpu->c_size_cache(c,l2size);
-+
-+ /* Allow user to override all this if necessary. */
-+ if (cachesize_override != -1)
-+ l2size = cachesize_override;
-+
-+ if ( l2size == 0 )
-+ return; /* Again, no L2 cache is possible */
-+
-+ c->x86_cache_size = l2size;
-+
-+ printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
-+ l2size, ecx & 0xFF);
-+}
-+
-+/* Naming convention should be: <Name> [(<Codename>)] */
-+/* This table is only used if init_<vendor>() below doesn't set it; */
-+/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
-+
-+/* Look up CPU names by table lookup. */
-+static char __devinit *table_lookup_model(struct cpuinfo_x86 *c)
-+{
-+ struct cpu_model_info *info;
-+
-+ if ( c->x86_model >= 16 )
-+ return NULL; /* Range check */
-+
-+ if (!this_cpu)
-+ return NULL;
-+
-+ info = this_cpu->c_models;
-+
-+ while (info && info->family) {
-+ if (info->family == c->x86)
-+ return info->model_names[c->x86_model];
-+ info++;
-+ }
-+ return NULL; /* Not found */
-+}
-+
-+
-+static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
-+{
-+ char *v = c->x86_vendor_id;
-+ int i;
-+ static int printed;
-+
-+ for (i = 0; i < X86_VENDOR_NUM; i++) {
-+ if (cpu_devs[i]) {
-+ if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
-+ (cpu_devs[i]->c_ident[1] &&
-+ !strcmp(v,cpu_devs[i]->c_ident[1]))) {
-+ c->x86_vendor = i;
-+ if (!early)
-+ this_cpu = cpu_devs[i];
-+ return;
-+ }
-+ }
-+ }
-+ if (!printed) {
-+ printed++;
-+ printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
-+ printk(KERN_ERR "CPU: Your system may be unstable.\n");
-+ }
-+ c->x86_vendor = X86_VENDOR_UNKNOWN;
-+ this_cpu = &default_cpu;
-+}
-+
-+
-+static int __init x86_fxsr_setup(char * s)
-+{
-+ disable_x86_fxsr = 1;
-+ return 1;
-+}
-+__setup("nofxsr", x86_fxsr_setup);
-+
-+
-+/* Standard macro to see if a specific flag is changeable */
-+static inline int flag_is_changeable_p(u32 flag)
-+{
-+ u32 f1, f2;
-+
-+ asm("pushfl\n\t"
-+ "pushfl\n\t"
-+ "popl %0\n\t"
-+ "movl %0,%1\n\t"
-+ "xorl %2,%0\n\t"
-+ "pushl %0\n\t"
-+ "popfl\n\t"
-+ "pushfl\n\t"
-+ "popl %0\n\t"
-+ "popfl\n\t"
-+ : "=&r" (f1), "=&r" (f2)
-+ : "ir" (flag));
-+
-+ return ((f1^f2) & flag) != 0;
-+}
-+
-+
-+/* Probe for the CPUID instruction */
-+static int __devinit have_cpuid_p(void)
-+{
-+ return flag_is_changeable_p(X86_EFLAGS_ID);
-+}
-+
-+/* Do minimum CPU detection early.
-+ Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
-+ The others are not touched to avoid unwanted side effects.
-+
-+ WARNING: this function is only called on the BP. Don't add code here
-+ that is supposed to run on all CPUs. */
-+static void __init early_cpu_detect(void)
-+{
-+ struct cpuinfo_x86 *c = &boot_cpu_data;
-+
-+ c->x86_cache_alignment = 32;
-+
-+ if (!have_cpuid_p())
-+ return;
-+
-+ /* Get vendor name */
-+ cpuid(0x00000000, &c->cpuid_level,
-+ (int *)&c->x86_vendor_id[0],
-+ (int *)&c->x86_vendor_id[8],
-+ (int *)&c->x86_vendor_id[4]);
-+
-+ get_cpu_vendor(c, 1);
-+
-+ c->x86 = 4;
-+ if (c->cpuid_level >= 0x00000001) {
-+ u32 junk, tfms, cap0, misc;
-+ cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
-+ c->x86 = (tfms >> 8) & 15;
-+ c->x86_model = (tfms >> 4) & 15;
-+ if (c->x86 == 0xf)
-+ c->x86 += (tfms >> 20) & 0xff;
-+ if (c->x86 >= 0x6)
-+ c->x86_model += ((tfms >> 16) & 0xF) << 4;
-+ c->x86_mask = tfms & 15;
-+ if (cap0 & (1<<19))
-+ c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
-+ }
-+}
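
For intuition, here is a minimal standalone C sketch of the signature decode early_cpu_detect() performs on the CPUID leaf-1 word; the function name and the test value are invented for illustration and are not part of the patch:

	#include <stdio.h>

	/* Mirror of the shifts above: family in bits 8-11, model in
	 * bits 4-7, stepping in bits 0-3, with the extended family and
	 * model fields folded in the same order as early_cpu_detect(). */
	static void decode_signature(unsigned int tfms, unsigned int *family,
				     unsigned int *model, unsigned int *stepping)
	{
		*family = (tfms >> 8) & 15;
		*model = (tfms >> 4) & 15;
		if (*family == 0xf)
			*family += (tfms >> 20) & 0xff;		/* extended family */
		if (*family >= 0x6)
			*model += ((tfms >> 16) & 0xf) << 4;	/* extended model */
		*stepping = tfms & 15;
	}

	int main(void)
	{
		unsigned int f, m, s;

		decode_signature(0x00000f29, &f, &m, &s);	/* a Pentium 4 signature */
		printf("family %u, model %u, stepping %u\n", f, m, s);	/* 15, 2, 9 */
		return 0;
	}
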
-+
-+void __devinit generic_identify(struct cpuinfo_x86 * c)
-+{
-+ u32 tfms, xlvl;
-+ int junk;
-+
-+ if (have_cpuid_p()) {
-+ /* Get vendor name */
-+ cpuid(0x00000000, &c->cpuid_level,
-+ (int *)&c->x86_vendor_id[0],
-+ (int *)&c->x86_vendor_id[8],
-+ (int *)&c->x86_vendor_id[4]);
-+
-+ get_cpu_vendor(c, 0);
-+ /* Initialize the standard set of capabilities */
-+ /* Note that the vendor-specific code below might override */
-+
-+ /* Intel-defined flags: level 0x00000001 */
-+ if ( c->cpuid_level >= 0x00000001 ) {
-+ u32 capability, excap;
-+ cpuid(0x00000001, &tfms, &junk, &excap, &capability);
-+ c->x86_capability[0] = capability;
-+ c->x86_capability[4] = excap;
-+ c->x86 = (tfms >> 8) & 15;
-+ c->x86_model = (tfms >> 4) & 15;
-+ if (c->x86 == 0xf) {
-+ c->x86 += (tfms >> 20) & 0xff;
-+ c->x86_model += ((tfms >> 16) & 0xF) << 4;
-+ }
-+ c->x86_mask = tfms & 15;
-+ } else {
-+ /* Have CPUID level 0 only - unheard of */
-+ c->x86 = 4;
-+ }
-+
-+ /* AMD-defined flags: level 0x80000001 */
-+ xlvl = cpuid_eax(0x80000000);
-+ if ( (xlvl & 0xffff0000) == 0x80000000 ) {
-+ if ( xlvl >= 0x80000001 ) {
-+ c->x86_capability[1] = cpuid_edx(0x80000001);
-+ c->x86_capability[6] = cpuid_ecx(0x80000001);
-+ }
-+ if ( xlvl >= 0x80000004 )
-+ get_model_name(c); /* Default name */
-+ }
-+ }
-+
-+ early_intel_workaround(c);
-+
-+#ifdef CONFIG_X86_HT
-+ phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
-+#endif
-+}
-+
-+static void __devinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
-+{
-+ if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
-+ /* Disable processor serial number */
-+ unsigned long lo,hi;
-+ rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
-+ lo |= 0x200000;
-+ wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
-+ printk(KERN_NOTICE "CPU serial number disabled.\n");
-+ clear_bit(X86_FEATURE_PN, c->x86_capability);
-+
-+ /* Disabling the serial number may affect the cpuid level */
-+ c->cpuid_level = cpuid_eax(0);
-+ }
-+}
-+
-+static int __init x86_serial_nr_setup(char *s)
-+{
-+ disable_x86_serial_nr = 0;
-+ return 1;
-+}
-+__setup("serialnumber", x86_serial_nr_setup);
-+
-+
-+
-+/*
-+ * This does the hard work of actually picking apart the CPU stuff...
-+ */
-+void __devinit identify_cpu(struct cpuinfo_x86 *c)
-+{
-+ int i;
-+
-+ c->loops_per_jiffy = loops_per_jiffy;
-+ c->x86_cache_size = -1;
-+ c->x86_vendor = X86_VENDOR_UNKNOWN;
-+ c->cpuid_level = -1; /* CPUID not detected */
-+ c->x86_model = c->x86_mask = 0; /* So far unknown... */
-+ c->x86_vendor_id[0] = '\0'; /* Unset */
-+ c->x86_model_id[0] = '\0'; /* Unset */
-+ c->x86_max_cores = 1;
-+ memset(&c->x86_capability, 0, sizeof c->x86_capability);
-+
-+ if (!have_cpuid_p()) {
-+ /* First of all, decide if this is a 486 or higher */
-+ /* It's a 486 if we can modify the AC flag */
-+ if ( flag_is_changeable_p(X86_EFLAGS_AC) )
-+ c->x86 = 4;
-+ else
-+ c->x86 = 3;
-+ }
-+
-+ generic_identify(c);
-+
-+ printk(KERN_DEBUG "CPU: After generic identify, caps:");
-+ for (i = 0; i < NCAPINTS; i++)
-+ printk(" %08lx", c->x86_capability[i]);
-+ printk("\n");
-+
-+ if (this_cpu->c_identify) {
-+ this_cpu->c_identify(c);
-+
-+ printk(KERN_DEBUG "CPU: After vendor identify, caps:");
-+ for (i = 0; i < NCAPINTS; i++)
-+ printk(" %08lx", c->x86_capability[i]);
-+ printk("\n");
-+ }
-+
-+ /*
-+ * Vendor-specific initialization. In this section we
-+ * canonicalize the feature flags, meaning if there are
-+ * features a certain CPU supports which CPUID doesn't
-+ * tell us, CPUID claiming incorrect flags, or other bugs,
-+ * we handle them here.
-+ *
-+ * At the end of this section, c->x86_capability better
-+ * indicate the features this CPU genuinely supports!
-+ */
-+ if (this_cpu->c_init)
-+ this_cpu->c_init(c);
-+
-+ /* Disable the PN if appropriate */
-+ squash_the_stupid_serial_number(c);
-+
-+ /*
-+ * The vendor-specific functions might have changed features. Now
-+ * we do "generic changes."
-+ */
-+
-+#ifndef CONFIG_XEN
-+ /* TSC disabled? */
-+ if ( tsc_disable )
-+ clear_bit(X86_FEATURE_TSC, c->x86_capability);
-+#endif
-+
-+ /* FXSR disabled? */
-+ if (disable_x86_fxsr) {
-+ clear_bit(X86_FEATURE_FXSR, c->x86_capability);
-+ clear_bit(X86_FEATURE_XMM, c->x86_capability);
-+ }
-+
-+ if (disable_pse)
-+ clear_bit(X86_FEATURE_PSE, c->x86_capability);
-+
-+ /* If the model name is still unset, do table lookup. */
-+ if ( !c->x86_model_id[0] ) {
-+ char *p;
-+ p = table_lookup_model(c);
-+ if ( p )
-+ strcpy(c->x86_model_id, p);
-+ else
-+ /* Last resort... */
-+ sprintf(c->x86_model_id, "%02x/%02x",
-+ c->x86_vendor, c->x86_model);
-+ }
-+
-+ machine_specific_modify_cpu_capabilities(c);
-+
-+ /* Now the feature flags better reflect actual CPU features! */
-+
-+ printk(KERN_DEBUG "CPU: After all inits, caps:");
-+ for (i = 0; i < NCAPINTS; i++)
-+ printk(" %08lx", c->x86_capability[i]);
-+ printk("\n");
-+
-+ /*
-+ * On SMP, boot_cpu_data holds the common feature set between
-+ * all CPUs; so make sure that we indicate which features are
-+ * common between the CPUs. The first time this routine gets
-+ * executed, c == &boot_cpu_data.
-+ */
-+ if ( c != &boot_cpu_data ) {
-+ /* AND the already accumulated flags with these */
-+ for ( i = 0 ; i < NCAPINTS ; i++ )
-+ boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
-+ }
-+
-+ /* Init Machine Check Exception if available. */
-+ mcheck_init(c);
-+
-+ if (c == &boot_cpu_data)
-+ sysenter_setup();
-+ enable_sep_cpu();
-+
-+ if (c == &boot_cpu_data)
-+ mtrr_bp_init();
-+ else
-+ mtrr_ap_init();
-+}
-+
-+#ifdef CONFIG_X86_HT
-+void __devinit detect_ht(struct cpuinfo_x86 *c)
-+{
-+ u32 eax, ebx, ecx, edx;
-+ int index_msb, core_bits;
-+ int cpu = smp_processor_id();
-+
-+ cpuid(1, &eax, &ebx, &ecx, &edx);
-+
-+ c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
-+
-+ if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
-+ return;
-+
-+ smp_num_siblings = (ebx & 0xff0000) >> 16;
-+
-+ if (smp_num_siblings == 1) {
-+ printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
-+ } else if (smp_num_siblings > 1 ) {
-+
-+ if (smp_num_siblings > NR_CPUS) {
-+			printk(KERN_WARNING "CPU: Unsupported number of siblings %d\n", smp_num_siblings);
-+ smp_num_siblings = 1;
-+ return;
-+ }
-+
-+ index_msb = get_count_order(smp_num_siblings);
-+ phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
-+
-+ printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
-+ phys_proc_id[cpu]);
-+
-+ smp_num_siblings = smp_num_siblings / c->x86_max_cores;
-+
-+ index_msb = get_count_order(smp_num_siblings) ;
-+
-+ core_bits = get_count_order(c->x86_max_cores);
-+
-+ cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
-+ ((1 << core_bits) - 1);
-+
-+ if (c->x86_max_cores > 1)
-+ printk(KERN_INFO "CPU: Processor Core ID: %d\n",
-+ cpu_core_id[cpu]);
-+ }
-+}
-+#endif
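
detect_ht() above carves the initial APIC id into package and core fields with get_count_order(). A user-space sketch of that arithmetic, where count_order() reimplements get_count_order() for the example and the function names are assumptions:

	#include <stdio.h>

	/* Smallest k with 2^k >= n, like the kernel's get_count_order(). */
	static int count_order(unsigned int n)
	{
		int k = 0;

		while ((1u << k) < n)
			k++;
		return k;
	}

	static void split_apic_id(unsigned int apicid, unsigned int threads_per_pkg,
				  unsigned int cores_per_pkg)
	{
		unsigned int threads_per_core = threads_per_pkg / cores_per_pkg;
		int pkg_shift  = count_order(threads_per_pkg);
		int core_shift = count_order(threads_per_core);
		int core_bits  = count_order(cores_per_pkg);

		printf("apicid %u -> package %u, core %u\n", apicid,
		       apicid >> pkg_shift,
		       (apicid >> core_shift) & ((1u << core_bits) - 1));
	}

	int main(void)
	{
		split_apic_id(5, 4, 2);	/* 2 cores x 2 threads: package 1, core 0 */
		return 0;
	}
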
-+
-+void __devinit print_cpu_info(struct cpuinfo_x86 *c)
-+{
-+ char *vendor = NULL;
-+
-+ if (c->x86_vendor < X86_VENDOR_NUM)
-+ vendor = this_cpu->c_vendor;
-+ else if (c->cpuid_level >= 0)
-+ vendor = c->x86_vendor_id;
-+
-+ if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
-+ printk("%s ", vendor);
-+
-+ if (!c->x86_model_id[0])
-+ printk("%d86", c->x86);
-+ else
-+ printk("%s", c->x86_model_id);
-+
-+ if (c->x86_mask || c->cpuid_level >= 0)
-+ printk(" stepping %02x\n", c->x86_mask);
-+ else
-+ printk("\n");
-+}
-+
-+cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
-+
-+/* This is hacky. :)
-+ * We're emulating future behavior.
-+ * In the future, the cpu-specific init functions will be called implicitly
-+ * via the magic of initcalls.
-+ * They will insert themselves into the cpu_devs structure.
-+ * Then, when cpu_init() is called, we can just iterate over that array.
-+ */
-+
-+extern int intel_cpu_init(void);
-+extern int cyrix_init_cpu(void);
-+extern int nsc_init_cpu(void);
-+extern int amd_init_cpu(void);
-+extern int centaur_init_cpu(void);
-+extern int transmeta_init_cpu(void);
-+extern int rise_init_cpu(void);
-+extern int nexgen_init_cpu(void);
-+extern int umc_init_cpu(void);
-+
-+void __init early_cpu_init(void)
-+{
-+ intel_cpu_init();
-+ cyrix_init_cpu();
-+ nsc_init_cpu();
-+ amd_init_cpu();
-+ centaur_init_cpu();
-+ transmeta_init_cpu();
-+ rise_init_cpu();
-+ nexgen_init_cpu();
-+ umc_init_cpu();
-+ early_cpu_detect();
-+
-+#ifdef CONFIG_DEBUG_PAGEALLOC
-+ /* pse is not compatible with on-the-fly unmapping,
-+ * disable it even if the cpus claim to support it.
-+ */
-+ clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
-+ disable_pse = 1;
-+#endif
-+}
-+
-+void __cpuinit cpu_gdt_init(struct Xgt_desc_struct *gdt_descr)
-+{
-+ unsigned long frames[16];
-+ unsigned long va;
-+ int f;
-+
-+ for (va = gdt_descr->address, f = 0;
-+ va < gdt_descr->address + gdt_descr->size;
-+ va += PAGE_SIZE, f++) {
-+ frames[f] = virt_to_mfn(va);
-+ make_lowmem_page_readonly(
-+ (void *)va, XENFEAT_writable_descriptor_tables);
-+ }
-+ if (HYPERVISOR_set_gdt(frames, gdt_descr->size / 8))
-+ BUG();
-+}
-+
-+/*
-+ * cpu_init() initializes state that is per-CPU. Some data is already
-+ * initialized (naturally) in the bootstrap process, such as the GDT
-+ * and IDT. We reload them nevertheless; this function acts as a
-+ * 'CPU state barrier': nothing should get across.
-+ */
-+void __cpuinit cpu_init(void)
-+{
-+ int cpu = smp_processor_id();
-+#ifndef CONFIG_X86_NO_TSS
-+ struct tss_struct * t = &per_cpu(init_tss, cpu);
-+#endif
-+ struct thread_struct *thread = &current->thread;
-+ struct desc_struct *gdt;
-+ struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
-+
-+ if (cpu_test_and_set(cpu, cpu_initialized)) {
-+ printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
-+ for (;;) local_irq_enable();
-+ }
-+ printk(KERN_INFO "Initializing CPU#%d\n", cpu);
-+
-+ if (cpu_has_vme || cpu_has_de)
-+ clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
-+#ifndef CONFIG_XEN
-+ if (tsc_disable && cpu_has_tsc) {
-+ printk(KERN_NOTICE "Disabling TSC...\n");
-+ /**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
-+ clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
-+ set_in_cr4(X86_CR4_TSD);
-+ }
-+#endif
-+
-+#ifndef CONFIG_XEN
-+ /*
-+ * This is a horrible hack to allocate the GDT. The problem
-+ * is that cpu_init() is called really early for the boot CPU
-+ * (and hence needs bootmem) but much later for the secondary
-+ * CPUs, when bootmem will have gone away
-+ */
-+ if (NODE_DATA(0)->bdata->node_bootmem_map) {
-+ gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE);
-+ /* alloc_bootmem_pages panics on failure, so no check */
-+ memset(gdt, 0, PAGE_SIZE);
-+ } else {
-+ gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL);
-+ if (unlikely(!gdt)) {
-+ printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu);
-+ for (;;)
-+ local_irq_enable();
-+ }
-+ }
-+
-+ /*
-+ * Initialize the per-CPU GDT with the boot GDT,
-+ * and set up the GDT descriptor:
-+ */
-+ memcpy(gdt, cpu_gdt_table, GDT_SIZE);
-+
-+ /* Set up GDT entry for 16bit stack */
-+ *(__u64 *)(&gdt[GDT_ENTRY_ESPFIX_SS]) |=
-+ ((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) |
-+ ((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) |
-+ (CPU_16BIT_STACK_SIZE - 1);
-+
-+ cpu_gdt_descr->size = GDT_SIZE - 1;
-+ cpu_gdt_descr->address = (unsigned long)gdt;
-+#else
-+ if (cpu == 0 && cpu_gdt_descr->address == 0) {
-+ gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE);
-+ /* alloc_bootmem_pages panics on failure, so no check */
-+ memset(gdt, 0, PAGE_SIZE);
-+
-+ memcpy(gdt, cpu_gdt_table, GDT_SIZE);
-+
-+ cpu_gdt_descr->size = GDT_SIZE;
-+ cpu_gdt_descr->address = (unsigned long)gdt;
-+ }
-+#endif
-+
-+ cpu_gdt_init(cpu_gdt_descr);
-+
-+ /*
-+ * Set up and load the per-CPU TSS and LDT
-+ */
-+ atomic_inc(&init_mm.mm_count);
-+ current->active_mm = &init_mm;
-+ if (current->mm)
-+ BUG();
-+ enter_lazy_tlb(&init_mm, current);
-+
-+ load_esp0(t, thread);
-+
-+ load_LDT(&init_mm.context);
-+
-+#ifdef CONFIG_DOUBLEFAULT
-+ /* Set up doublefault TSS pointer in the GDT */
-+ __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
-+#endif
-+
-+ /* Clear %fs and %gs. */
-+ asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");
-+
-+ /* Clear all 6 debug registers: */
-+ set_debugreg(0, 0);
-+ set_debugreg(0, 1);
-+ set_debugreg(0, 2);
-+ set_debugreg(0, 3);
-+ set_debugreg(0, 6);
-+ set_debugreg(0, 7);
-+
-+ /*
-+ * Force FPU initialization:
-+ */
-+ current_thread_info()->status = 0;
-+ clear_used_math();
-+ mxcsr_feature_mask_init();
-+}
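
The 64-bit OR in the non-Xen branch of cpu_init() above builds the ESPFIX entry by scattering the 32-bit stack address across the split base fields of an x86 segment descriptor. A sketch of that packing (the helper name is an assumption; the masks are taken verbatim from the code above):

	#include <stdint.h>

	/* base[23:0] lands in descriptor bits 16..39, base[31:24] in
	 * bits 56..63, and the 16-bit limit in bits 0..15. */
	static uint64_t pack_base_limit(uint32_t base, uint32_t limit)
	{
		return (((uint64_t)base << 16) & 0x000000ffffff0000ULL) |
		       (((uint64_t)base << 32) & 0xff00000000000000ULL) |
		       (limit & 0xffffULL);
	}
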
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+void __devinit cpu_uninit(void)
-+{
-+ int cpu = raw_smp_processor_id();
-+ cpu_clear(cpu, cpu_initialized);
-+
-+ /* lazy TLB state */
-+ per_cpu(cpu_tlbstate, cpu).state = 0;
-+ per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
-+}
-+#endif
-diff --git a/arch/i386/kernel/cpu/cpufreq/Kconfig b/arch/i386/kernel/cpu/cpufreq/Kconfig
-index 26892d2..be1850c 100644
---- a/arch/i386/kernel/cpu/cpufreq/Kconfig
-+++ b/arch/i386/kernel/cpu/cpufreq/Kconfig
-@@ -158,7 +158,7 @@ config X86_SPEEDSTEP_ICH
- config X86_SPEEDSTEP_SMI
- tristate "Intel SpeedStep on 440BX/ZX/MX chipsets (SMI interface)"
- select CPU_FREQ_TABLE
-- depends on EXPERIMENTAL
-+ depends on EXPERIMENTAL && !X86_XEN
- help
- This adds the CPUFreq driver for certain mobile Intel Pentium III
- (Coppermine), all mobile Intel Pentium III-M (Tualatin)
-diff --git a/arch/i386/kernel/cpu/mtrr/Makefile b/arch/i386/kernel/cpu/mtrr/Makefile
-index a25b701..06df4fe 100644
---- a/arch/i386/kernel/cpu/mtrr/Makefile
-+++ b/arch/i386/kernel/cpu/mtrr/Makefile
-@@ -3,3 +3,10 @@ obj-y += amd.o
- obj-y += cyrix.o
- obj-y += centaur.o
-
-+ifdef CONFIG_XEN
-+include $(srctree)/scripts/Makefile.xen
-+n-obj-xen := generic.o state.o amd.o cyrix.o centaur.o
-+
-+obj-y := $(call filterxen, $(obj-y), $(n-obj-xen))
-+obj-y := $(call cherrypickxen, $(obj-y))
-+endif
-diff --git a/arch/i386/kernel/cpu/mtrr/main-xen.c b/arch/i386/kernel/cpu/mtrr/main-xen.c
-new file mode 100644
-index 0000000..407cc78
---- /dev/null
-+++ b/arch/i386/kernel/cpu/mtrr/main-xen.c
-@@ -0,0 +1,187 @@
-+#include <linux/init.h>
-+#include <linux/proc_fs.h>
-+#include <linux/ctype.h>
-+#include <linux/module.h>
-+#include <linux/seq_file.h>
-+#include <asm/uaccess.h>
-+
-+#include <asm/mtrr.h>
-+#include "mtrr.h"
-+
-+void generic_get_mtrr(unsigned int reg, unsigned long *base,
-+ unsigned int *size, mtrr_type * type)
-+{
-+ dom0_op_t op;
-+
-+ op.cmd = DOM0_READ_MEMTYPE;
-+ op.u.read_memtype.reg = reg;
-+ (void)HYPERVISOR_dom0_op(&op);
-+
-+ *size = op.u.read_memtype.nr_mfns;
-+ *base = op.u.read_memtype.mfn;
-+ *type = op.u.read_memtype.type;
-+}
-+
-+struct mtrr_ops generic_mtrr_ops = {
-+ .use_intel_if = 1,
-+ .get = generic_get_mtrr,
-+};
-+
-+struct mtrr_ops *mtrr_if = &generic_mtrr_ops;
-+unsigned int num_var_ranges;
-+unsigned int *usage_table;
-+
-+static void __init set_num_var_ranges(void)
-+{
-+ dom0_op_t op;
-+
-+ for (num_var_ranges = 0; ; num_var_ranges++) {
-+ op.cmd = DOM0_READ_MEMTYPE;
-+ op.u.read_memtype.reg = num_var_ranges;
-+ if (HYPERVISOR_dom0_op(&op) != 0)
-+ break;
-+ }
-+}
-+
-+static void __init init_table(void)
-+{
-+ int i, max;
-+
-+ max = num_var_ranges;
-+ if ((usage_table = kmalloc(max * sizeof *usage_table, GFP_KERNEL))
-+ == NULL) {
-+ printk(KERN_ERR "mtrr: could not allocate\n");
-+ return;
-+ }
-+ for (i = 0; i < max; i++)
-+ usage_table[i] = 0;
-+}
-+
-+int mtrr_add_page(unsigned long base, unsigned long size,
-+ unsigned int type, char increment)
-+{
-+ int error;
-+ dom0_op_t op;
-+
-+ op.cmd = DOM0_ADD_MEMTYPE;
-+ op.u.add_memtype.mfn = base;
-+ op.u.add_memtype.nr_mfns = size;
-+ op.u.add_memtype.type = type;
-+ error = HYPERVISOR_dom0_op(&op);
-+ if (error) {
-+ BUG_ON(error > 0);
-+ return error;
-+ }
-+
-+ if (increment)
-+ ++usage_table[op.u.add_memtype.reg];
-+
-+ return op.u.add_memtype.reg;
-+}
-+
-+static int mtrr_check(unsigned long base, unsigned long size)
-+{
-+ if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
-+ printk(KERN_WARNING
-+ "mtrr: size and base must be multiples of 4 kiB\n");
-+ printk(KERN_DEBUG
-+ "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
-+ dump_stack();
-+ return -1;
-+ }
-+ return 0;
-+}
-+
-+int
-+mtrr_add(unsigned long base, unsigned long size, unsigned int type,
-+ char increment)
-+{
-+ if (mtrr_check(base, size))
-+ return -EINVAL;
-+ return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
-+ increment);
-+}
-+
-+int mtrr_del_page(int reg, unsigned long base, unsigned long size)
-+{
-+ int i, max;
-+ mtrr_type ltype;
-+ unsigned long lbase;
-+ unsigned int lsize;
-+ int error = -EINVAL;
-+ dom0_op_t op;
-+
-+ max = num_var_ranges;
-+ if (reg < 0) {
-+ /* Search for existing MTRR */
-+ for (i = 0; i < max; ++i) {
-+ mtrr_if->get(i, &lbase, &lsize, &ltype);
-+ if (lbase == base && lsize == size) {
-+ reg = i;
-+ break;
-+ }
-+ }
-+ if (reg < 0) {
-+ printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
-+ size);
-+ goto out;
-+ }
-+ }
-+ if (usage_table[reg] < 1) {
-+ printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
-+ goto out;
-+ }
-+ if (--usage_table[reg] < 1) {
-+ op.cmd = DOM0_DEL_MEMTYPE;
-+ op.u.del_memtype.handle = 0;
-+ op.u.del_memtype.reg = reg;
-+ error = HYPERVISOR_dom0_op(&op);
-+ if (error) {
-+ BUG_ON(error > 0);
-+ goto out;
-+ }
-+ }
-+ error = reg;
-+ out:
-+ return error;
-+}
-+
-+int
-+mtrr_del(int reg, unsigned long base, unsigned long size)
-+{
-+ if (mtrr_check(base, size))
-+ return -EINVAL;
-+ return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
-+}
-+
-+EXPORT_SYMBOL(mtrr_add);
-+EXPORT_SYMBOL(mtrr_del);
-+
-+void __init mtrr_bp_init(void)
-+{
-+}
-+
-+void mtrr_ap_init(void)
-+{
-+}
-+
-+static int __init mtrr_init(void)
-+{
-+ struct cpuinfo_x86 *c = &boot_cpu_data;
-+
-+ if (!(xen_start_info->flags & SIF_PRIVILEGED))
-+ return -ENODEV;
-+
-+ if ((!cpu_has(c, X86_FEATURE_MTRR)) &&
-+ (!cpu_has(c, X86_FEATURE_K6_MTRR)) &&
-+ (!cpu_has(c, X86_FEATURE_CYRIX_ARR)) &&
-+ (!cpu_has(c, X86_FEATURE_CENTAUR_MCR)))
-+ return -ENODEV;
-+
-+ set_num_var_ranges();
-+ init_table();
-+
-+ return 0;
-+}
-+
-+subsys_initcall(mtrr_init);
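
In this Xen variant, mtrr_check() rejects any base or size that is not a multiple of 4 kiB, and mtrr_add_page() forwards the request to the hypervisor as machine-frame numbers via DOM0_ADD_MEMTYPE. A hypothetical caller (the function and addresses are invented; the mtrr_add()/mtrr_del() signatures follow the code above):

	#include <linux/kernel.h>
	#include <asm/mtrr.h>

	/* Illustrative only: map a made-up 32MB region write-combining.
	 * Both base and size must be 4 kiB multiples or mtrr_check()
	 * rejects the call with -EINVAL. */
	static int example_map_wc(void)
	{
		int reg = mtrr_add(0xe0000000UL, 0x02000000UL, MTRR_TYPE_WRCOMB, 1);

		if (reg < 0) {
			printk(KERN_ERR "mtrr_add failed: %d\n", reg);
			return reg;
		}
		/* ... use the region, then drop our reference: */
		mtrr_del(reg, 0xe0000000UL, 0x02000000UL);
		return 0;
	}
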
-diff --git a/arch/i386/kernel/early_printk-xen.c b/arch/i386/kernel/early_printk-xen.c
-new file mode 100644
-index 0000000..7a5d206
---- /dev/null
-+++ b/arch/i386/kernel/early_printk-xen.c
-@@ -0,0 +1,2 @@
-+
-+#include "../../x86_64/kernel/early_printk-xen.c"
-diff --git a/arch/i386/kernel/entry-xen.S b/arch/i386/kernel/entry-xen.S
-new file mode 100644
-index 0000000..a86e8f5
---- /dev/null
-+++ b/arch/i386/kernel/entry-xen.S
-@@ -0,0 +1,857 @@
-+/*
-+ * linux/arch/i386/entry.S
-+ *
-+ * Copyright (C) 1991, 1992 Linus Torvalds
-+ */
-+
-+/*
-+ * entry.S contains the system-call and fault low-level handling routines.
-+ * This also contains the timer-interrupt handler, as well as all interrupts
-+ * and faults that can result in a task-switch.
-+ *
-+ * NOTE: This code handles signal-recognition, which happens every time
-+ * after a timer-interrupt and after each system call.
-+ *
-+ * I changed all the .align's to 4 (16 byte alignment), as that's faster
-+ * on a 486.
-+ *
-+ * Stack layout in 'ret_from_system_call':
-+ * ptrace needs to have all regs on the stack.
-+ * If the order here is changed, it needs to be
-+ * updated in fork.c:copy_process, signal.c:do_signal,
-+ * ptrace.c and ptrace.h
-+ *
-+ * 0(%esp) - %ebx
-+ * 4(%esp) - %ecx
-+ * 8(%esp) - %edx
-+ * C(%esp) - %esi
-+ * 10(%esp) - %edi
-+ * 14(%esp) - %ebp
-+ * 18(%esp) - %eax
-+ * 1C(%esp) - %ds
-+ * 20(%esp) - %es
-+ * 24(%esp) - orig_eax
-+ * 28(%esp) - %eip
-+ * 2C(%esp) - %cs
-+ * 30(%esp) - %eflags
-+ * 34(%esp) - %oldesp
-+ * 38(%esp) - %oldss
-+ *
-+ * "current" is in register %ebx during any slow entries.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/linkage.h>
-+#include <asm/thread_info.h>
-+#include <asm/errno.h>
-+#include <asm/segment.h>
-+#include <asm/smp.h>
-+#include <asm/page.h>
-+#include <asm/desc.h>
-+#include "irq_vectors.h"
-+#include <xen/interface/xen.h>
-+
-+#define nr_syscalls ((syscall_table_size)/4)
-+
-+EBX = 0x00
-+ECX = 0x04
-+EDX = 0x08
-+ESI = 0x0C
-+EDI = 0x10
-+EBP = 0x14
-+EAX = 0x18
-+DS = 0x1C
-+ES = 0x20
-+ORIG_EAX = 0x24
-+EIP = 0x28
-+CS = 0x2C
-+EVENT_MASK = 0x2E
-+EFLAGS = 0x30
-+OLDESP = 0x34
-+OLDSS = 0x38
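
The EBX..OLDSS constants above are byte offsets into the register frame that SAVE_ALL builds. As a C struct this is the i386 struct pt_regs layout; note that EVENT_MASK (0x2E) overlays the upper half of the saved %cs slot at 0x2C, which the Xen entry code reuses to stash the saved event mask:

	struct pt_regs_sketch {		/* illustrative mirror of the offsets above */
		long ebx;	/* 0x00 */
		long ecx;	/* 0x04 */
		long edx;	/* 0x08 */
		long esi;	/* 0x0c */
		long edi;	/* 0x10 */
		long ebp;	/* 0x14 */
		long eax;	/* 0x18 */
		int  xds;	/* 0x1c */
		int  xes;	/* 0x20 */
		long orig_eax;	/* 0x24 */
		long eip;	/* 0x28 */
		int  xcs;	/* 0x2c; bytes 0x2e-0x2f hold the event mask */
		long eflags;	/* 0x30 */
		long esp;	/* 0x34 */
		int  xss;	/* 0x38 */
	};
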
-+
-+CF_MASK = 0x00000001
-+TF_MASK = 0x00000100
-+IF_MASK = 0x00000200
-+DF_MASK = 0x00000400
-+NT_MASK = 0x00004000
-+VM_MASK = 0x00020000
-+/* Pseudo-eflags. */
-+NMI_MASK = 0x80000000
-+
-+/* Offsets into shared_info_t. */
-+#define evtchn_upcall_pending /* 0 */
-+#define evtchn_upcall_mask 1
-+
-+#define sizeof_vcpu_shift 6
-+
-+#ifdef CONFIG_SMP
-+#define preempt_disable(reg) incl TI_preempt_count(reg)
-+#define preempt_enable(reg) decl TI_preempt_count(reg)
-+#define XEN_GET_VCPU_INFO(reg) preempt_disable(%ebp) ; \
-+ movl TI_cpu(%ebp),reg ; \
-+ shl $sizeof_vcpu_shift,reg ; \
-+ addl HYPERVISOR_shared_info,reg
-+#define XEN_PUT_VCPU_INFO(reg) preempt_enable(%ebp)
-+#define XEN_PUT_VCPU_INFO_fixup .byte 0xff,0xff,0xff
-+#else
-+#define XEN_GET_VCPU_INFO(reg) movl HYPERVISOR_shared_info,reg
-+#define XEN_PUT_VCPU_INFO(reg)
-+#define XEN_PUT_VCPU_INFO_fixup
-+#endif
-+
-+#define XEN_LOCKED_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg)
-+#define XEN_LOCKED_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
-+#define XEN_BLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
-+ XEN_LOCKED_BLOCK_EVENTS(reg) ; \
-+ XEN_PUT_VCPU_INFO(reg)
-+#define XEN_UNBLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
-+ XEN_LOCKED_UNBLOCK_EVENTS(reg) ; \
-+ XEN_PUT_VCPU_INFO(reg)
-+#define XEN_TEST_PENDING(reg) testb $0xFF,evtchn_upcall_pending(reg)
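
These macros are the assembly form of masking Xen event delivery through the per-vcpu shared-info block: store 1/0 to evtchn_upcall_mask to block/unblock, and test evtchn_upcall_pending for events that arrived while masked. A rough C rendering (the struct is a minimal stand-in whose field offsets match the two #defines above; names are assumptions):

	/* Minimal stand-in for the per-vcpu shared-info fields. */
	struct vcpu_info_sketch {
		unsigned char evtchn_upcall_pending;	/* offset 0 */
		unsigned char evtchn_upcall_mask;	/* offset 1 */
	};

	static inline void xen_block_events(volatile struct vcpu_info_sketch *v)
	{
		v->evtchn_upcall_mask = 1;	/* XEN_LOCKED_BLOCK_EVENTS */
	}

	static inline int xen_unblock_events(volatile struct vcpu_info_sketch *v)
	{
		v->evtchn_upcall_mask = 0;	/* XEN_LOCKED_UNBLOCK_EVENTS */
		return v->evtchn_upcall_pending;	/* caller re-runs the upcall if set */
	}
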
-+
-+#ifdef CONFIG_PREEMPT
-+#define preempt_stop GET_THREAD_INFO(%ebp) ; \
-+ XEN_BLOCK_EVENTS(%esi)
-+#else
-+#define preempt_stop
-+#define resume_kernel restore_nocheck
-+#endif
-+
-+#define SAVE_ALL \
-+ cld; \
-+ pushl %es; \
-+ pushl %ds; \
-+ pushl %eax; \
-+ pushl %ebp; \
-+ pushl %edi; \
-+ pushl %esi; \
-+ pushl %edx; \
-+ pushl %ecx; \
-+ pushl %ebx; \
-+ movl $(__USER_DS), %edx; \
-+ movl %edx, %ds; \
-+ movl %edx, %es;
-+
-+#define RESTORE_INT_REGS \
-+ popl %ebx; \
-+ popl %ecx; \
-+ popl %edx; \
-+ popl %esi; \
-+ popl %edi; \
-+ popl %ebp; \
-+ popl %eax
-+
-+#define RESTORE_REGS \
-+ RESTORE_INT_REGS; \
-+1: popl %ds; \
-+2: popl %es; \
-+.section .fixup,"ax"; \
-+3: movl $0,(%esp); \
-+ jmp 1b; \
-+4: movl $0,(%esp); \
-+ jmp 2b; \
-+.previous; \
-+.section __ex_table,"a";\
-+ .align 4; \
-+ .long 1b,3b; \
-+ .long 2b,4b; \
-+.previous
-+
-+
-+#define RESTORE_ALL \
-+ RESTORE_REGS \
-+ addl $4, %esp; \
-+1: iret; \
-+.section .fixup,"ax"; \
-+2: pushl $0; \
-+ pushl $do_iret_error; \
-+ jmp error_code; \
-+.previous; \
-+.section __ex_table,"a";\
-+ .align 4; \
-+ .long 1b,2b; \
-+.previous
-+
-+
-+ENTRY(ret_from_fork)
-+ pushl %eax
-+ call schedule_tail
-+ GET_THREAD_INFO(%ebp)
-+ popl %eax
-+ jmp syscall_exit
-+
-+/*
-+ * Return to user mode is not as complex as all this looks,
-+ * but we want the default path for a system call return to
-+ * go as quickly as possible which is why some of this is
-+ * go as quickly as possible, which is why some of this is
-+ */
-+
-+ # userspace resumption stub bypassing syscall exit tracing
-+ ALIGN
-+ret_from_exception:
-+ preempt_stop
-+ret_from_intr:
-+ GET_THREAD_INFO(%ebp)
-+ movl EFLAGS(%esp), %eax # mix EFLAGS and CS
-+ movb CS(%esp), %al
-+ testl $(VM_MASK | 2), %eax
-+ jz resume_kernel
-+ENTRY(resume_userspace)
-+ XEN_BLOCK_EVENTS(%esi) # make sure we don't miss an interrupt
-+ # setting need_resched or sigpending
-+ # between sampling and the iret
-+ movl TI_flags(%ebp), %ecx
-+ andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
-+ # int/exception return?
-+ jne work_pending
-+ jmp restore_all
-+
-+#ifdef CONFIG_PREEMPT
-+ENTRY(resume_kernel)
-+ XEN_BLOCK_EVENTS(%esi)
-+ cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
-+ jnz restore_nocheck
-+need_resched:
-+ movl TI_flags(%ebp), %ecx # need_resched set ?
-+ testb $_TIF_NEED_RESCHED, %cl
-+ jz restore_all
-+ testb $0xFF,EVENT_MASK(%esp) # interrupts off (exception path) ?
-+ jnz restore_all
-+ call preempt_schedule_irq
-+ jmp need_resched
-+#endif
-+
-+#ifdef CONFIG_X86_SYSENTER
-+/* SYSENTER_RETURN points to after the "sysenter" instruction in
-+   the vsyscall page. See vsyscall-sysenter.S, which defines the symbol. */
-+
-+ # sysenter call handler stub
-+ENTRY(sysenter_entry)
-+ movl TSS_sysenter_esp0(%esp),%esp
-+sysenter_past_esp:
-+ sti
-+ pushl $(__USER_DS)
-+ pushl %ebp
-+ pushfl
-+ pushl $(__USER_CS)
-+ pushl $SYSENTER_RETURN
-+
-+/*
-+ * Load the potential sixth argument from user stack.
-+ * Careful about security.
-+ */
-+ cmpl $__PAGE_OFFSET-3,%ebp
-+ jae syscall_fault
-+1: movl (%ebp),%ebp
-+.section __ex_table,"a"
-+ .align 4
-+ .long 1b,syscall_fault
-+.previous
-+
-+ pushl %eax
-+ SAVE_ALL
-+ GET_THREAD_INFO(%ebp)
-+
-+ /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
-+ testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
-+ jnz syscall_trace_entry
-+ cmpl $(nr_syscalls), %eax
-+ jae syscall_badsys
-+ call *sys_call_table(,%eax,4)
-+ movl %eax,EAX(%esp)
-+ cli
-+ movl TI_flags(%ebp), %ecx
-+ testw $_TIF_ALLWORK_MASK, %cx
-+ jne syscall_exit_work
-+/* if something modifies registers it must also disable sysexit */
-+ movl EIP(%esp), %edx
-+ movl OLDESP(%esp), %ecx
-+ xorl %ebp,%ebp
-+ sti
-+ sysexit
-+#endif /* CONFIG_X86_SYSENTER */
-+
-+
-+ # system call handler stub
-+ENTRY(system_call)
-+ pushl %eax # save orig_eax
-+ SAVE_ALL
-+ GET_THREAD_INFO(%ebp)
-+ # system call tracing in operation / emulation
-+ /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
-+ testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
-+ jnz syscall_trace_entry
-+ cmpl $(nr_syscalls), %eax
-+ jae syscall_badsys
-+syscall_call:
-+ call *sys_call_table(,%eax,4)
-+ movl %eax,EAX(%esp) # store the return value
-+syscall_exit:
-+ XEN_BLOCK_EVENTS(%esi) # make sure we don't miss an interrupt
-+ # setting need_resched or sigpending
-+ # between sampling and the iret
-+ movl TI_flags(%ebp), %ecx
-+ testw $_TIF_ALLWORK_MASK, %cx # current->work
-+ jne syscall_exit_work
-+
-+restore_all:
-+#if 0 /* XEN */
-+ movl EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
-+ # Warning: OLDSS(%esp) contains the wrong/random values if we
-+ # are returning to the kernel.
-+ # See comments in process.c:copy_thread() for details.
-+ movb OLDSS(%esp), %ah
-+ movb CS(%esp), %al
-+ andl $(VM_MASK | (4 << 8) | 3), %eax
-+ cmpl $((4 << 8) | 3), %eax
-+ je ldt_ss # returning to user-space with LDT SS
-+#endif /* XEN */
-+restore_nocheck:
-+ testl $(VM_MASK|NMI_MASK), EFLAGS(%esp)
-+ jnz hypervisor_iret
-+ movb EVENT_MASK(%esp), %al
-+ notb %al # %al == ~saved_mask
-+ XEN_GET_VCPU_INFO(%esi)
-+ andb evtchn_upcall_mask(%esi),%al
-+ andb $1,%al # %al == mask & ~saved_mask
-+ jnz restore_all_enable_events # != 0 => reenable event delivery
-+ XEN_PUT_VCPU_INFO(%esi)
-+ RESTORE_REGS
-+ addl $4, %esp
-+1: iret
-+.section .fixup,"ax"
-+iret_exc:
-+ pushl $0 # no error code
-+ pushl $do_iret_error
-+ jmp error_code
-+.previous
-+.section __ex_table,"a"
-+ .align 4
-+ .long 1b,iret_exc
-+.previous
-+
-+hypervisor_iret:
-+ andl $~NMI_MASK, EFLAGS(%esp)
-+ RESTORE_REGS
-+ addl $4, %esp
-+ jmp hypercall_page + (__HYPERVISOR_iret * 32)
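
The `jmp hypercall_page + (__HYPERVISOR_iret * 32)` works because the hypervisor fills the hypercall page (reserved in head-xen.S below) with one 32-byte stub per hypercall number. The same address arithmetic in C, as a sketch (the helper is illustrative; real kernels wrap this in hypercall macros):

	extern char hypercall_page[];

	/* Each hypercall stub occupies 32 bytes of the hypercall page. */
	static inline void *hypercall_stub(unsigned int nr)
	{
		return hypercall_page + nr * 32;
	}
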
-+
-+#if 0 /* XEN */
-+ldt_ss:
-+ larl OLDSS(%esp), %eax
-+ jnz restore_nocheck
-+ testl $0x00400000, %eax # returning to 32bit stack?
-+ jnz restore_nocheck # allright, normal return
-+ /* If returning to userspace with 16bit stack,
-+ * try to fix the higher word of ESP, as the CPU
-+ * won't restore it.
-+ * This is an "official" bug of all the x86-compatible
-+ * CPUs, which we can try to work around to make
-+ * dosemu and wine happy. */
-+ subl $8, %esp # reserve space for switch16 pointer
-+ cli
-+ movl %esp, %eax
-+ /* Set up the 16bit stack frame with switch32 pointer on top,
-+ * and a switch16 pointer on top of the current frame. */
-+ call setup_x86_bogus_stack
-+ RESTORE_REGS
-+ lss 20+4(%esp), %esp # switch to 16bit stack
-+1: iret
-+.section __ex_table,"a"
-+ .align 4
-+ .long 1b,iret_exc
-+.previous
-+#endif /* XEN */
-+
-+ # perform work that needs to be done immediately before resumption
-+ ALIGN
-+work_pending:
-+ testb $_TIF_NEED_RESCHED, %cl
-+ jz work_notifysig
-+work_resched:
-+ call schedule
-+ XEN_BLOCK_EVENTS(%esi) # make sure we don't miss an interrupt
-+ # setting need_resched or sigpending
-+ # between sampling and the iret
-+ movl TI_flags(%ebp), %ecx
-+ andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
-+ # than syscall tracing?
-+ jz restore_all
-+ testb $_TIF_NEED_RESCHED, %cl
-+ jnz work_resched
-+
-+work_notifysig: # deal with pending signals and
-+ # notify-resume requests
-+ testl $VM_MASK, EFLAGS(%esp)
-+ movl %esp, %eax
-+ jne work_notifysig_v86 # returning to kernel-space or
-+ # vm86-space
-+ xorl %edx, %edx
-+ call do_notify_resume
-+ jmp resume_userspace
-+
-+ ALIGN
-+work_notifysig_v86:
-+#ifdef CONFIG_VM86
-+ pushl %ecx # save ti_flags for do_notify_resume
-+ call save_v86_state # %eax contains pt_regs pointer
-+ popl %ecx
-+ movl %eax, %esp
-+ xorl %edx, %edx
-+ call do_notify_resume
-+ jmp resume_userspace
-+#endif
-+
-+ # perform syscall exit tracing
-+ ALIGN
-+syscall_trace_entry:
-+ movl $-ENOSYS,EAX(%esp)
-+ movl %esp, %eax
-+ xorl %edx,%edx
-+ call do_syscall_trace
-+ cmpl $0, %eax
-+ jne resume_userspace # ret != 0 -> running under PTRACE_SYSEMU,
-+ # so must skip actual syscall
-+ movl ORIG_EAX(%esp), %eax
-+ cmpl $(nr_syscalls), %eax
-+ jnae syscall_call
-+ jmp syscall_exit
-+
-+ # perform syscall exit tracing
-+ ALIGN
-+syscall_exit_work:
-+ testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
-+ jz work_pending
-+ XEN_UNBLOCK_EVENTS(%esi) # could let do_syscall_trace() call
-+ # schedule() instead
-+ movl %esp, %eax
-+ movl $1, %edx
-+ call do_syscall_trace
-+ jmp resume_userspace
-+
-+ ALIGN
-+syscall_fault:
-+ pushl %eax # save orig_eax
-+ SAVE_ALL
-+ GET_THREAD_INFO(%ebp)
-+ movl $-EFAULT,EAX(%esp)
-+ jmp resume_userspace
-+
-+ ALIGN
-+syscall_badsys:
-+ movl $-ENOSYS,EAX(%esp)
-+ jmp resume_userspace
-+
-+#if 0 /* XEN */
-+#define FIXUP_ESPFIX_STACK \
-+ movl %esp, %eax; \
-+ /* switch to 32bit stack using the pointer on top of 16bit stack */ \
-+ lss %ss:CPU_16BIT_STACK_SIZE-8, %esp; \
-+ /* copy data from 16bit stack to 32bit stack */ \
-+ call fixup_x86_bogus_stack; \
-+ /* put ESP to the proper location */ \
-+ movl %eax, %esp;
-+#define UNWIND_ESPFIX_STACK \
-+ pushl %eax; \
-+ movl %ss, %eax; \
-+ /* see if on 16bit stack */ \
-+ cmpw $__ESPFIX_SS, %ax; \
-+ jne 28f; \
-+ movl $__KERNEL_DS, %edx; \
-+ movl %edx, %ds; \
-+ movl %edx, %es; \
-+ /* switch to 32bit stack */ \
-+ FIXUP_ESPFIX_STACK \
-+28: popl %eax;
-+
-+/*
-+ * Build the entry stubs and pointer table with
-+ * some assembler magic.
-+ */
-+.data
-+ENTRY(interrupt)
-+.text
-+
-+vector=0
-+ENTRY(irq_entries_start)
-+.rept NR_IRQS
-+ ALIGN
-+1: pushl $vector-256
-+ jmp common_interrupt
-+.data
-+ .long 1b
-+.text
-+vector=vector+1
-+.endr
-+
-+ ALIGN
-+common_interrupt:
-+ SAVE_ALL
-+ movl %esp,%eax
-+ call do_IRQ
-+ jmp ret_from_intr
-+
-+#define BUILD_INTERRUPT(name, nr) \
-+ENTRY(name) \
-+ pushl $nr-256; \
-+ SAVE_ALL \
-+ movl %esp,%eax; \
-+ call smp_/**/name; \
-+ jmp ret_from_intr;
-+
-+/* The include is where all of the SMP etc. interrupts come from */
-+#include "entry_arch.h"
-+#endif /* XEN */
-+
-+ENTRY(divide_error)
-+ pushl $0 # no error code
-+ pushl $do_divide_error
-+ ALIGN
-+error_code:
-+ pushl %ds
-+ pushl %eax
-+ xorl %eax, %eax
-+ pushl %ebp
-+ pushl %edi
-+ pushl %esi
-+ pushl %edx
-+ decl %eax # eax = -1
-+ pushl %ecx
-+ pushl %ebx
-+ cld
-+ pushl %es
-+# UNWIND_ESPFIX_STACK
-+ popl %ecx
-+ movl ES(%esp), %edi # get the function address
-+ movl ORIG_EAX(%esp), %edx # get the error code
-+ movl %eax, ORIG_EAX(%esp)
-+ movl %ecx, ES(%esp)
-+ movl $(__USER_DS), %ecx
-+ movl %ecx, %ds
-+ movl %ecx, %es
-+ movl %esp,%eax # pt_regs pointer
-+ call *%edi
-+ jmp ret_from_exception
-+
-+# A note on the "critical region" in our callback handler.
-+# We want to avoid stacking callback handlers due to events occurring
-+# during handling of the last event. To do this, we keep events disabled
-+# until we've done all processing. HOWEVER, we must enable events before
-+# popping the stack frame (can't be done atomically) and so it would still
-+# be possible to get enough handler activations to overflow the stack.
-+# Although unlikely, bugs of that kind are hard to track down, so we'd
-+# like to avoid the possibility.
-+# So, on entry to the handler we detect whether we interrupted an
-+# existing activation in its critical region -- if so, we pop the current
-+# activation and restart the handler using the previous one.
-+ENTRY(hypervisor_callback)
-+ pushl %eax
-+ SAVE_ALL
-+ movl EIP(%esp),%eax
-+ cmpl $scrit,%eax
-+ jb 11f
-+ cmpl $ecrit,%eax
-+ jb critical_region_fixup
-+11: push %esp
-+ call evtchn_do_upcall
-+ add $4,%esp
-+ jmp ret_from_intr
-+
-+ ALIGN
-+restore_all_enable_events:
-+ XEN_LOCKED_UNBLOCK_EVENTS(%esi)
-+scrit: /**** START OF CRITICAL REGION ****/
-+ XEN_TEST_PENDING(%esi)
-+ jnz 14f # process more events if necessary...
-+ XEN_PUT_VCPU_INFO(%esi)
-+ RESTORE_ALL
-+14: XEN_LOCKED_BLOCK_EVENTS(%esi)
-+ XEN_PUT_VCPU_INFO(%esi)
-+ jmp 11b
-+ecrit: /**** END OF CRITICAL REGION ****/
-+# [How we do the fixup]. We want to merge the current stack frame with the
-+# just-interrupted frame. How we do this depends on where in the critical
-+# region the interrupted handler was executing, and so how many saved
-+# registers are in each frame. We do this quickly using the lookup table
-+# 'critical_fixup_table'. For each byte offset in the critical region, it
-+# provides the number of bytes which have already been popped from the
-+# interrupted stack frame.
-+critical_region_fixup:
-+ addl $critical_fixup_table-scrit,%eax
-+ movzbl (%eax),%eax # %eax contains num bytes popped
-+ cmpb $0xff,%al # 0xff => vcpu_info critical region
-+ jne 15f
-+ GET_THREAD_INFO(%ebp)
-+ XEN_PUT_VCPU_INFO(%esi) # abort vcpu_info critical region
-+ xorl %eax,%eax
-+15: mov %esp,%esi
-+ add %eax,%esi # %esi points at end of src region
-+ mov %esp,%edi
-+ add $0x34,%edi # %edi points at end of dst region
-+ mov %eax,%ecx
-+	shr $2,%ecx			# convert byte count to words
-+ je 17f # skip loop if nothing to copy
-+16: subl $4,%esi # pre-decrementing copy loop
-+ subl $4,%edi
-+ movl (%esi),%eax
-+ movl %eax,(%edi)
-+ loop 16b
-+17: movl %edi,%esp # final %edi is top of merged stack
-+ jmp 11b
-+
-+critical_fixup_table:
-+ .byte 0xff,0xff,0xff # testb $0xff,(%esi) = XEN_TEST_PENDING
-+ .byte 0xff,0xff # jnz 14f
-+ XEN_PUT_VCPU_INFO_fixup
-+ .byte 0x00 # pop %ebx
-+ .byte 0x04 # pop %ecx
-+ .byte 0x08 # pop %edx
-+ .byte 0x0c # pop %esi
-+ .byte 0x10 # pop %edi
-+ .byte 0x14 # pop %ebp
-+ .byte 0x18 # pop %eax
-+ .byte 0x1c # pop %ds
-+ .byte 0x20 # pop %es
-+ .byte 0x24,0x24,0x24 # add $4,%esp
-+ .byte 0x28 # iret
-+ .byte 0xff,0xff,0xff,0xff # movb $1,1(%esi)
-+ XEN_PUT_VCPU_INFO_fixup
-+ .byte 0x00,0x00 # jmp 11b
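
The fixup copy loop above merges the new activation's frame into the partially popped interrupted frame: the table entry gives how many bytes the interrupted code had already popped, and that many bytes of the current frame are slid up until the two frames coincide, with %esp moved to the merged top. Roughly, in C, for intuition only (the 0x34 mirrors the frame size above; the function is an invented illustration):

	#include <string.h>

	#define FRAME_BYTES 0x34	/* bytes popped before the final iret */

	/* num_popped comes from critical_fixup_table for the faulting
	 * address; returns the merged stack pointer. */
	static unsigned long merge_frames(unsigned char *esp, unsigned long num_popped)
	{
		memmove(esp + FRAME_BYTES - num_popped, esp, num_popped);
		return (unsigned long)(esp + FRAME_BYTES - num_popped);
	}
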
-+
-+# Hypervisor uses this for application faults while it executes.
-+ENTRY(failsafe_callback)
-+1: popl %ds
-+2: popl %es
-+3: popl %fs
-+4: popl %gs
-+ subl $4,%esp
-+ SAVE_ALL
-+ jmp ret_from_exception
-+.section .fixup,"ax"; \
-+6: movl $0,(%esp); \
-+ jmp 1b; \
-+7: movl $0,(%esp); \
-+ jmp 2b; \
-+8: movl $0,(%esp); \
-+ jmp 3b; \
-+9: movl $0,(%esp); \
-+ jmp 4b; \
-+.previous; \
-+.section __ex_table,"a";\
-+ .align 4; \
-+ .long 1b,6b; \
-+ .long 2b,7b; \
-+ .long 3b,8b; \
-+ .long 4b,9b; \
-+.previous
-+
-+ENTRY(coprocessor_error)
-+ pushl $0
-+ pushl $do_coprocessor_error
-+ jmp error_code
-+
-+ENTRY(simd_coprocessor_error)
-+ pushl $0
-+ pushl $do_simd_coprocessor_error
-+ jmp error_code
-+
-+ENTRY(device_not_available)
-+ pushl $-1 # mark this as an int
-+ SAVE_ALL
-+ #preempt_stop /* This is already an interrupt gate on Xen. */
-+ call math_state_restore
-+ jmp ret_from_exception
-+
-+#ifdef CONFIG_X86_SYSENTER
-+/*
-+ * Debug traps and NMI can happen at the one SYSENTER instruction
-+ * that sets up the real kernel stack. Check here, since we can't
-+ * allow the wrong stack to be used.
-+ *
-+ * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
-+ * already pushed 3 words if it hits on the sysenter instruction:
-+ * eflags, cs and eip.
-+ *
-+ * We just load the right stack, and push the three (known) values
-+ * by hand onto the new stack - while updating the return eip past
-+ * the instruction that would have done it for sysenter.
-+ */
-+#define FIX_STACK(offset, ok, label) \
-+ cmpw $__KERNEL_CS,4(%esp); \
-+ jne ok; \
-+label: \
-+ movl TSS_sysenter_esp0+offset(%esp),%esp; \
-+ pushfl; \
-+ pushl $__KERNEL_CS; \
-+ pushl $sysenter_past_esp
-+#endif /* CONFIG_X86_SYSENTER */
-+
-+KPROBE_ENTRY(debug)
-+#ifdef CONFIG_X86_SYSENTER
-+ cmpl $sysenter_entry,(%esp)
-+ jne debug_stack_correct
-+ FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
-+debug_stack_correct:
-+#endif /* CONFIG_X86_SYSENTER */
-+ pushl $-1 # mark this as an int
-+ SAVE_ALL
-+ xorl %edx,%edx # error code 0
-+ movl %esp,%eax # pt_regs pointer
-+ call do_debug
-+ jmp ret_from_exception
-+ .previous .text
-+
-+ENTRY(nmi)
-+ pushl %eax
-+ SAVE_ALL
-+ xorl %edx,%edx # zero error code
-+ movl %esp,%eax # pt_regs pointer
-+ call do_nmi
-+ orl $NMI_MASK, EFLAGS(%esp)
-+ jmp restore_all
-+
-+#if 0 /* XEN */
-+/*
-+ * NMI is doubly nasty. It can happen _while_ we're handling
-+ * a debug fault, and the debug fault hasn't yet been able to
-+ * clear up the stack. So we first check whether we got an
-+ * NMI on the sysenter entry path, but after that we need to
-+ * check whether we got an NMI on the debug path where the debug
-+ * fault happened on the sysenter path.
-+ */
-+ENTRY(nmi)
-+ pushl %eax
-+ movl %ss, %eax
-+ cmpw $__ESPFIX_SS, %ax
-+ popl %eax
-+ je nmi_16bit_stack
-+ cmpl $sysenter_entry,(%esp)
-+ je nmi_stack_fixup
-+ pushl %eax
-+ movl %esp,%eax
-+ /* Do not access memory above the end of our stack page,
-+ * it might not exist.
-+ */
-+ andl $(THREAD_SIZE-1),%eax
-+ cmpl $(THREAD_SIZE-20),%eax
-+ popl %eax
-+ jae nmi_stack_correct
-+ cmpl $sysenter_entry,12(%esp)
-+ je nmi_debug_stack_check
-+nmi_stack_correct:
-+ pushl %eax
-+ SAVE_ALL
-+ xorl %edx,%edx # zero error code
-+ movl %esp,%eax # pt_regs pointer
-+ call do_nmi
-+ jmp restore_all
-+
-+nmi_stack_fixup:
-+ FIX_STACK(12,nmi_stack_correct, 1)
-+ jmp nmi_stack_correct
-+nmi_debug_stack_check:
-+ cmpw $__KERNEL_CS,16(%esp)
-+ jne nmi_stack_correct
-+ cmpl $debug,(%esp)
-+ jb nmi_stack_correct
-+ cmpl $debug_esp_fix_insn,(%esp)
-+ ja nmi_stack_correct
-+ FIX_STACK(24,nmi_stack_correct, 1)
-+ jmp nmi_stack_correct
-+
-+nmi_16bit_stack:
-+ /* create the pointer to lss back */
-+ pushl %ss
-+ pushl %esp
-+ movzwl %sp, %esp
-+ addw $4, (%esp)
-+ /* copy the iret frame of 12 bytes */
-+ .rept 3
-+ pushl 16(%esp)
-+ .endr
-+ pushl %eax
-+ SAVE_ALL
-+ FIXUP_ESPFIX_STACK # %eax == %esp
-+ xorl %edx,%edx # zero error code
-+ call do_nmi
-+ RESTORE_REGS
-+ lss 12+4(%esp), %esp # back to 16bit stack
-+1: iret
-+.section __ex_table,"a"
-+ .align 4
-+ .long 1b,iret_exc
-+.previous
-+#endif /* XEN */
-+
-+KPROBE_ENTRY(int3)
-+ pushl $-1 # mark this as an int
-+ SAVE_ALL
-+ xorl %edx,%edx # zero error code
-+ movl %esp,%eax # pt_regs pointer
-+ call do_int3
-+ jmp ret_from_exception
-+ .previous .text
-+
-+ENTRY(overflow)
-+ pushl $0
-+ pushl $do_overflow
-+ jmp error_code
-+
-+ENTRY(bounds)
-+ pushl $0
-+ pushl $do_bounds
-+ jmp error_code
-+
-+ENTRY(invalid_op)
-+ pushl $0
-+ pushl $do_invalid_op
-+ jmp error_code
-+
-+ENTRY(coprocessor_segment_overrun)
-+ pushl $0
-+ pushl $do_coprocessor_segment_overrun
-+ jmp error_code
-+
-+ENTRY(invalid_TSS)
-+ pushl $do_invalid_TSS
-+ jmp error_code
-+
-+ENTRY(segment_not_present)
-+ pushl $do_segment_not_present
-+ jmp error_code
-+
-+ENTRY(stack_segment)
-+ pushl $do_stack_segment
-+ jmp error_code
-+
-+KPROBE_ENTRY(general_protection)
-+ pushl $do_general_protection
-+ jmp error_code
-+ .previous .text
-+
-+ENTRY(alignment_check)
-+ pushl $do_alignment_check
-+ jmp error_code
-+
-+KPROBE_ENTRY(page_fault)
-+ pushl $do_page_fault
-+ jmp error_code
-+ .previous .text
-+
-+#ifdef CONFIG_X86_MCE
-+ENTRY(machine_check)
-+ pushl $0
-+ pushl machine_check_vector
-+ jmp error_code
-+#endif
-+
-+ENTRY(fixup_4gb_segment)
-+ pushl $do_fixup_4gb_segment
-+ jmp error_code
-+
-+.section .rodata,"a"
-+#include "syscall_table.S"
-+
-+syscall_table_size=(.-sys_call_table)
-diff --git a/arch/i386/kernel/fixup.c b/arch/i386/kernel/fixup.c
-new file mode 100644
-index 0000000..5188b23
---- /dev/null
-+++ b/arch/i386/kernel/fixup.c
-@@ -0,0 +1,95 @@
-+/******************************************************************************
-+ * fixup.c
-+ *
-+ * Binary-rewriting of certain IA32 instructions, on notification by Xen.
-+ * Used to avoid repeated slow emulation of common instructions used by the
-+ * user-space TLS (Thread-Local Storage) libraries.
-+ *
-+ * **** NOTE ****
-+ * Issues with the binary rewriting have caused it to be removed. Instead
-+ * we rely on Xen's emulator to boot the kernel, and then print a banner
-+ * message recommending that the user disables /lib/tls.
-+ *
-+ * Copyright (c) 2004, K A Fraser
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/init.h>
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+#include <linux/kernel.h>
-+#include <linux/delay.h>
-+#include <linux/version.h>
-+
-+#define DP(_f, _args...) printk(KERN_ALERT " " _f "\n" , ## _args )
-+
-+fastcall void do_fixup_4gb_segment(struct pt_regs *regs, long error_code)
-+{
-+ static unsigned long printed = 0;
-+ char info[100];
-+ int i;
-+
-+ if (test_and_set_bit(0, &printed))
-+ return;
-+
-+ HYPERVISOR_vm_assist(
-+ VMASST_CMD_disable, VMASST_TYPE_4gb_segments_notify);
-+
-+ sprintf(info, "%s (pid=%d)", current->comm, current->tgid);
-+
-+
-+ DP("");
-+ DP("***************************************************************");
-+ DP("***************************************************************");
-+ DP("** WARNING: Currently emulating unsupported memory accesses **");
-+ DP("** in /lib/tls glibc libraries. The emulation is **");
-+ DP("** slow. To ensure full performance you should **");
-+ DP("** install a 'xen-friendly' (nosegneg) version of **");
-+ DP("** the library, or disable tls support by executing **");
-+ DP("** the following as root: **");
-+ DP("** mv /lib/tls /lib/tls.disabled **");
-+ DP("** Offending process: %-38.38s **", info);
-+ DP("***************************************************************");
-+ DP("***************************************************************");
-+ DP("");
-+
-+ for (i = 5; i > 0; i--) {
-+ printk("Pausing... %d", i);
-+ mdelay(1000);
-+ printk("\b\b\b\b\b\b\b\b\b\b\b\b");
-+ }
-+
-+ printk("Continuing...\n\n");
-+}
-+
-+static int __init fixup_init(void)
-+{
-+ HYPERVISOR_vm_assist(
-+ VMASST_CMD_enable, VMASST_TYPE_4gb_segments_notify);
-+ return 0;
-+}
-+__initcall(fixup_init);
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
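
Two small patterns in fixup.c are worth noting: the VM assist is enabled at boot by fixup_init() and disabled again the first time the fault fires, and test_and_set_bit() makes the banner a race-free one-shot. The one-shot idiom in isolation, as a sketch (function and message are illustrative):

	#include <linux/bitops.h>
	#include <linux/kernel.h>

	static unsigned long warned;

	/* Whichever CPU atomically flips bit 0 first prints; every later
	 * caller sees the bit already set and returns immediately. */
	static void warn_once(const char *what)
	{
		if (test_and_set_bit(0, &warned))
			return;
		printk(KERN_ALERT "first occurrence: %s\n", what);
	}
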
-diff --git a/arch/i386/kernel/head-xen.S b/arch/i386/kernel/head-xen.S
-new file mode 100644
-index 0000000..3032a00
---- /dev/null
-+++ b/arch/i386/kernel/head-xen.S
-@@ -0,0 +1,171 @@
-+
-+
-+.text
-+#include <linux/config.h>
-+#include <linux/threads.h>
-+#include <linux/linkage.h>
-+#include <asm/segment.h>
-+#include <asm/page.h>
-+#include <asm/thread_info.h>
-+#include <asm/asm-offsets.h>
-+#include <xen/interface/arch-x86_32.h>
-+
-+/*
-+ * References to members of the new_cpu_data structure.
-+ */
-+
-+#define X86 new_cpu_data+CPUINFO_x86
-+#define X86_VENDOR new_cpu_data+CPUINFO_x86_vendor
-+#define X86_MODEL new_cpu_data+CPUINFO_x86_model
-+#define X86_MASK new_cpu_data+CPUINFO_x86_mask
-+#define X86_HARD_MATH new_cpu_data+CPUINFO_hard_math
-+#define X86_CPUID new_cpu_data+CPUINFO_cpuid_level
-+#define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability
-+#define X86_VENDOR_ID new_cpu_data+CPUINFO_x86_vendor_id
-+
-+ENTRY(startup_32)
-+ movl %esi,xen_start_info
-+ cld
-+
-+ /* Set up the stack pointer */
-+ movl $(init_thread_union+THREAD_SIZE),%esp
-+
-+ /* get vendor info */
-+ xorl %eax,%eax # call CPUID with 0 -> return vendor ID
-+ cpuid
-+ movl %eax,X86_CPUID # save CPUID level
-+ movl %ebx,X86_VENDOR_ID # lo 4 chars
-+ movl %edx,X86_VENDOR_ID+4 # next 4 chars
-+ movl %ecx,X86_VENDOR_ID+8 # last 4 chars
-+
-+ movl $1,%eax # Use the CPUID instruction to get CPU type
-+ cpuid
-+ movb %al,%cl # save reg for future use
-+ andb $0x0f,%ah # mask processor family
-+ movb %ah,X86
-+ andb $0xf0,%al # mask model
-+ shrb $4,%al
-+ movb %al,X86_MODEL
-+	andb $0x0f,%cl		# mask the mask (stepping) revision
-+ movb %cl,X86_MASK
-+ movl %edx,X86_CAPABILITY
-+
-+ movb $1,X86_HARD_MATH
-+
-+ xorl %eax,%eax # Clear FS/GS and LDT
-+ movl %eax,%fs
-+ movl %eax,%gs
-+ cld # gcc2 wants the direction flag cleared at all times
-+
-+ call start_kernel
-+L6:
-+ jmp L6 # main should never return here, but
-+ # just in case, we know what happens.
-+
-+#define HYPERCALL_PAGE_OFFSET 0x1000
-+.org HYPERCALL_PAGE_OFFSET
-+ENTRY(hypercall_page)
-+.skip 0x1000
-+
-+/*
-+ * Real beginning of normal "text" segment
-+ */
-+ENTRY(stext)
-+ENTRY(_stext)
-+
-+/*
-+ * BSS section
-+ */
-+.section ".bss.page_aligned","w"
-+ENTRY(empty_zero_page)
-+ .fill 4096,1,0
-+
-+/*
-+ * This starts the data section.
-+ */
-+.data
-+
-+/*
-+ * The Global Descriptor Table contains 32 quadwords, per-CPU.
-+ */
-+ENTRY(cpu_gdt_table)
-+ .quad 0x0000000000000000 /* NULL descriptor */
-+ .quad 0x0000000000000000 /* 0x0b reserved */
-+ .quad 0x0000000000000000 /* 0x13 reserved */
-+ .quad 0x0000000000000000 /* 0x1b reserved */
-+ .quad 0x0000000000000000 /* 0x20 unused */
-+ .quad 0x0000000000000000 /* 0x28 unused */
-+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
-+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
-+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
-+ .quad 0x0000000000000000 /* 0x4b reserved */
-+ .quad 0x0000000000000000 /* 0x53 reserved */
-+ .quad 0x0000000000000000 /* 0x5b reserved */
-+
-+ .quad 0x00cf9a000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
-+ .quad 0x00cf92000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
-+ .quad 0x00cffa000000ffff /* 0x73 user 4GB code at 0x00000000 */
-+ .quad 0x00cff2000000ffff /* 0x7b user 4GB data at 0x00000000 */
-+
-+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
-+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
-+
-+ /*
-+ * Segments used for calling PnP BIOS have byte granularity.
-+ * The code segments and data segments have fixed 64k limits;
-+ * the transfer segment sizes are set at run time.
-+ */
-+ .quad 0x0000000000000000 /* 0x90 32-bit code */
-+ .quad 0x0000000000000000 /* 0x98 16-bit code */
-+ .quad 0x0000000000000000 /* 0xa0 16-bit data */
-+ .quad 0x0000000000000000 /* 0xa8 16-bit data */
-+ .quad 0x0000000000000000 /* 0xb0 16-bit data */
-+
-+ /*
-+ * The APM segments have byte granularity and their bases
-+ * are set at run time. All have 64k limits.
-+ */
-+ .quad 0x0000000000000000 /* 0xb8 APM CS code */
-+ .quad 0x0000000000000000 /* 0xc0 APM CS 16 code (16 bit) */
-+ .quad 0x0000000000000000 /* 0xc8 APM DS data */
-+
-+ .quad 0x0000000000000000 /* 0xd0 - ESPFIX 16-bit SS */
-+ .quad 0x0000000000000000 /* 0xd8 - unused */
-+ .quad 0x0000000000000000 /* 0xe0 - unused */
-+ .quad 0x0000000000000000 /* 0xe8 - unused */
-+ .quad 0x0000000000000000 /* 0xf0 - unused */
-+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
-+
-+/*
-+ * __xen_guest information
-+ */
-+.macro utoa value
-+ .if (\value) < 0 || (\value) >= 0x10
-+ utoa (((\value)>>4)&0x0fffffff)
-+ .endif
-+ .if ((\value) & 0xf) < 10
-+ .byte '0' + ((\value) & 0xf)
-+ .else
-+ .byte 'A' + ((\value) & 0xf) - 10
-+ .endif
-+.endm
-+
-+.section __xen_guest
-+ .ascii "GUEST_OS=linux,GUEST_VER=2.6"
-+ .ascii ",XEN_VER=xen-3.0"
-+ .ascii ",VIRT_BASE=0x"
-+ utoa __PAGE_OFFSET
-+ .ascii ",HYPERCALL_PAGE=0x"
-+ utoa ((__PHYSICAL_START+HYPERCALL_PAGE_OFFSET)>>PAGE_SHIFT)
-+ .ascii ",FEATURES=writable_page_tables"
-+ .ascii "|writable_descriptor_tables"
-+ .ascii "|auto_translated_physmap"
-+ .ascii "|pae_pgdir_above_4gb"
-+ .ascii "|supervisor_mode_kernel"
-+#ifdef CONFIG_X86_PAE
-+ .ascii ",PAE=yes"
-+#else
-+ .ascii ",PAE=no"
-+#endif
-+ .ascii ",LOADER=generic"
-+ .byte 0
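
The `utoa` macro above is a compile-time recursive hex printer: it recurses on value>>4 until one nibble remains, then emits '0'-'9' or 'A'-'F' per nibble, so digits come out most-significant first. The same algorithm in C (the standalone form and test value are illustrative):

	#include <stdio.h>

	/* Recurse on the high nibbles first, then emit one hex digit. */
	static void utoa_hex(unsigned long value)
	{
		if (value >= 0x10)
			utoa_hex(value >> 4);
		putchar((value & 0xf) < 10 ? '0' + (value & 0xf)
					   : 'A' + (value & 0xf) - 10);
	}

	int main(void)
	{
		utoa_hex(0xC0000000UL);	/* a typical __PAGE_OFFSET: prints C0000000 */
		putchar('\n');
		return 0;
	}
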
-diff --git a/arch/i386/kernel/init_task-xen.c b/arch/i386/kernel/init_task-xen.c
-new file mode 100644
-index 0000000..c4da1cc
---- /dev/null
-+++ b/arch/i386/kernel/init_task-xen.c
-@@ -0,0 +1,51 @@
-+#include <linux/mm.h>
-+#include <linux/module.h>
-+#include <linux/sched.h>
-+#include <linux/init.h>
-+#include <linux/init_task.h>
-+#include <linux/fs.h>
-+#include <linux/mqueue.h>
-+
-+#include <asm/uaccess.h>
-+#include <asm/pgtable.h>
-+#include <asm/desc.h>
-+
-+static struct fs_struct init_fs = INIT_FS;
-+static struct files_struct init_files = INIT_FILES;
-+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-+
-+#define swapper_pg_dir ((pgd_t *)NULL)
-+struct mm_struct init_mm = INIT_MM(init_mm);
-+#undef swapper_pg_dir
-+
-+EXPORT_SYMBOL(init_mm);
-+
-+/*
-+ * Initial thread structure.
-+ *
-+ * We need to make sure that this is THREAD_SIZE aligned due to the
-+ * way process stacks are handled. This is done by having a special
-+ * "init_task" linker map entry.
-+ */
-+union thread_union init_thread_union
-+ __attribute__((__section__(".data.init_task"))) =
-+ { INIT_THREAD_INFO(init_task) };
-+
-+/*
-+ * Initial task structure.
-+ *
-+ * All other task structs will be allocated on slabs in fork.c
-+ */
-+struct task_struct init_task = INIT_TASK(init_task);
-+
-+EXPORT_SYMBOL(init_task);
-+
-+#ifndef CONFIG_X86_NO_TSS
-+/*
-+ * per-CPU TSS segments. Threads are completely 'soft' on Linux,
-+ * no more per-task TSS's.
-+ */
-+DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
-+#endif
-+
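
The THREAD_SIZE alignment that the comment above insists on is what lets current_thread_info() be computed by masking the stack pointer. A sketch of that relationship, mirroring the i386 implementation (the helper name is an assumption):

	/* Because every task stack is THREAD_SIZE-aligned, the thread_info
	 * that shares the allocation is found by masking %esp. */
	static inline struct thread_info *stack_thread_info(unsigned long esp)
	{
		return (struct thread_info *)(esp & ~(THREAD_SIZE - 1));
	}
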
-diff --git a/arch/i386/kernel/io_apic-xen.c b/arch/i386/kernel/io_apic-xen.c
-new file mode 100644
-index 0000000..5ef6513
---- /dev/null
-+++ b/arch/i386/kernel/io_apic-xen.c
-@@ -0,0 +1,2746 @@
-+/*
-+ * Intel IO-APIC support for multi-Pentium hosts.
-+ *
-+ * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
-+ *
-+ * Many thanks to Stig Venaas for trying out countless experimental
-+ * patches and reporting/debugging problems patiently!
-+ *
-+ * (c) 1999, Multiple IO-APIC support, developed by
-+ * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
-+ * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
-+ * further tested and cleaned up by Zach Brown <zab@redhat.com>
-+ * and Ingo Molnar <mingo@redhat.com>
-+ *
-+ * Fixes
-+ * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
-+ * thanks to Eric Gilmore
-+ * and Rolf G. Tews
-+ * for testing these extensively
-+ * Paul Diefenbaugh : Added full ACPI support
-+ */
-+
-+#include <linux/mm.h>
-+#include <linux/interrupt.h>
-+#include <linux/init.h>
-+#include <linux/delay.h>
-+#include <linux/sched.h>
-+#include <linux/config.h>
-+#include <linux/smp_lock.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/compiler.h>
-+#include <linux/acpi.h>
-+#include <linux/module.h>
-+#include <linux/sysdev.h>
-+
-+#include <asm/io.h>
-+#include <asm/smp.h>
-+#include <asm/desc.h>
-+#include <asm/timer.h>
-+#include <asm/i8259.h>
-+
-+#include <mach_apic.h>
-+
-+#include "io_ports.h"
-+
-+#ifdef CONFIG_XEN
-+
-+#include <xen/interface/xen.h>
-+#include <xen/interface/physdev.h>
-+
-+/* Fake i8259 */
-+#define make_8259A_irq(_irq) (io_apic_irqs &= ~(1UL<<(_irq)))
-+#define disable_8259A_irq(_irq) ((void)0)
-+#define i8259A_irq_pending(_irq) (0)
-+
-+unsigned long io_apic_irqs;
-+
-+static inline unsigned int xen_io_apic_read(unsigned int apic, unsigned int reg)
-+{
-+ physdev_op_t op;
-+ int ret;
-+
-+ op.cmd = PHYSDEVOP_APIC_READ;
-+ op.u.apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
-+ op.u.apic_op.reg = reg;
-+ ret = HYPERVISOR_physdev_op(&op);
-+ if (ret)
-+ return ret;
-+ return op.u.apic_op.value;
-+}
-+
-+static inline void xen_io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
-+{
-+ physdev_op_t op;
-+
-+ op.cmd = PHYSDEVOP_APIC_WRITE;
-+ op.u.apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
-+ op.u.apic_op.reg = reg;
-+ op.u.apic_op.value = value;
-+ HYPERVISOR_physdev_op(&op);
-+}
-+
-+#define io_apic_read(a,r) xen_io_apic_read(a,r)
-+#define io_apic_write(a,r,v) xen_io_apic_write(a,r,v)
-+
-+#endif /* CONFIG_XEN */
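-+/*
-+ * Editorial note (illustrative, not part of the original patch): with the
-+ * two macros above, every io_apic_read()/io_apic_write() in the rest of
-+ * this file turns into a PHYSDEVOP hypercall, so the generic IO-APIC code
-+ * below runs unmodified even though dom0 has no direct MMIO mapping of
-+ * the IO-APIC.  For example, a read used later in this file:
-+ *
-+ *	reg_01.raw = io_apic_read(apic, 1);
-+ *
-+ * becomes HYPERVISOR_physdev_op(&op) with op.cmd == PHYSDEVOP_APIC_READ.
-+ */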
-+
-+int (*ioapic_renumber_irq)(int ioapic, int irq);
-+atomic_t irq_mis_count;
-+
-+/* Where, if anywhere, the i8259 is connected in external int mode */
-+static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
-+
-+static DEFINE_SPINLOCK(ioapic_lock);
-+
-+int timer_over_8254 __initdata = 1;
-+
-+/*
-+ * Is the SiS APIC rmw bug present?
-+ * -1 = don't know, 0 = no, 1 = yes
-+ */
-+int sis_apic_bug = -1;
-+
-+/*
-+ * # of IRQ routing registers
-+ */
-+int nr_ioapic_registers[MAX_IO_APICS];
-+
-+int disable_timer_pin_1 __initdata;
-+
-+/*
-+ * Rough estimate of how many shared IRQs there are; this can
-+ * be changed at any time.
-+ */
-+#define MAX_PLUS_SHARED_IRQS NR_IRQS
-+#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
-+
-+/*
-+ * This is performance-critical, we want to do it O(1)
-+ *
-+ * the indexing order of this array favors 1:1 mappings
-+ * between pins and IRQs.
-+ */
-+
-+static struct irq_pin_list {
-+ int apic, pin, next;
-+} irq_2_pin[PIN_MAP_SIZE];
-+
-+int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
-+#ifdef CONFIG_PCI_MSI
-+#define vector_to_irq(vector) \
-+ (platform_legacy_irq(vector) ? vector : vector_irq[vector])
-+#else
-+#define vector_to_irq(vector) (vector)
-+#endif
-+
-+/*
-+ * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
-+ * shared ISA-space IRQs, so we have to support them. We are super
-+ * fast in the common case, and fast for shared ISA-space IRQs.
-+ */
-+static void add_pin_to_irq(unsigned int irq, int apic, int pin)
-+{
-+ static int first_free_entry = NR_IRQS;
-+ struct irq_pin_list *entry = irq_2_pin + irq;
-+
-+ while (entry->next)
-+ entry = irq_2_pin + entry->next;
-+
-+ if (entry->pin != -1) {
-+ entry->next = first_free_entry;
-+ entry = irq_2_pin + entry->next;
-+ if (++first_free_entry >= PIN_MAP_SIZE)
-+ panic("io_apic.c: whoops");
-+ }
-+ entry->apic = apic;
-+ entry->pin = pin;
-+}
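-+/*
-+ * Editorial sketch (assumed walk, not in the original patch): irq_2_pin is
-+ * a linked list embedded in an array.  Slots 0..NR_IRQS-1 are the per-IRQ
-+ * list heads; extra pins sharing an IRQ are chained through the overflow
-+ * slots starting at first_free_entry via the 'next' index.  A minimal walk
-+ * over all pins of one irq, mirroring __modify_IO_APIC_irq() below:
-+ *
-+ *	struct irq_pin_list *e = irq_2_pin + irq;
-+ *	for (;;) {
-+ *		if (e->pin == -1)
-+ *			break;
-+ *		visit(e->apic, e->pin);		-- hypothetical callback
-+ *		if (!e->next)
-+ *			break;
-+ *		e = irq_2_pin + e->next;
-+ *	}
-+ */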
-+
-+#ifdef CONFIG_XEN
-+#define clear_IO_APIC() ((void)0)
-+#else
-+/*
-+ * Reroute an IRQ to a different pin.
-+ */
-+static void __init replace_pin_at_irq(unsigned int irq,
-+ int oldapic, int oldpin,
-+ int newapic, int newpin)
-+{
-+ struct irq_pin_list *entry = irq_2_pin + irq;
-+
-+ while (1) {
-+ if (entry->apic == oldapic && entry->pin == oldpin) {
-+ entry->apic = newapic;
-+ entry->pin = newpin;
-+ }
-+ if (!entry->next)
-+ break;
-+ entry = irq_2_pin + entry->next;
-+ }
-+}
-+
-+static void __modify_IO_APIC_irq (unsigned int irq, unsigned long enable, unsigned long disable)
-+{
-+ struct irq_pin_list *entry = irq_2_pin + irq;
-+ unsigned int pin, reg;
-+
-+ for (;;) {
-+ pin = entry->pin;
-+ if (pin == -1)
-+ break;
-+ reg = io_apic_read(entry->apic, 0x10 + pin*2);
-+ reg &= ~disable;
-+ reg |= enable;
-+ io_apic_modify(entry->apic, 0x10 + pin*2, reg);
-+ if (!entry->next)
-+ break;
-+ entry = irq_2_pin + entry->next;
-+ }
-+}
-+
-+/* mask = 1 */
-+static void __mask_IO_APIC_irq (unsigned int irq)
-+{
-+ __modify_IO_APIC_irq(irq, 0x00010000, 0);
-+}
-+
-+/* mask = 0 */
-+static void __unmask_IO_APIC_irq (unsigned int irq)
-+{
-+ __modify_IO_APIC_irq(irq, 0, 0x00010000);
-+}
-+
-+/* mask = 1, trigger = 0 */
-+static void __mask_and_edge_IO_APIC_irq (unsigned int irq)
-+{
-+ __modify_IO_APIC_irq(irq, 0x00010000, 0x00008000);
-+}
-+
-+/* mask = 0, trigger = 1 */
-+static void __unmask_and_level_IO_APIC_irq (unsigned int irq)
-+{
-+ __modify_IO_APIC_irq(irq, 0x00008000, 0x00010000);
-+}
-+
-+static void mask_IO_APIC_irq (unsigned int irq)
-+{
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ __mask_IO_APIC_irq(irq);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+
-+static void unmask_IO_APIC_irq (unsigned int irq)
-+{
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ __unmask_IO_APIC_irq(irq);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+
-+static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
-+{
-+ struct IO_APIC_route_entry entry;
-+ unsigned long flags;
-+
-+ /* Check delivery_mode to be sure we're not clearing an SMI pin */
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ *(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
-+ *(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+ if (entry.delivery_mode == dest_SMI)
-+ return;
-+
-+ /*
-+ * Disable it in the IO-APIC irq-routing table:
-+ */
-+ memset(&entry, 0, sizeof(entry));
-+ entry.mask = 1;
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0));
-+ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1));
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+
-+static void clear_IO_APIC (void)
-+{
-+ int apic, pin;
-+
-+ for (apic = 0; apic < nr_ioapics; apic++)
-+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
-+ clear_IO_APIC_pin(apic, pin);
-+}
-+
-+#ifdef CONFIG_SMP
-+static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
-+{
-+ unsigned long flags;
-+ int pin;
-+ struct irq_pin_list *entry = irq_2_pin + irq;
-+ unsigned int apicid_value;
-+ cpumask_t tmp;
-+
-+ cpus_and(tmp, cpumask, cpu_online_map);
-+ if (cpus_empty(tmp))
-+ tmp = TARGET_CPUS;
-+
-+ cpus_and(cpumask, tmp, CPU_MASK_ALL);
-+
-+ apicid_value = cpu_mask_to_apicid(cpumask);
-+ /* Prepare to do the io_apic_write */
-+ apicid_value = apicid_value << 24;
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ for (;;) {
-+ pin = entry->pin;
-+ if (pin == -1)
-+ break;
-+ io_apic_write(entry->apic, 0x10 + 1 + pin*2, apicid_value);
-+ if (!entry->next)
-+ break;
-+ entry = irq_2_pin + entry->next;
-+ }
-+ set_irq_info(irq, cpumask);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+
-+#if defined(CONFIG_IRQBALANCE)
-+# include <asm/processor.h> /* kernel_thread() */
-+# include <linux/kernel_stat.h> /* kstat */
-+# include <linux/slab.h> /* kmalloc() */
-+# include <linux/timer.h> /* time_after() */
-+
-+# ifdef CONFIG_BALANCED_IRQ_DEBUG
-+# define TDprintk(x...) do { printk("<%ld:%s:%d>: ", jiffies, __FILE__, __LINE__); printk(x); } while (0)
-+# define Dprintk(x...) do { TDprintk(x); } while (0)
-+# else
-+# define TDprintk(x...)
-+# define Dprintk(x...)
-+# endif
-+
-+
-+#define IRQBALANCE_CHECK_ARCH -999
-+static int irqbalance_disabled = IRQBALANCE_CHECK_ARCH;
-+static int physical_balance = 0;
-+
-+static struct irq_cpu_info {
-+ unsigned long * last_irq;
-+ unsigned long * irq_delta;
-+ unsigned long irq;
-+} irq_cpu_data[NR_CPUS];
-+
-+#define CPU_IRQ(cpu) (irq_cpu_data[cpu].irq)
-+#define LAST_CPU_IRQ(cpu,irq) (irq_cpu_data[cpu].last_irq[irq])
-+#define IRQ_DELTA(cpu,irq) (irq_cpu_data[cpu].irq_delta[irq])
-+
-+#define IDLE_ENOUGH(cpu,now) \
-+ (idle_cpu(cpu) && ((now) - per_cpu(irq_stat, (cpu)).idle_timestamp > 1))
-+
-+#define IRQ_ALLOWED(cpu, allowed_mask) cpu_isset(cpu, allowed_mask)
-+
-+#define CPU_TO_PACKAGEINDEX(i) (first_cpu(cpu_sibling_map[i]))
-+
-+#define MAX_BALANCED_IRQ_INTERVAL (5*HZ)
-+#define MIN_BALANCED_IRQ_INTERVAL (HZ/2)
-+#define BALANCED_IRQ_MORE_DELTA (HZ/10)
-+#define BALANCED_IRQ_LESS_DELTA (HZ)
-+
-+static long balanced_irq_interval = MAX_BALANCED_IRQ_INTERVAL;
-+
-+static unsigned long move(int curr_cpu, cpumask_t allowed_mask,
-+ unsigned long now, int direction)
-+{
-+ int search_idle = 1;
-+ int cpu = curr_cpu;
-+
-+ goto inside;
-+
-+ do {
-+ if (unlikely(cpu == curr_cpu))
-+ search_idle = 0;
-+inside:
-+ if (direction == 1) {
-+ cpu++;
-+ if (cpu >= NR_CPUS)
-+ cpu = 0;
-+ } else {
-+ cpu--;
-+ if (cpu == -1)
-+ cpu = NR_CPUS-1;
-+ }
-+ } while (!cpu_online(cpu) || !IRQ_ALLOWED(cpu,allowed_mask) ||
-+ (search_idle && !IDLE_ENOUGH(cpu,now)));
-+
-+ return cpu;
-+}
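-+/*
-+ * Editorial note (not part of the original patch): move() scans the CPU
-+ * ids circularly starting next to curr_cpu, at first accepting only
-+ * online, allowed and sufficiently idle CPUs; once it has wrapped back
-+ * around to curr_cpu it drops the idleness requirement, so it terminates
-+ * with some allowed CPU provided the allowed mask contains an online one.
-+ */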
-+
-+static inline void balance_irq(int cpu, int irq)
-+{
-+ unsigned long now = jiffies;
-+ cpumask_t allowed_mask;
-+ unsigned int new_cpu;
-+
-+ if (irqbalance_disabled)
-+ return;
-+
-+ cpus_and(allowed_mask, cpu_online_map, irq_affinity[irq]);
-+ new_cpu = move(cpu, allowed_mask, now, 1);
-+ if (cpu != new_cpu) {
-+ set_pending_irq(irq, cpumask_of_cpu(new_cpu));
-+ }
-+}
-+
-+static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
-+{
-+ int i, j;
-+ Dprintk("Rotating IRQs among CPUs.\n");
-+ for (i = 0; i < NR_CPUS; i++) {
-+ for (j = 0; cpu_online(i) && (j < NR_IRQS); j++) {
-+ if (!irq_desc[j].action)
-+ continue;
-+ /* Is it a significant load ? */
-+ if (IRQ_DELTA(CPU_TO_PACKAGEINDEX(i),j) <
-+ useful_load_threshold)
-+ continue;
-+ balance_irq(i, j);
-+ }
-+ }
-+ balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
-+ balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);
-+ return;
-+}
-+
-+static void do_irq_balance(void)
-+{
-+ int i, j;
-+ unsigned long max_cpu_irq = 0, min_cpu_irq = (~0);
-+ unsigned long move_this_load = 0;
-+ int max_loaded = 0, min_loaded = 0;
-+ int load;
-+ unsigned long useful_load_threshold = balanced_irq_interval + 10;
-+ int selected_irq;
-+ int tmp_loaded, first_attempt = 1;
-+ unsigned long tmp_cpu_irq;
-+ unsigned long imbalance = 0;
-+ cpumask_t allowed_mask, target_cpu_mask, tmp;
-+
-+ for (i = 0; i < NR_CPUS; i++) {
-+ int package_index;
-+ CPU_IRQ(i) = 0;
-+ if (!cpu_online(i))
-+ continue;
-+ package_index = CPU_TO_PACKAGEINDEX(i);
-+ for (j = 0; j < NR_IRQS; j++) {
-+ unsigned long value_now, delta;
-+ /* Is this an active IRQ? */
-+ if (!irq_desc[j].action)
-+ continue;
-+ if ( package_index == i )
-+ IRQ_DELTA(package_index,j) = 0;
-+ /* Determine the total count per processor per IRQ */
-+ value_now = (unsigned long) kstat_cpu(i).irqs[j];
-+
-+ /* Determine the activity per processor per IRQ */
-+ delta = value_now - LAST_CPU_IRQ(i,j);
-+
-+ /* Update last_cpu_irq[][] for the next time */
-+ LAST_CPU_IRQ(i,j) = value_now;
-+
-+ /* Ignore IRQs whose rate is less than the clock */
-+ if (delta < useful_load_threshold)
-+ continue;
-+ /* update the load for the processor or package total */
-+ IRQ_DELTA(package_index,j) += delta;
-+
-+ /* Keep track of the higher numbered sibling as well */
-+ if (i != package_index)
-+ CPU_IRQ(i) += delta;
-+ /*
-+ * We have sibling A and sibling B in the package
-+ *
-+ * cpu_irq[A] = load for cpu A + load for cpu B
-+ * cpu_irq[B] = load for cpu B
-+ */
-+ CPU_IRQ(package_index) += delta;
-+ }
-+ }
-+ /* Find the least loaded processor package */
-+ for (i = 0; i < NR_CPUS; i++) {
-+ if (!cpu_online(i))
-+ continue;
-+ if (i != CPU_TO_PACKAGEINDEX(i))
-+ continue;
-+ if (min_cpu_irq > CPU_IRQ(i)) {
-+ min_cpu_irq = CPU_IRQ(i);
-+ min_loaded = i;
-+ }
-+ }
-+ max_cpu_irq = ULONG_MAX;
-+
-+tryanothercpu:
-+ /* Look for heaviest loaded processor.
-+ * We may come back to get the next heaviest loaded processor.
-+ * Skip processors with trivial loads.
-+ */
-+ tmp_cpu_irq = 0;
-+ tmp_loaded = -1;
-+ for (i = 0; i < NR_CPUS; i++) {
-+ if (!cpu_online(i))
-+ continue;
-+ if (i != CPU_TO_PACKAGEINDEX(i))
-+ continue;
-+ if (max_cpu_irq <= CPU_IRQ(i))
-+ continue;
-+ if (tmp_cpu_irq < CPU_IRQ(i)) {
-+ tmp_cpu_irq = CPU_IRQ(i);
-+ tmp_loaded = i;
-+ }
-+ }
-+
-+ if (tmp_loaded == -1) {
-+		/* In the case of a small number of heavy interrupt
-+		 * sources, some cpus may end up loaded too much; we use
-+		 * Ingo's original approach to rotate them around.
-+ */
-+ if (!first_attempt && imbalance >= useful_load_threshold) {
-+ rotate_irqs_among_cpus(useful_load_threshold);
-+ return;
-+ }
-+ goto not_worth_the_effort;
-+ }
-+
-+ first_attempt = 0; /* heaviest search */
-+ max_cpu_irq = tmp_cpu_irq; /* load */
-+ max_loaded = tmp_loaded; /* processor */
-+ imbalance = (max_cpu_irq - min_cpu_irq) / 2;
-+
-+ Dprintk("max_loaded cpu = %d\n", max_loaded);
-+ Dprintk("min_loaded cpu = %d\n", min_loaded);
-+ Dprintk("max_cpu_irq load = %ld\n", max_cpu_irq);
-+ Dprintk("min_cpu_irq load = %ld\n", min_cpu_irq);
-+ Dprintk("load imbalance = %lu\n", imbalance);
-+
-+	/* If the imbalance is less than approx 10% of the max load,
-+	 * we are into diminishing returns -- quit.
-+ */
-+ if (imbalance < (max_cpu_irq >> 3)) {
-+ Dprintk("Imbalance too trivial\n");
-+ goto not_worth_the_effort;
-+ }
-+
-+tryanotherirq:
-+ /* if we select an IRQ to move that can't go where we want, then
-+ * see if there is another one to try.
-+ */
-+ move_this_load = 0;
-+ selected_irq = -1;
-+ for (j = 0; j < NR_IRQS; j++) {
-+ /* Is this an active IRQ? */
-+ if (!irq_desc[j].action)
-+ continue;
-+ if (imbalance <= IRQ_DELTA(max_loaded,j))
-+ continue;
-+ /* Try to find the IRQ that is closest to the imbalance
-+ * without going over.
-+ */
-+ if (move_this_load < IRQ_DELTA(max_loaded,j)) {
-+ move_this_load = IRQ_DELTA(max_loaded,j);
-+ selected_irq = j;
-+ }
-+ }
-+ if (selected_irq == -1) {
-+ goto tryanothercpu;
-+ }
-+
-+ imbalance = move_this_load;
-+
-+	/* For the physical_balance case, we accumulated both load
-+	 * values in one of the siblings' cpu_irq[], so that the same
-+	 * code can be used for physical and logical processors as
-+	 * much as possible.
-+ *
-+ * NOTE: the cpu_irq[] array holds the sum of the load for
-+ * sibling A and sibling B in the slot for the lowest numbered
-+ * sibling (A), _AND_ the load for sibling B in the slot for
-+ * the higher numbered sibling.
-+ *
-+ * We seek the least loaded sibling by making the comparison
-+ * (A+B)/2 vs B
-+ */
-+ load = CPU_IRQ(min_loaded) >> 1;
-+ for_each_cpu_mask(j, cpu_sibling_map[min_loaded]) {
-+ if (load > CPU_IRQ(j)) {
-+ /* This won't change cpu_sibling_map[min_loaded] */
-+ load = CPU_IRQ(j);
-+ min_loaded = j;
-+ }
-+ }
-+
-+ cpus_and(allowed_mask, cpu_online_map, irq_affinity[selected_irq]);
-+ target_cpu_mask = cpumask_of_cpu(min_loaded);
-+ cpus_and(tmp, target_cpu_mask, allowed_mask);
-+
-+ if (!cpus_empty(tmp)) {
-+
-+ Dprintk("irq = %d moved to cpu = %d\n",
-+ selected_irq, min_loaded);
-+ /* mark for change destination */
-+ set_pending_irq(selected_irq, cpumask_of_cpu(min_loaded));
-+
-+ /* Since we made a change, come back sooner to
-+ * check for more variation.
-+ */
-+ balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
-+ balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);
-+ return;
-+ }
-+ goto tryanotherirq;
-+
-+not_worth_the_effort:
-+ /*
-+ * if we did not find an IRQ to move, then adjust the time interval
-+ * upward
-+ */
-+ balanced_irq_interval = min((long)MAX_BALANCED_IRQ_INTERVAL,
-+ balanced_irq_interval + BALANCED_IRQ_MORE_DELTA);
-+ Dprintk("IRQ worth rotating not found\n");
-+ return;
-+}
-+
-+static int balanced_irq(void *unused)
-+{
-+ int i;
-+ unsigned long prev_balance_time = jiffies;
-+ long time_remaining = balanced_irq_interval;
-+
-+ daemonize("kirqd");
-+
-+ /* push everything to CPU 0 to give us a starting point. */
-+ for (i = 0 ; i < NR_IRQS ; i++) {
-+ pending_irq_cpumask[i] = cpumask_of_cpu(0);
-+ set_pending_irq(i, cpumask_of_cpu(0));
-+ }
-+
-+ for ( ; ; ) {
-+ time_remaining = schedule_timeout_interruptible(time_remaining);
-+ try_to_freeze();
-+ if (time_after(jiffies,
-+ prev_balance_time+balanced_irq_interval)) {
-+ preempt_disable();
-+ do_irq_balance();
-+ prev_balance_time = jiffies;
-+ time_remaining = balanced_irq_interval;
-+ preempt_enable();
-+ }
-+ }
-+ return 0;
-+}
-+
-+static int __init balanced_irq_init(void)
-+{
-+ int i;
-+ struct cpuinfo_x86 *c;
-+ cpumask_t tmp;
-+
-+ cpus_shift_right(tmp, cpu_online_map, 2);
-+ c = &boot_cpu_data;
-+	/* When not overridden on the command line, ask the subarchitecture. */
-+ if (irqbalance_disabled == IRQBALANCE_CHECK_ARCH)
-+ irqbalance_disabled = NO_BALANCE_IRQ;
-+ if (irqbalance_disabled)
-+ return 0;
-+
-+ /* disable irqbalance completely if there is only one processor online */
-+ if (num_online_cpus() < 2) {
-+ irqbalance_disabled = 1;
-+ return 0;
-+ }
-+ /*
-+ * Enable physical balance only if more than 1 physical processor
-+ * is present
-+ */
-+ if (smp_num_siblings > 1 && !cpus_empty(tmp))
-+ physical_balance = 1;
-+
-+ for (i = 0; i < NR_CPUS; i++) {
-+ if (!cpu_online(i))
-+ continue;
-+ irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
-+ irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
-+ if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) {
-+			printk(KERN_ERR "balanced_irq_init: out of memory\n");
-+ goto failed;
-+ }
-+ memset(irq_cpu_data[i].irq_delta,0,sizeof(unsigned long) * NR_IRQS);
-+ memset(irq_cpu_data[i].last_irq,0,sizeof(unsigned long) * NR_IRQS);
-+ }
-+
-+ printk(KERN_INFO "Starting balanced_irq\n");
-+ if (kernel_thread(balanced_irq, NULL, CLONE_KERNEL) >= 0)
-+ return 0;
-+ else
-+		printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq\n");
-+failed:
-+ for (i = 0; i < NR_CPUS; i++) {
-+ kfree(irq_cpu_data[i].irq_delta);
-+ kfree(irq_cpu_data[i].last_irq);
-+ }
-+ return 0;
-+}
-+
-+int __init irqbalance_disable(char *str)
-+{
-+ irqbalance_disabled = 1;
-+ return 0;
-+}
-+
-+__setup("noirqbalance", irqbalance_disable);
-+
-+late_initcall(balanced_irq_init);
-+#endif /* CONFIG_IRQBALANCE */
-+#endif /* CONFIG_SMP */
-+#endif
-+
-+#ifndef CONFIG_SMP
-+void fastcall send_IPI_self(int vector)
-+{
-+#ifndef CONFIG_XEN
-+ unsigned int cfg;
-+
-+ /*
-+ * Wait for idle.
-+ */
-+ apic_wait_icr_idle();
-+ cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
-+ /*
-+ * Send the IPI. The write to APIC_ICR fires this off.
-+ */
-+ apic_write_around(APIC_ICR, cfg);
-+#endif
-+}
-+#endif /* !CONFIG_SMP */
-+
-+
-+/*
-+ * Support for broken MP BIOSes: enables hand-redirection of PIRQ0-7 to
-+ * specific CPU-side IRQs.
-+ */
-+
-+#define MAX_PIRQS 8
-+static int pirq_entries [MAX_PIRQS];
-+static int pirqs_enabled;
-+int skip_ioapic_setup;
-+
-+static int __init ioapic_setup(char *str)
-+{
-+ skip_ioapic_setup = 1;
-+ return 1;
-+}
-+
-+__setup("noapic", ioapic_setup);
-+
-+static int __init ioapic_pirq_setup(char *str)
-+{
-+ int i, max;
-+ int ints[MAX_PIRQS+1];
-+
-+ get_options(str, ARRAY_SIZE(ints), ints);
-+
-+ for (i = 0; i < MAX_PIRQS; i++)
-+ pirq_entries[i] = -1;
-+
-+ pirqs_enabled = 1;
-+ apic_printk(APIC_VERBOSE, KERN_INFO
-+ "PIRQ redirection, working around broken MP-BIOS.\n");
-+ max = MAX_PIRQS;
-+ if (ints[0] < MAX_PIRQS)
-+ max = ints[0];
-+
-+ for (i = 0; i < max; i++) {
-+ apic_printk(APIC_VERBOSE, KERN_DEBUG
-+ "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
-+ /*
-+ * PIRQs are mapped upside down, usually.
-+ */
-+ pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
-+ }
-+ return 1;
-+}
-+
-+__setup("pirq=", ioapic_pirq_setup);
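-+/*
-+ * Illustrative example (assumed command line, not from the original
-+ * patch): booting with "pirq=5,11" makes get_options() fill
-+ * ints[] = { 2, 5, 11 }, so the loop above stores pirq_entries[7] = 5 and
-+ * pirq_entries[6] = 11, i.e. PIRQ7 -> IRQ 5 and PIRQ6 -> IRQ 11 --
-+ * matching the "mapped upside down" comment.
-+ */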
-+
-+/*
-+ * Find the IRQ entry number of a certain pin.
-+ */
-+static int find_irq_entry(int apic, int pin, int type)
-+{
-+ int i;
-+
-+ for (i = 0; i < mp_irq_entries; i++)
-+ if (mp_irqs[i].mpc_irqtype == type &&
-+ (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
-+ mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
-+ mp_irqs[i].mpc_dstirq == pin)
-+ return i;
-+
-+ return -1;
-+}
-+
-+/*
-+ * Find the pin to which IRQ[irq] (ISA) is connected
-+ */
-+static int __init find_isa_irq_pin(int irq, int type)
-+{
-+ int i;
-+
-+ for (i = 0; i < mp_irq_entries; i++) {
-+ int lbus = mp_irqs[i].mpc_srcbus;
-+
-+ if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
-+ mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
-+ mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
-+ mp_bus_id_to_type[lbus] == MP_BUS_NEC98
-+ ) &&
-+ (mp_irqs[i].mpc_irqtype == type) &&
-+ (mp_irqs[i].mpc_srcbusirq == irq))
-+
-+ return mp_irqs[i].mpc_dstirq;
-+ }
-+ return -1;
-+}
-+
-+static int __init find_isa_irq_apic(int irq, int type)
-+{
-+ int i;
-+
-+ for (i = 0; i < mp_irq_entries; i++) {
-+ int lbus = mp_irqs[i].mpc_srcbus;
-+
-+ if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
-+ mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
-+ mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
-+ mp_bus_id_to_type[lbus] == MP_BUS_NEC98
-+ ) &&
-+ (mp_irqs[i].mpc_irqtype == type) &&
-+ (mp_irqs[i].mpc_srcbusirq == irq))
-+ break;
-+ }
-+ if (i < mp_irq_entries) {
-+ int apic;
-+ for(apic = 0; apic < nr_ioapics; apic++) {
-+ if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
-+ return apic;
-+ }
-+ }
-+
-+ return -1;
-+}
-+
-+/*
-+ * Find a specific PCI IRQ entry.
-+ * Not an __init, possibly needed by modules
-+ */
-+static int pin_2_irq(int idx, int apic, int pin);
-+
-+int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
-+{
-+ int apic, i, best_guess = -1;
-+
-+ apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, "
-+ "slot:%d, pin:%d.\n", bus, slot, pin);
-+ if (mp_bus_id_to_pci_bus[bus] == -1) {
-+ printk(KERN_WARNING "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
-+ return -1;
-+ }
-+ for (i = 0; i < mp_irq_entries; i++) {
-+ int lbus = mp_irqs[i].mpc_srcbus;
-+
-+ for (apic = 0; apic < nr_ioapics; apic++)
-+ if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic ||
-+ mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
-+ break;
-+
-+ if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
-+ !mp_irqs[i].mpc_irqtype &&
-+ (bus == lbus) &&
-+ (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
-+ int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
-+
-+ if (!(apic || IO_APIC_IRQ(irq)))
-+ continue;
-+
-+ if (pin == (mp_irqs[i].mpc_srcbusirq & 3))
-+ return irq;
-+ /*
-+ * Use the first all-but-pin matching entry as a
-+ * best-guess fuzzy result for broken mptables.
-+ */
-+ if (best_guess < 0)
-+ best_guess = irq;
-+ }
-+ }
-+ return best_guess;
-+}
-+EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
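-+/*
-+ * Editorial worked example (assumed values): for a device in PCI slot 5
-+ * raising INTB, the MP table encodes mpc_srcbusirq = (5 << 2) | 1 = 0x15;
-+ * the matching above then recovers slot = (0x15 >> 2) & 0x1f = 5 and
-+ * compares pin against (0x15 & 3) = 1.
-+ */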
-+
-+/*
-+ * This function is currently only a helper for the i386 SMP boot process,
-+ * where we need to reprogram the ioredtbls to cater for the cpus which
-+ * have come online, so the mask in all cases should simply be TARGET_CPUS.
-+ */
-+#ifdef CONFIG_SMP
-+#ifndef CONFIG_XEN
-+void __init setup_ioapic_dest(void)
-+{
-+ int pin, ioapic, irq, irq_entry;
-+
-+ if (skip_ioapic_setup == 1)
-+ return;
-+
-+ for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
-+ for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
-+ irq_entry = find_irq_entry(ioapic, pin, mp_INT);
-+ if (irq_entry == -1)
-+ continue;
-+ irq = pin_2_irq(irq_entry, ioapic, pin);
-+ set_ioapic_affinity_irq(irq, TARGET_CPUS);
-+ }
-+
-+ }
-+}
-+#endif /* !CONFIG_XEN */
-+#endif
-+
-+/*
-+ * EISA Edge/Level control register, ELCR
-+ */
-+static int EISA_ELCR(unsigned int irq)
-+{
-+ if (irq < 16) {
-+ unsigned int port = 0x4d0 + (irq >> 3);
-+ return (inb(port) >> (irq & 7)) & 1;
-+ }
-+ apic_printk(APIC_VERBOSE, KERN_INFO
-+ "Broken MPtable reports ISA irq %d\n", irq);
-+ return 0;
-+}
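-+/*
-+ * Illustrative example (not in the original patch): for irq 9 the lookup
-+ * above reads port 0x4d0 + (9 >> 3) = 0x4d1 and tests bit (9 & 7) = 1;
-+ * a set bit means IRQ 9 is level triggered.
-+ */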
-+
-+/* EISA interrupts are always polarity zero and can be edge or level
-+ * trigger depending on the ELCR value. If an interrupt is listed as
-+ * EISA conforming in the MP table, that means its trigger type must
-+ * be read in from the ELCR */
-+
-+#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
-+#define default_EISA_polarity(idx) (0)
-+
-+/* ISA interrupts are always polarity zero edge triggered,
-+ * when listed as conforming in the MP table. */
-+
-+#define default_ISA_trigger(idx) (0)
-+#define default_ISA_polarity(idx) (0)
-+
-+/* PCI interrupts are always polarity one level triggered,
-+ * when listed as conforming in the MP table. */
-+
-+#define default_PCI_trigger(idx) (1)
-+#define default_PCI_polarity(idx) (1)
-+
-+/* MCA interrupts are always polarity zero level triggered,
-+ * when listed as conforming in the MP table. */
-+
-+#define default_MCA_trigger(idx) (1)
-+#define default_MCA_polarity(idx) (0)
-+
-+/* NEC98 interrupts are always polarity zero edge triggered,
-+ * when listed as conforming in the MP table. */
-+
-+#define default_NEC98_trigger(idx) (0)
-+#define default_NEC98_polarity(idx) (0)
-+
-+static int __init MPBIOS_polarity(int idx)
-+{
-+ int bus = mp_irqs[idx].mpc_srcbus;
-+ int polarity;
-+
-+ /*
-+ * Determine IRQ line polarity (high active or low active):
-+ */
-+ switch (mp_irqs[idx].mpc_irqflag & 3)
-+ {
-+		case 0: /* conforms, i.e. bus-type dependent polarity */
-+ {
-+ switch (mp_bus_id_to_type[bus])
-+ {
-+ case MP_BUS_ISA: /* ISA pin */
-+ {
-+ polarity = default_ISA_polarity(idx);
-+ break;
-+ }
-+ case MP_BUS_EISA: /* EISA pin */
-+ {
-+ polarity = default_EISA_polarity(idx);
-+ break;
-+ }
-+ case MP_BUS_PCI: /* PCI pin */
-+ {
-+ polarity = default_PCI_polarity(idx);
-+ break;
-+ }
-+ case MP_BUS_MCA: /* MCA pin */
-+ {
-+ polarity = default_MCA_polarity(idx);
-+ break;
-+ }
-+ case MP_BUS_NEC98: /* NEC 98 pin */
-+ {
-+ polarity = default_NEC98_polarity(idx);
-+ break;
-+ }
-+ default:
-+ {
-+ printk(KERN_WARNING "broken BIOS!!\n");
-+ polarity = 1;
-+ break;
-+ }
-+ }
-+ break;
-+ }
-+ case 1: /* high active */
-+ {
-+ polarity = 0;
-+ break;
-+ }
-+ case 2: /* reserved */
-+ {
-+ printk(KERN_WARNING "broken BIOS!!\n");
-+ polarity = 1;
-+ break;
-+ }
-+ case 3: /* low active */
-+ {
-+ polarity = 1;
-+ break;
-+ }
-+ default: /* invalid */
-+ {
-+ printk(KERN_WARNING "broken BIOS!!\n");
-+ polarity = 1;
-+ break;
-+ }
-+ }
-+ return polarity;
-+}
-+
-+static int MPBIOS_trigger(int idx)
-+{
-+ int bus = mp_irqs[idx].mpc_srcbus;
-+ int trigger;
-+
-+ /*
-+ * Determine IRQ trigger mode (edge or level sensitive):
-+ */
-+ switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
-+ {
-+		case 0: /* conforms, i.e. bus-type dependent */
-+ {
-+ switch (mp_bus_id_to_type[bus])
-+ {
-+ case MP_BUS_ISA: /* ISA pin */
-+ {
-+ trigger = default_ISA_trigger(idx);
-+ break;
-+ }
-+ case MP_BUS_EISA: /* EISA pin */
-+ {
-+ trigger = default_EISA_trigger(idx);
-+ break;
-+ }
-+ case MP_BUS_PCI: /* PCI pin */
-+ {
-+ trigger = default_PCI_trigger(idx);
-+ break;
-+ }
-+ case MP_BUS_MCA: /* MCA pin */
-+ {
-+ trigger = default_MCA_trigger(idx);
-+ break;
-+ }
-+ case MP_BUS_NEC98: /* NEC 98 pin */
-+ {
-+ trigger = default_NEC98_trigger(idx);
-+ break;
-+ }
-+ default:
-+ {
-+ printk(KERN_WARNING "broken BIOS!!\n");
-+ trigger = 1;
-+ break;
-+ }
-+ }
-+ break;
-+ }
-+ case 1: /* edge */
-+ {
-+ trigger = 0;
-+ break;
-+ }
-+ case 2: /* reserved */
-+ {
-+ printk(KERN_WARNING "broken BIOS!!\n");
-+ trigger = 1;
-+ break;
-+ }
-+ case 3: /* level */
-+ {
-+ trigger = 1;
-+ break;
-+ }
-+ default: /* invalid */
-+ {
-+ printk(KERN_WARNING "broken BIOS!!\n");
-+ trigger = 0;
-+ break;
-+ }
-+ }
-+ return trigger;
-+}
-+
-+static inline int irq_polarity(int idx)
-+{
-+ return MPBIOS_polarity(idx);
-+}
-+
-+static inline int irq_trigger(int idx)
-+{
-+ return MPBIOS_trigger(idx);
-+}
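-+/*
-+ * Editorial worked example (assumed MP-table entry): mpc_irqflag = 0x0f
-+ * decodes above as polarity = (0x0f & 3) = 3, i.e. low active (returns 1),
-+ * and trigger = ((0x0f >> 2) & 3) = 3, i.e. level (returns 1) -- the same
-+ * values the PCI defaults yield for a conforming entry.
-+ */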
-+
-+static int pin_2_irq(int idx, int apic, int pin)
-+{
-+ int irq, i;
-+ int bus = mp_irqs[idx].mpc_srcbus;
-+
-+ /*
-+ * Debugging check, we are in big trouble if this message pops up!
-+ */
-+ if (mp_irqs[idx].mpc_dstirq != pin)
-+ printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
-+
-+ switch (mp_bus_id_to_type[bus])
-+ {
-+ case MP_BUS_ISA: /* ISA pin */
-+ case MP_BUS_EISA:
-+ case MP_BUS_MCA:
-+ case MP_BUS_NEC98:
-+ {
-+ irq = mp_irqs[idx].mpc_srcbusirq;
-+ break;
-+ }
-+ case MP_BUS_PCI: /* PCI pin */
-+ {
-+ /*
-+ * PCI IRQs are mapped in order
-+ */
-+ i = irq = 0;
-+ while (i < apic)
-+ irq += nr_ioapic_registers[i++];
-+ irq += pin;
-+
-+ /*
-+ * For MPS mode, so far only needed by ES7000 platform
-+ */
-+ if (ioapic_renumber_irq)
-+ irq = ioapic_renumber_irq(apic, irq);
-+
-+ break;
-+ }
-+ default:
-+ {
-+ printk(KERN_ERR "unknown bus type %d.\n",bus);
-+ irq = 0;
-+ break;
-+ }
-+ }
-+
-+ /*
-+ * PCI IRQ command line redirection. Yes, limits are hardcoded.
-+ */
-+ if ((pin >= 16) && (pin <= 23)) {
-+ if (pirq_entries[pin-16] != -1) {
-+ if (!pirq_entries[pin-16]) {
-+ apic_printk(APIC_VERBOSE, KERN_DEBUG
-+ "disabling PIRQ%d\n", pin-16);
-+ } else {
-+ irq = pirq_entries[pin-16];
-+ apic_printk(APIC_VERBOSE, KERN_DEBUG
-+ "using PIRQ%d -> IRQ %d\n",
-+ pin-16, irq);
-+ }
-+ }
-+ }
-+ return irq;
-+}
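-+/*
-+ * Illustrative example (assumed configuration): with two IO-APICs of 24
-+ * pins each (nr_ioapic_registers[] = { 24, 24 }), a PCI source on apic 1,
-+ * pin 3 gets irq = 24 + 3 = 27 from the loop above; the pirq= remapping
-+ * at the end of the function only ever touches pins 16-23.
-+ */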
-+
-+static inline int IO_APIC_irq_trigger(int irq)
-+{
-+ int apic, idx, pin;
-+
-+ for (apic = 0; apic < nr_ioapics; apic++) {
-+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
-+ idx = find_irq_entry(apic,pin,mp_INT);
-+ if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
-+ return irq_trigger(idx);
-+ }
-+ }
-+ /*
-+ * nonexistent IRQs are edge default
-+ */
-+ return 0;
-+}
-+
-+/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
-+u8 irq_vector[NR_IRQ_VECTORS] __read_mostly; /* = { FIRST_DEVICE_VECTOR , 0 }; */
-+
-+int assign_irq_vector(int irq)
-+{
-+ static int current_vector = FIRST_DEVICE_VECTOR;
-+ physdev_op_t op;
-+
-+ BUG_ON(irq >= NR_IRQ_VECTORS);
-+ if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0)
-+ return IO_APIC_VECTOR(irq);
-+
-+ op.cmd = PHYSDEVOP_ASSIGN_VECTOR;
-+ op.u.irq_op.irq = irq;
-+ if (HYPERVISOR_physdev_op(&op))
-+ return -ENOSPC;
-+ current_vector = op.u.irq_op.vector;
-+
-+ vector_irq[current_vector] = irq;
-+ if (irq != AUTO_ASSIGN)
-+ IO_APIC_VECTOR(irq) = current_vector;
-+
-+ return current_vector;
-+}
-+
-+#ifndef CONFIG_XEN
-+static struct hw_interrupt_type ioapic_level_type;
-+static struct hw_interrupt_type ioapic_edge_type;
-+
-+#define IOAPIC_AUTO -1
-+#define IOAPIC_EDGE 0
-+#define IOAPIC_LEVEL 1
-+
-+static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger)
-+{
-+ if (use_pci_vector() && !platform_legacy_irq(irq)) {
-+ if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
-+ trigger == IOAPIC_LEVEL)
-+ irq_desc[vector].handler = &ioapic_level_type;
-+ else
-+ irq_desc[vector].handler = &ioapic_edge_type;
-+ set_intr_gate(vector, interrupt[vector]);
-+ } else {
-+ if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
-+ trigger == IOAPIC_LEVEL)
-+ irq_desc[irq].handler = &ioapic_level_type;
-+ else
-+ irq_desc[irq].handler = &ioapic_edge_type;
-+ set_intr_gate(vector, interrupt[irq]);
-+ }
-+}
-+#else
-+#define ioapic_register_intr(_irq,_vector,_trigger) ((void)0)
-+#endif
-+
-+static void __init setup_IO_APIC_irqs(void)
-+{
-+ struct IO_APIC_route_entry entry;
-+ int apic, pin, idx, irq, first_notcon = 1, vector;
-+ unsigned long flags;
-+
-+ apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
-+
-+ for (apic = 0; apic < nr_ioapics; apic++) {
-+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
-+
-+ /*
-+ * add it to the IO-APIC irq-routing table:
-+ */
-+ memset(&entry,0,sizeof(entry));
-+
-+ entry.delivery_mode = INT_DELIVERY_MODE;
-+ entry.dest_mode = INT_DEST_MODE;
-+ entry.mask = 0; /* enable IRQ */
-+ entry.dest.logical.logical_dest =
-+ cpu_mask_to_apicid(TARGET_CPUS);
-+
-+ idx = find_irq_entry(apic,pin,mp_INT);
-+ if (idx == -1) {
-+ if (first_notcon) {
-+ apic_printk(APIC_VERBOSE, KERN_DEBUG
-+ " IO-APIC (apicid-pin) %d-%d",
-+ mp_ioapics[apic].mpc_apicid,
-+ pin);
-+ first_notcon = 0;
-+ } else
-+ apic_printk(APIC_VERBOSE, ", %d-%d",
-+ mp_ioapics[apic].mpc_apicid, pin);
-+ continue;
-+ }
-+
-+ entry.trigger = irq_trigger(idx);
-+ entry.polarity = irq_polarity(idx);
-+
-+ if (irq_trigger(idx)) {
-+ entry.trigger = 1;
-+ entry.mask = 1;
-+ }
-+
-+ irq = pin_2_irq(idx, apic, pin);
-+ /*
-+ * skip adding the timer int on secondary nodes, which causes
-+ * a small but painful rift in the time-space continuum
-+ */
-+ if (multi_timer_check(apic, irq))
-+ continue;
-+ else
-+ add_pin_to_irq(irq, apic, pin);
-+
-+ if (/*!apic &&*/ !IO_APIC_IRQ(irq))
-+ continue;
-+
-+ if (IO_APIC_IRQ(irq)) {
-+ vector = assign_irq_vector(irq);
-+ entry.vector = vector;
-+ ioapic_register_intr(irq, vector, IOAPIC_AUTO);
-+
-+ if (!apic && (irq < 16))
-+ disable_8259A_irq(irq);
-+ }
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
-+ io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
-+ set_native_irq_info(irq, TARGET_CPUS);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+ }
-+ }
-+
-+ if (!first_notcon)
-+ apic_printk(APIC_VERBOSE, " not connected.\n");
-+}
-+
-+/*
-+ * Set up the 8259A-master output pin:
-+ */
-+#ifndef CONFIG_XEN
-+static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
-+{
-+ struct IO_APIC_route_entry entry;
-+ unsigned long flags;
-+
-+ memset(&entry,0,sizeof(entry));
-+
-+ disable_8259A_irq(0);
-+
-+ /* mask LVT0 */
-+ apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
-+
-+ /*
-+ * We use logical delivery to get the timer IRQ
-+ * to the first CPU.
-+ */
-+ entry.dest_mode = INT_DEST_MODE;
-+ entry.mask = 0; /* unmask IRQ now */
-+ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
-+ entry.delivery_mode = INT_DELIVERY_MODE;
-+ entry.polarity = 0;
-+ entry.trigger = 0;
-+ entry.vector = vector;
-+
-+ /*
-+ * The timer IRQ doesn't have to know that behind the
-+ * scene we have a 8259A-master in AEOI mode ...
-+ */
-+ irq_desc[0].handler = &ioapic_edge_type;
-+
-+ /*
-+ * Add it to the IO-APIC irq-routing table:
-+ */
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
-+ io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ enable_8259A_irq(0);
-+}
-+
-+static inline void UNEXPECTED_IO_APIC(void)
-+{
-+}
-+
-+void __init print_IO_APIC(void)
-+{
-+ int apic, i;
-+ union IO_APIC_reg_00 reg_00;
-+ union IO_APIC_reg_01 reg_01;
-+ union IO_APIC_reg_02 reg_02;
-+ union IO_APIC_reg_03 reg_03;
-+ unsigned long flags;
-+
-+ if (apic_verbosity == APIC_QUIET)
-+ return;
-+
-+ printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
-+ for (i = 0; i < nr_ioapics; i++)
-+ printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
-+ mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
-+
-+ /*
-+ * We are a bit conservative about what we expect. We have to
-+ * know about every hardware change ASAP.
-+ */
-+ printk(KERN_INFO "testing the IO APIC.......................\n");
-+
-+ for (apic = 0; apic < nr_ioapics; apic++) {
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ reg_00.raw = io_apic_read(apic, 0);
-+ reg_01.raw = io_apic_read(apic, 1);
-+ if (reg_01.bits.version >= 0x10)
-+ reg_02.raw = io_apic_read(apic, 2);
-+ if (reg_01.bits.version >= 0x20)
-+ reg_03.raw = io_apic_read(apic, 3);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
-+ printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
-+ printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
-+ printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
-+ printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
-+ if (reg_00.bits.ID >= get_physical_broadcast())
-+ UNEXPECTED_IO_APIC();
-+ if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2)
-+ UNEXPECTED_IO_APIC();
-+
-+ printk(KERN_DEBUG ".... register #01: %08X\n", reg_01.raw);
-+ printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
-+ if ( (reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */
-+ (reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */
-+ (reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */
-+ (reg_01.bits.entries != 0x1f) && /* dual Xeon boards */
-+ (reg_01.bits.entries != 0x22) && /* bigger Xeon boards */
-+ (reg_01.bits.entries != 0x2E) &&
-+ (reg_01.bits.entries != 0x3F)
-+ )
-+ UNEXPECTED_IO_APIC();
-+
-+ printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
-+ printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
-+ if ( (reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */
-+ (reg_01.bits.version != 0x10) && /* oldest IO-APICs */
-+ (reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */
-+ (reg_01.bits.version != 0x13) && /* Xeon IO-APICs */
-+ (reg_01.bits.version != 0x20) /* Intel P64H (82806 AA) */
-+ )
-+ UNEXPECTED_IO_APIC();
-+ if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2)
-+ UNEXPECTED_IO_APIC();
-+
-+ /*
-+ * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
-+ * but the value of reg_02 is read as the previous read register
-+ * value, so ignore it if reg_02 == reg_01.
-+ */
-+ if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
-+ printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
-+ printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
-+ if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2)
-+ UNEXPECTED_IO_APIC();
-+ }
-+
-+ /*
-+ * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
-+ * or reg_03, but the value of reg_0[23] is read as the previous read
-+ * register value, so ignore it if reg_03 == reg_0[12].
-+ */
-+ if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
-+ reg_03.raw != reg_01.raw) {
-+ printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
-+ printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
-+ if (reg_03.bits.__reserved_1)
-+ UNEXPECTED_IO_APIC();
-+ }
-+
-+ printk(KERN_DEBUG ".... IRQ redirection table:\n");
-+
-+ printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
-+ " Stat Dest Deli Vect: \n");
-+
-+ for (i = 0; i <= reg_01.bits.entries; i++) {
-+ struct IO_APIC_route_entry entry;
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ *(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2);
-+ *(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ printk(KERN_DEBUG " %02x %03X %02X ",
-+ i,
-+ entry.dest.logical.logical_dest,
-+ entry.dest.physical.physical_dest
-+ );
-+
-+ printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
-+ entry.mask,
-+ entry.trigger,
-+ entry.irr,
-+ entry.polarity,
-+ entry.delivery_status,
-+ entry.dest_mode,
-+ entry.delivery_mode,
-+ entry.vector
-+ );
-+ }
-+ }
-+ if (use_pci_vector())
-+ printk(KERN_INFO "Using vector-based indexing\n");
-+ printk(KERN_DEBUG "IRQ to pin mappings:\n");
-+ for (i = 0; i < NR_IRQS; i++) {
-+ struct irq_pin_list *entry = irq_2_pin + i;
-+ if (entry->pin < 0)
-+ continue;
-+ if (use_pci_vector() && !platform_legacy_irq(i))
-+ printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
-+ else
-+ printk(KERN_DEBUG "IRQ%d ", i);
-+ for (;;) {
-+ printk("-> %d:%d", entry->apic, entry->pin);
-+ if (!entry->next)
-+ break;
-+ entry = irq_2_pin + entry->next;
-+ }
-+ printk("\n");
-+ }
-+
-+ printk(KERN_INFO ".................................... done.\n");
-+
-+ return;
-+}
-+
-+#if 0
-+
-+static void print_APIC_bitfield (int base)
-+{
-+ unsigned int v;
-+ int i, j;
-+
-+ if (apic_verbosity == APIC_QUIET)
-+ return;
-+
-+ printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
-+ for (i = 0; i < 8; i++) {
-+ v = apic_read(base + i*0x10);
-+ for (j = 0; j < 32; j++) {
-+ if (v & (1<<j))
-+ printk("1");
-+ else
-+ printk("0");
-+ }
-+ printk("\n");
-+ }
-+}
-+
-+void /*__init*/ print_local_APIC(void * dummy)
-+{
-+ unsigned int v, ver, maxlvt;
-+
-+ if (apic_verbosity == APIC_QUIET)
-+ return;
-+
-+ printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
-+ smp_processor_id(), hard_smp_processor_id());
-+ v = apic_read(APIC_ID);
-+ printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(v));
-+ v = apic_read(APIC_LVR);
-+ printk(KERN_INFO "... APIC VERSION: %08x\n", v);
-+ ver = GET_APIC_VERSION(v);
-+ maxlvt = get_maxlvt();
-+
-+ v = apic_read(APIC_TASKPRI);
-+ printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
-+
-+ if (APIC_INTEGRATED(ver)) { /* !82489DX */
-+ v = apic_read(APIC_ARBPRI);
-+ printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
-+ v & APIC_ARBPRI_MASK);
-+ v = apic_read(APIC_PROCPRI);
-+ printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
-+ }
-+
-+ v = apic_read(APIC_EOI);
-+ printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
-+ v = apic_read(APIC_RRR);
-+ printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
-+ v = apic_read(APIC_LDR);
-+ printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
-+ v = apic_read(APIC_DFR);
-+ printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
-+ v = apic_read(APIC_SPIV);
-+ printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
-+
-+ printk(KERN_DEBUG "... APIC ISR field:\n");
-+ print_APIC_bitfield(APIC_ISR);
-+ printk(KERN_DEBUG "... APIC TMR field:\n");
-+ print_APIC_bitfield(APIC_TMR);
-+ printk(KERN_DEBUG "... APIC IRR field:\n");
-+ print_APIC_bitfield(APIC_IRR);
-+
-+ if (APIC_INTEGRATED(ver)) { /* !82489DX */
-+ if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
-+ apic_write(APIC_ESR, 0);
-+ v = apic_read(APIC_ESR);
-+ printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
-+ }
-+
-+ v = apic_read(APIC_ICR);
-+ printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
-+ v = apic_read(APIC_ICR2);
-+ printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);
-+
-+ v = apic_read(APIC_LVTT);
-+ printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
-+
-+ if (maxlvt > 3) { /* PC is LVT#4. */
-+ v = apic_read(APIC_LVTPC);
-+ printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
-+ }
-+ v = apic_read(APIC_LVT0);
-+ printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
-+ v = apic_read(APIC_LVT1);
-+ printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
-+
-+ if (maxlvt > 2) { /* ERR is LVT#3. */
-+ v = apic_read(APIC_LVTERR);
-+ printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
-+ }
-+
-+ v = apic_read(APIC_TMICT);
-+ printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
-+ v = apic_read(APIC_TMCCT);
-+ printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
-+ v = apic_read(APIC_TDCR);
-+ printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
-+ printk("\n");
-+}
-+
-+void print_all_local_APICs (void)
-+{
-+ on_each_cpu(print_local_APIC, NULL, 1, 1);
-+}
-+
-+void /*__init*/ print_PIC(void)
-+{
-+ unsigned int v;
-+ unsigned long flags;
-+
-+ if (apic_verbosity == APIC_QUIET)
-+ return;
-+
-+ printk(KERN_DEBUG "\nprinting PIC contents\n");
-+
-+ spin_lock_irqsave(&i8259A_lock, flags);
-+
-+ v = inb(0xa1) << 8 | inb(0x21);
-+ printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
-+
-+ v = inb(0xa0) << 8 | inb(0x20);
-+ printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
-+
-+ outb(0x0b,0xa0);
-+ outb(0x0b,0x20);
-+ v = inb(0xa0) << 8 | inb(0x20);
-+ outb(0x0a,0xa0);
-+ outb(0x0a,0x20);
-+
-+ spin_unlock_irqrestore(&i8259A_lock, flags);
-+
-+ printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
-+
-+ v = inb(0x4d1) << 8 | inb(0x4d0);
-+ printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
-+}
-+
-+#endif /* 0 */
-+
-+#else
-+void __init print_IO_APIC(void) { }
-+#endif /* !CONFIG_XEN */
-+
-+static void __init enable_IO_APIC(void)
-+{
-+ union IO_APIC_reg_01 reg_01;
-+ int i8259_apic, i8259_pin;
-+ int i, apic;
-+ unsigned long flags;
-+
-+ for (i = 0; i < PIN_MAP_SIZE; i++) {
-+ irq_2_pin[i].pin = -1;
-+ irq_2_pin[i].next = 0;
-+ }
-+ if (!pirqs_enabled)
-+ for (i = 0; i < MAX_PIRQS; i++)
-+ pirq_entries[i] = -1;
-+
-+ /*
-+ * The number of IO-APIC IRQ registers (== #pins):
-+ */
-+ for (apic = 0; apic < nr_ioapics; apic++) {
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ reg_01.raw = io_apic_read(apic, 1);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+ nr_ioapic_registers[apic] = reg_01.bits.entries+1;
-+ }
-+ for(apic = 0; apic < nr_ioapics; apic++) {
-+ int pin;
-+ /* See if any of the pins is in ExtINT mode */
-+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
-+ struct IO_APIC_route_entry entry;
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ *(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
-+ *(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+
-+			/* If the interrupt line is enabled and in ExtINT mode,
-+			 * we have found the pin where the i8259 is connected.
-+ */
-+ if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
-+ ioapic_i8259.apic = apic;
-+ ioapic_i8259.pin = pin;
-+ goto found_i8259;
-+ }
-+ }
-+ }
-+ found_i8259:
-+	/* Look to see whether the MP table has reported the ExtINT */
-+	/* If we could not find the appropriate pin by looking at the ioapic,
-+	 * the i8259 is probably not connected to the ioapic, but give the
-+	 * mptable a chance anyway.
-+ */
-+ i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
-+ i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
-+ /* Trust the MP table if nothing is setup in the hardware */
-+ if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
-+ printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
-+ ioapic_i8259.pin = i8259_pin;
-+ ioapic_i8259.apic = i8259_apic;
-+ }
-+ /* Complain if the MP table and the hardware disagree */
-+ if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
-+ (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
-+ {
-+ printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
-+ }
-+
-+ /*
-+ * Do not trust the IO-APIC being empty at bootup
-+ */
-+ clear_IO_APIC();
-+}
-+
-+/*
-+ * Not an __init, needed by the reboot code
-+ */
-+void disable_IO_APIC(void)
-+{
-+ /*
-+ * Clear the IO-APIC before rebooting:
-+ */
-+ clear_IO_APIC();
-+
-+#ifndef CONFIG_XEN
-+ /*
-+	 * If the i8259 is routed through an IOAPIC,
-+	 * put that IOAPIC in virtual wire mode
-+ * so legacy interrupts can be delivered.
-+ */
-+ if (ioapic_i8259.pin != -1) {
-+ struct IO_APIC_route_entry entry;
-+ unsigned long flags;
-+
-+ memset(&entry, 0, sizeof(entry));
-+ entry.mask = 0; /* Enabled */
-+ entry.trigger = 0; /* Edge */
-+ entry.irr = 0;
-+ entry.polarity = 0; /* High */
-+ entry.delivery_status = 0;
-+ entry.dest_mode = 0; /* Physical */
-+ entry.delivery_mode = dest_ExtINT; /* ExtInt */
-+ entry.vector = 0;
-+ entry.dest.physical.physical_dest =
-+ GET_APIC_ID(apic_read(APIC_ID));
-+
-+ /*
-+ * Add it to the IO-APIC irq-routing table:
-+ */
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ io_apic_write(ioapic_i8259.apic, 0x11+2*ioapic_i8259.pin,
-+ *(((int *)&entry)+1));
-+ io_apic_write(ioapic_i8259.apic, 0x10+2*ioapic_i8259.pin,
-+ *(((int *)&entry)+0));
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+ }
-+ disconnect_bsp_APIC(ioapic_i8259.pin != -1);
-+#endif
-+}
-+
-+/*
-+ * function to set the IO-APIC physical IDs based on the
-+ * values stored in the MPC table.
-+ *
-+ * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
-+ */
-+
-+#if !defined(CONFIG_XEN) && !defined(CONFIG_X86_NUMAQ)
-+static void __init setup_ioapic_ids_from_mpc(void)
-+{
-+ union IO_APIC_reg_00 reg_00;
-+ physid_mask_t phys_id_present_map;
-+ int apic;
-+ int i;
-+ unsigned char old_id;
-+ unsigned long flags;
-+
-+ /*
-+ * Don't check I/O APIC IDs for xAPIC systems. They have
-+ * no meaning without the serial APIC bus.
-+ */
-+ if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && boot_cpu_data.x86 < 15))
-+ return;
-+ /*
-+ * This is broken; anything with a real cpu count has to
-+ * circumvent this idiocy regardless.
-+ */
-+ phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);
-+
-+ /*
-+ * Set the IOAPIC ID to the value stored in the MPC table.
-+ */
-+ for (apic = 0; apic < nr_ioapics; apic++) {
-+
-+ /* Read the register 0 value */
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ reg_00.raw = io_apic_read(apic, 0);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ old_id = mp_ioapics[apic].mpc_apicid;
-+
-+ if (mp_ioapics[apic].mpc_apicid >= get_physical_broadcast()) {
-+ printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
-+ apic, mp_ioapics[apic].mpc_apicid);
-+ printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
-+ reg_00.bits.ID);
-+ mp_ioapics[apic].mpc_apicid = reg_00.bits.ID;
-+ }
-+
-+ /*
-+ * Sanity check, is the ID really free? Every APIC in a
-+ * system must have a unique ID or we get lots of nice
-+ * 'stuck on smp_invalidate_needed IPI wait' messages.
-+ */
-+ if (check_apicid_used(phys_id_present_map,
-+ mp_ioapics[apic].mpc_apicid)) {
-+ printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
-+ apic, mp_ioapics[apic].mpc_apicid);
-+ for (i = 0; i < get_physical_broadcast(); i++)
-+ if (!physid_isset(i, phys_id_present_map))
-+ break;
-+ if (i >= get_physical_broadcast())
-+ panic("Max APIC ID exceeded!\n");
-+ printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
-+ i);
-+ physid_set(i, phys_id_present_map);
-+ mp_ioapics[apic].mpc_apicid = i;
-+ } else {
-+ physid_mask_t tmp;
-+ tmp = apicid_to_cpu_present(mp_ioapics[apic].mpc_apicid);
-+ apic_printk(APIC_VERBOSE, "Setting %d in the "
-+ "phys_id_present_map\n",
-+ mp_ioapics[apic].mpc_apicid);
-+ physids_or(phys_id_present_map, phys_id_present_map, tmp);
-+ }
-+
-+
-+ /*
-+ * We need to adjust the IRQ routing table
-+ * if the ID changed.
-+ */
-+ if (old_id != mp_ioapics[apic].mpc_apicid)
-+ for (i = 0; i < mp_irq_entries; i++)
-+ if (mp_irqs[i].mpc_dstapic == old_id)
-+ mp_irqs[i].mpc_dstapic
-+ = mp_ioapics[apic].mpc_apicid;
-+
-+ /*
-+ * Read the right value from the MPC table and
-+ * write it into the ID register.
-+ */
-+ apic_printk(APIC_VERBOSE, KERN_INFO
-+ "...changing IO-APIC physical APIC ID to %d ...",
-+ mp_ioapics[apic].mpc_apicid);
-+
-+ reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ io_apic_write(apic, 0, reg_00.raw);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ /*
-+ * Sanity check
-+ */
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ reg_00.raw = io_apic_read(apic, 0);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+ if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
-+ printk("could not set ID!\n");
-+ else
-+ apic_printk(APIC_VERBOSE, " ok.\n");
-+ }
-+}
-+#else
-+static void __init setup_ioapic_ids_from_mpc(void) { }
-+#endif
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * There is a nasty bug in some older SMP boards: their mptable lies
-+ * about the timer IRQ. We do the following to work around the situation:
-+ *
-+ * - timer IRQ defaults to IO-APIC IRQ
-+ * - if this function detects that timer IRQs are defunct, then we fall
-+ * back to ISA timer IRQs
-+ */
-+static int __init timer_irq_works(void)
-+{
-+ unsigned long t1 = jiffies;
-+
-+ local_irq_enable();
-+ /* Let ten ticks pass... */
-+ mdelay((10 * 1000) / HZ);
-+
-+ /*
-+ * Expect a few ticks at least, to be sure some possible
-+	 * glue logic does not lock up after the first one or
-+	 * two ticks in a non-ExtINT mode. Also the local APIC
-+ * might have cached one ExtINT interrupt. Finally, at
-+ * least one tick may be lost due to delays.
-+ */
-+ if (jiffies - t1 > 4)
-+ return 1;
-+
-+ return 0;
-+}
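-+/*
-+ * Editorial note (HZ = 250 assumed for the arithmetic, not stated in the
-+ * patch): mdelay((10 * 1000) / HZ) busy-waits for the wall-clock time of
-+ * ten ticks (40 ms at HZ = 250); if at most four jiffies elapsed in that
-+ * window, the timer IRQ is considered defunct and 0 is returned.
-+ */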
-+
-+/*
-+ * In the SMP+IOAPIC case it might happen that there are an unspecified
-+ * number of pending IRQ events unhandled. These cases are very rare,
-+ * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
-+ * better to do it this way, as then we do not have to be aware of
-+ * 'pending' interrupts in the IRQ path, except at this point.
-+ */
-+/*
-+ * Edge-triggered IRQs need to resend any interrupt
-+ * that was delayed, but this is now handled in the
-+ * device-independent code.
-+ */
-+
-+/*
-+ * Starting up an edge-triggered IO-APIC interrupt is
-+ * nasty - we need to make sure that we get the edge.
-+ * If it is already asserted for some reason, we need to
-+ * return 1 to indicate that it was pending.
-+ *
-+ * This is not complete - we should be able to fake
-+ * an edge even if it isn't on the 8259A...
-+ */
-+static unsigned int startup_edge_ioapic_irq(unsigned int irq)
-+{
-+ int was_pending = 0;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ if (irq < 16) {
-+ disable_8259A_irq(irq);
-+ if (i8259A_irq_pending(irq))
-+ was_pending = 1;
-+ }
-+ __unmask_IO_APIC_irq(irq);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ return was_pending;
-+}
-+
-+/*
-+ * Once we have recorded IRQ_PENDING already, we can mask the
-+ * interrupt for real. This prevents IRQ storms from unhandled
-+ * devices.
-+ */
-+static void ack_edge_ioapic_irq(unsigned int irq)
-+{
-+ move_irq(irq);
-+ if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
-+ == (IRQ_PENDING | IRQ_DISABLED))
-+ mask_IO_APIC_irq(irq);
-+ ack_APIC_irq();
-+}
-+
-+/*
-+ * Level triggered interrupts can just be masked,
-+ * and shutting down and starting up the interrupt
-+ * is the same as enabling and disabling them -- except
-+ * that startup needs to return a "was pending" value.
-+ *
-+ * Level triggered interrupts are special because we
-+ * do not touch any IO-APIC register while handling
-+ * them. We ack the APIC in the end-IRQ handler, not
-+ * in the start-IRQ-handler. Protection against reentrance
-+ * from the same interrupt is still provided, both by the
-+ * generic IRQ layer and by the fact that an unacked local
-+ * APIC does not accept IRQs.
-+ */
-+static unsigned int startup_level_ioapic_irq (unsigned int irq)
-+{
-+ unmask_IO_APIC_irq(irq);
-+
-+ return 0; /* don't check for pending */
-+}
-+
-+static void end_level_ioapic_irq (unsigned int irq)
-+{
-+ unsigned long v;
-+ int i;
-+
-+ move_irq(irq);
-+/*
-+ * It appears there is an erratum which affects at least version 0x11
-+ * of I/O APIC (that's the 82093AA and cores integrated into various
-+ * chipsets). Under certain conditions a level-triggered interrupt is
-+ * erroneously delivered as edge-triggered one but the respective IRR
-+ * bit gets set nevertheless. As a result the I/O unit expects an EOI
-+ * message but it will never arrive and further interrupts are blocked
-+ * from the source. The exact reason is so far unknown, but the
-+ * phenomenon was observed when two consecutive interrupt requests
-+ * from a given source get delivered to the same CPU and the source is
-+ * temporarily disabled in between.
-+ *
-+ * A workaround is to simulate an EOI message manually. We achieve it
-+ * by setting the trigger mode to edge and then to level when the edge
-+ * trigger mode gets detected in the TMR of a local APIC for a
-+ * level-triggered interrupt. We mask the source for the time of the
-+ * operation to prevent an edge-triggered interrupt escaping meanwhile.
-+ * The idea is from Manfred Spraul. --macro
-+ */
-+ i = IO_APIC_VECTOR(irq);
-+
-+ v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
-+
-+ ack_APIC_irq();
-+
-+ if (!(v & (1 << (i & 0x1f)))) {
-+ atomic_inc(&irq_mis_count);
-+ spin_lock(&ioapic_lock);
-+ __mask_and_edge_IO_APIC_irq(irq);
-+ __unmask_and_level_IO_APIC_irq(irq);
-+ spin_unlock(&ioapic_lock);
-+ }
-+}
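-+/*
-+ * Editorial worked example (assumed vector): for i = 0x31 the read above
-+ * touches APIC_TMR + ((0x31 & ~0x1f) >> 1) = APIC_TMR + 0x10, i.e. the
-+ * second 32-bit TMR word, and tests bit (0x31 & 0x1f) = 0x11.  A clear
-+ * bit means the local APIC latched the interrupt as edge, so the EOI is
-+ * simulated via the mask/edge then unmask/level sequence.
-+ */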
-+
-+#ifdef CONFIG_PCI_MSI
-+static unsigned int startup_edge_ioapic_vector(unsigned int vector)
-+{
-+ int irq = vector_to_irq(vector);
-+
-+ return startup_edge_ioapic_irq(irq);
-+}
-+
-+static void ack_edge_ioapic_vector(unsigned int vector)
-+{
-+ int irq = vector_to_irq(vector);
-+
-+ move_native_irq(vector);
-+ ack_edge_ioapic_irq(irq);
-+}
-+
-+static unsigned int startup_level_ioapic_vector (unsigned int vector)
-+{
-+ int irq = vector_to_irq(vector);
-+
-+ return startup_level_ioapic_irq (irq);
-+}
-+
-+static void end_level_ioapic_vector (unsigned int vector)
-+{
-+ int irq = vector_to_irq(vector);
-+
-+ move_native_irq(vector);
-+ end_level_ioapic_irq(irq);
-+}
-+
-+static void mask_IO_APIC_vector (unsigned int vector)
-+{
-+ int irq = vector_to_irq(vector);
-+
-+ mask_IO_APIC_irq(irq);
-+}
-+
-+static void unmask_IO_APIC_vector (unsigned int vector)
-+{
-+ int irq = vector_to_irq(vector);
-+
-+ unmask_IO_APIC_irq(irq);
-+}
-+
-+#ifdef CONFIG_SMP
-+static void set_ioapic_affinity_vector (unsigned int vector,
-+ cpumask_t cpu_mask)
-+{
-+ int irq = vector_to_irq(vector);
-+
-+ set_native_irq_info(vector, cpu_mask);
-+ set_ioapic_affinity_irq(irq, cpu_mask);
-+}
-+#endif
-+#endif
-+
-+/*
-+ * Level and edge triggered IO-APIC interrupts need different handling,
-+ * so we use two separate IRQ descriptors. Edge triggered IRQs can be
-+ * handled with the level-triggered descriptor, but that one has slightly
-+ * more overhead. Level-triggered interrupts cannot be handled with the
-+ * edge-triggered handler, without risking IRQ storms and other ugly
-+ * races.
-+ */
-+static struct hw_interrupt_type ioapic_edge_type __read_mostly = {
-+ .typename = "IO-APIC-edge",
-+ .startup = startup_edge_ioapic,
-+ .shutdown = shutdown_edge_ioapic,
-+ .enable = enable_edge_ioapic,
-+ .disable = disable_edge_ioapic,
-+ .ack = ack_edge_ioapic,
-+ .end = end_edge_ioapic,
-+#ifdef CONFIG_SMP
-+ .set_affinity = set_ioapic_affinity,
-+#endif
-+};
-+
-+static struct hw_interrupt_type ioapic_level_type __read_mostly = {
-+ .typename = "IO-APIC-level",
-+ .startup = startup_level_ioapic,
-+ .shutdown = shutdown_level_ioapic,
-+ .enable = enable_level_ioapic,
-+ .disable = disable_level_ioapic,
-+ .ack = mask_and_ack_level_ioapic,
-+ .end = end_level_ioapic,
-+#ifdef CONFIG_SMP
-+ .set_affinity = set_ioapic_affinity,
-+#endif
-+};
-+#endif /* !CONFIG_XEN */
-+
-+static inline void init_IO_APIC_traps(void)
-+{
-+ int irq;
-+
-+ /*
-+ * NOTE! The local APIC isn't very good at handling
-+ * multiple interrupts at the same interrupt level.
-+ * As the interrupt level is determined by taking the
-+ * vector number and shifting that right by 4, we
-+ * want to spread these out a bit so that they don't
-+ * all fall in the same interrupt level.
-+ *
-+ * Also, we've got to be careful not to trash gate
-+ * 0x80, because int 0x80 is hm, kind of importantish. ;)
-+ */
-+ for (irq = 0; irq < NR_IRQS ; irq++) {
-+ int tmp = irq;
-+ if (use_pci_vector()) {
-+ if (!platform_legacy_irq(tmp))
-+ if ((tmp = vector_to_irq(tmp)) == -1)
-+ continue;
-+ }
-+ if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) {
-+ /*
-+ * Hmm.. We don't have an entry for this,
-+ * so default to an old-fashioned 8259
-+ * interrupt if we can..
-+ */
-+ if (irq < 16)
-+ make_8259A_irq(irq);
-+#ifndef CONFIG_XEN
-+ else
-+ /* Strange. Oh, well.. */
-+ irq_desc[irq].handler = &no_irq_type;
-+#endif
-+ }
-+ }
-+}
-+
-+#ifndef CONFIG_XEN
-+static void enable_lapic_irq (unsigned int irq)
-+{
-+ unsigned long v;
-+
-+ v = apic_read(APIC_LVT0);
-+ apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED);
-+}
-+
-+static void disable_lapic_irq (unsigned int irq)
-+{
-+ unsigned long v;
-+
-+ v = apic_read(APIC_LVT0);
-+ apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
-+}
-+
-+static void ack_lapic_irq (unsigned int irq)
-+{
-+ ack_APIC_irq();
-+}
-+
-+static void end_lapic_irq (unsigned int i) { /* nothing */ }
-+
-+static struct hw_interrupt_type lapic_irq_type __read_mostly = {
-+ .typename = "local-APIC-edge",
-+ .startup = NULL, /* startup_irq() not used for IRQ0 */
-+ .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
-+ .enable = enable_lapic_irq,
-+ .disable = disable_lapic_irq,
-+ .ack = ack_lapic_irq,
-+ .end = end_lapic_irq
-+};
-+
-+static void setup_nmi (void)
-+{
-+ /*
-+ * Dirty trick to enable the NMI watchdog ...
-+ * We put the 8259A master into AEOI mode and
-+ * unmask on all local APICs LVT0 as NMI.
-+ *
-+ * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
-+ * is from Maciej W. Rozycki - so we do not have to EOI from
-+ * the NMI handler or the timer interrupt.
-+ */
-+ apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
-+
-+ on_each_cpu(enable_NMI_through_LVT0, NULL, 1, 1);
-+
-+ apic_printk(APIC_VERBOSE, " done.\n");
-+}
-+
-+/*
-+ * This looks a bit hackish but it's about the only way of sending
-+ * a few INTA cycles to 8259As and any associated glue logic. ICR does
-+ * not support the ExtINT mode, unfortunately. We need to send these
-+ * cycles as some i82489DX-based boards have glue logic that keeps the
-+ * 8259A interrupt line asserted until INTA. --macro
-+ */
-+static inline void unlock_ExtINT_logic(void)
-+{
-+ int apic, pin, i;
-+ struct IO_APIC_route_entry entry0, entry1;
-+ unsigned char save_control, save_freq_select;
-+ unsigned long flags;
-+
-+ pin = find_isa_irq_pin(8, mp_INT);
-+ apic = find_isa_irq_apic(8, mp_INT);
-+ if (pin == -1)
-+ return;
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ *(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
-+ *(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+ clear_IO_APIC_pin(apic, pin);
-+
-+ memset(&entry1, 0, sizeof(entry1));
-+
-+ entry1.dest_mode = 0; /* physical delivery */
-+ entry1.mask = 0; /* unmask IRQ now */
-+ entry1.dest.physical.physical_dest = hard_smp_processor_id();
-+ entry1.delivery_mode = dest_ExtINT;
-+ entry1.polarity = entry0.polarity;
-+ entry1.trigger = 0;
-+ entry1.vector = 0;
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
-+ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ save_control = CMOS_READ(RTC_CONTROL);
-+ save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
-+ CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
-+ RTC_FREQ_SELECT);
-+ CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
-+
-+ i = 100;
-+ while (i-- > 0) {
-+ mdelay(10);
-+ if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
-+ i -= 10;
-+ }
-+
-+ CMOS_WRITE(save_control, RTC_CONTROL);
-+ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
-+ clear_IO_APIC_pin(apic, pin);
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
-+ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+
-+/*
-+ * This code may look a bit paranoid, but it's supposed to cooperate with
-+ * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
-+ * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
-+ * fanatically on his truly buggy board.
-+ */
-+static inline void check_timer(void)
-+{
-+ int apic1, pin1, apic2, pin2;
-+ int vector;
-+
-+ /*
-+ * get/set the timer IRQ vector:
-+ */
-+ disable_8259A_irq(0);
-+ vector = assign_irq_vector(0);
-+ set_intr_gate(vector, interrupt[0]);
-+
-+ /*
-+	 * Subtle: code in do_timer_interrupt() expects an AEOI
-+ * mode for the 8259A whenever interrupts are routed
-+ * through I/O APICs. Also IRQ0 has to be enabled in
-+ * the 8259A which implies the virtual wire has to be
-+ * disabled in the local APIC.
-+ */
-+ apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
-+ init_8259A(1);
-+ timer_ack = 1;
-+ enable_8259A_irq(0);
-+
-+ pin1 = find_isa_irq_pin(0, mp_INT);
-+ apic1 = find_isa_irq_apic(0, mp_INT);
-+ pin2 = ioapic_i8259.pin;
-+ apic2 = ioapic_i8259.apic;
-+
-+ printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
-+ vector, apic1, pin1, apic2, pin2);
-+
-+ if (pin1 != -1) {
-+ /*
-+ * Ok, does IRQ0 through the IOAPIC work?
-+ */
-+ unmask_IO_APIC_irq(0);
-+ if (timer_irq_works()) {
-+ if (nmi_watchdog == NMI_IO_APIC) {
-+ disable_8259A_irq(0);
-+ setup_nmi();
-+ enable_8259A_irq(0);
-+ }
-+ if (disable_timer_pin_1 > 0)
-+ clear_IO_APIC_pin(0, pin1);
-+ return;
-+ }
-+ clear_IO_APIC_pin(apic1, pin1);
-+ printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to "
-+ "IO-APIC\n");
-+ }
-+
-+ printk(KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... ");
-+ if (pin2 != -1) {
-+ printk("\n..... (found pin %d) ...", pin2);
-+ /*
-+ * legacy devices should be connected to IO APIC #0
-+ */
-+ setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
-+ if (timer_irq_works()) {
-+ printk("works.\n");
-+ if (pin1 != -1)
-+ replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
-+ else
-+ add_pin_to_irq(0, apic2, pin2);
-+ if (nmi_watchdog == NMI_IO_APIC) {
-+ setup_nmi();
-+ }
-+ return;
-+ }
-+ /*
-+ * Cleanup, just in case ...
-+ */
-+ clear_IO_APIC_pin(apic2, pin2);
-+ }
-+ printk(" failed.\n");
-+
-+ if (nmi_watchdog == NMI_IO_APIC) {
-+ printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
-+ nmi_watchdog = 0;
-+ }
-+
-+ printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
-+
-+ disable_8259A_irq(0);
-+ irq_desc[0].handler = &lapic_irq_type;
-+ apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
-+ enable_8259A_irq(0);
-+
-+ if (timer_irq_works()) {
-+ printk(" works.\n");
-+ return;
-+ }
-+ apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
-+ printk(" failed.\n");
-+
-+ printk(KERN_INFO "...trying to set up timer as ExtINT IRQ...");
-+
-+ timer_ack = 0;
-+ init_8259A(0);
-+ make_8259A_irq(0);
-+ apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
-+
-+ unlock_ExtINT_logic();
-+
-+ if (timer_irq_works()) {
-+ printk(" works.\n");
-+ return;
-+ }
-+ printk(" failed :(.\n");
-+ panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
-+ "report. Then try booting with the 'noapic' option");
-+}
-+#else
-+#define check_timer() ((void)0)
-+#endif
-+
-+/*
-+ * IRQs that are handled by the PIC in the MPS IOAPIC case.
-+ * - IRQ2 is the cascade IRQ, and cannot be an io-apic IRQ.
-+ * Linux doesn't really care, as it's not actually used
-+ * for any interrupt handling anyway.
-+ */
-+#define PIC_IRQS (1 << PIC_CASCADE_IR)
-+
-+void __init setup_IO_APIC(void)
-+{
-+ enable_IO_APIC();
-+
-+ if (acpi_ioapic)
-+ io_apic_irqs = ~0; /* all IRQs go through IOAPIC */
-+ else
-+ io_apic_irqs = ~PIC_IRQS;
-+
-+ printk("ENABLING IO-APIC IRQs\n");
-+
-+ /*
-+ * Set up IO-APIC IRQ routing.
-+ */
-+ if (!acpi_ioapic)
-+ setup_ioapic_ids_from_mpc();
-+#ifndef CONFIG_XEN
-+ sync_Arb_IDs();
-+#endif
-+ setup_IO_APIC_irqs();
-+ init_IO_APIC_traps();
-+ check_timer();
-+ if (!acpi_ioapic)
-+ print_IO_APIC();
-+}
-+
-+static int __init setup_disable_8254_timer(char *s)
-+{
-+ timer_over_8254 = -1;
-+ return 1;
-+}
-+static int __init setup_enable_8254_timer(char *s)
-+{
-+ timer_over_8254 = 2;
-+ return 1;
-+}
-+
-+__setup("disable_8254_timer", setup_disable_8254_timer);
-+__setup("enable_8254_timer", setup_enable_8254_timer);
-+
-+/*
-+ * Called after all the initialization is done. If we didn't find any
-+ * APIC bugs then we can allow the modify fast path
-+ */
-+
-+static int __init io_apic_bug_finalize(void)
-+{
-+	if (sis_apic_bug == -1)
-+ sis_apic_bug = 0;
-+ return 0;
-+}
-+
-+late_initcall(io_apic_bug_finalize);
-+
-+struct sysfs_ioapic_data {
-+ struct sys_device dev;
-+ struct IO_APIC_route_entry entry[0];
-+};
-+static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
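A note on the declaration above: the zero-sized trailing array is the pre-C99 flexible-array idiom, letting the suspend path size one allocation as header plus per-pin route entries. A minimal user-space sketch of the same idiom (struct and field names here are illustrative stand-ins):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct route_entry {
	unsigned int lo, hi;		/* two 32-bit halves, as in the suspend code */
};

struct ioapic_data {			/* stands in for sysfs_ioapic_data */
	int id;
	struct route_entry entry[];	/* C99 flexible array; the patch uses entry[0] */
};

int main(void)
{
	int i, nr = 4;
	size_t size = sizeof(struct ioapic_data) + nr * sizeof(struct route_entry);
	struct ioapic_data *d = malloc(size);

	if (!d)
		return 1;
	memset(d, 0, size);		/* the patch zeroes the block the same way */
	d->id = 0;
	for (i = 0; i < nr; i++)
		d->entry[i].lo = 0x10 + 2 * i;	/* mirrors the register indexing */
	printf("ioapic %d: header plus %d entries in one %zu-byte block\n",
	       d->id, nr, size);
	free(d);
	return 0;
}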
-+
-+static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
-+{
-+ struct IO_APIC_route_entry *entry;
-+ struct sysfs_ioapic_data *data;
-+ unsigned long flags;
-+ int i;
-+
-+ data = container_of(dev, struct sysfs_ioapic_data, dev);
-+ entry = data->entry;
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
-+ *(((int *)entry) + 1) = io_apic_read(dev->id, 0x11 + 2 * i);
-+ *(((int *)entry) + 0) = io_apic_read(dev->id, 0x10 + 2 * i);
-+ }
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ return 0;
-+}
-+
-+static int ioapic_resume(struct sys_device *dev)
-+{
-+ struct IO_APIC_route_entry *entry;
-+ struct sysfs_ioapic_data *data;
-+ unsigned long flags;
-+ union IO_APIC_reg_00 reg_00;
-+ int i;
-+
-+ data = container_of(dev, struct sysfs_ioapic_data, dev);
-+ entry = data->entry;
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ reg_00.raw = io_apic_read(dev->id, 0);
-+ if (reg_00.bits.ID != mp_ioapics[dev->id].mpc_apicid) {
-+ reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid;
-+ io_apic_write(dev->id, 0, reg_00.raw);
-+ }
-+ for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
-+ io_apic_write(dev->id, 0x11+2*i, *(((int *)entry)+1));
-+ io_apic_write(dev->id, 0x10+2*i, *(((int *)entry)+0));
-+ }
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ return 0;
-+}
-+
-+static struct sysdev_class ioapic_sysdev_class = {
-+ set_kset_name("ioapic"),
-+ .suspend = ioapic_suspend,
-+ .resume = ioapic_resume,
-+};
-+
-+static int __init ioapic_init_sysfs(void)
-+{
-+ struct sys_device * dev;
-+ int i, size, error = 0;
-+
-+ error = sysdev_class_register(&ioapic_sysdev_class);
-+ if (error)
-+ return error;
-+
-+ for (i = 0; i < nr_ioapics; i++ ) {
-+ size = sizeof(struct sys_device) + nr_ioapic_registers[i]
-+ * sizeof(struct IO_APIC_route_entry);
-+ mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL);
-+ if (!mp_ioapic_data[i]) {
-+ printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
-+ continue;
-+ }
-+ memset(mp_ioapic_data[i], 0, size);
-+ dev = &mp_ioapic_data[i]->dev;
-+ dev->id = i;
-+ dev->cls = &ioapic_sysdev_class;
-+ error = sysdev_register(dev);
-+ if (error) {
-+ kfree(mp_ioapic_data[i]);
-+ mp_ioapic_data[i] = NULL;
-+ printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
-+ continue;
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+device_initcall(ioapic_init_sysfs);
-+
-+/* --------------------------------------------------------------------------
-+ ACPI-based IOAPIC Configuration
-+ -------------------------------------------------------------------------- */
-+
-+#ifdef CONFIG_ACPI
-+
-+int __init io_apic_get_unique_id (int ioapic, int apic_id)
-+{
-+#ifndef CONFIG_XEN
-+ union IO_APIC_reg_00 reg_00;
-+ static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
-+ physid_mask_t tmp;
-+ unsigned long flags;
-+ int i = 0;
-+
-+ /*
-+ * The P4 platform supports up to 256 APIC IDs on two separate APIC
-+	 * buses (one for LAPICs, one for IOAPICs), where predecessors only
-+	 * support up to 16 on one shared APIC bus.
-+ *
-+ * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
-+ * advantage of new APIC bus architecture.
-+ */
-+
-+ if (physids_empty(apic_id_map))
-+ apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ reg_00.raw = io_apic_read(ioapic, 0);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ if (apic_id >= get_physical_broadcast()) {
-+ printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
-+ "%d\n", ioapic, apic_id, reg_00.bits.ID);
-+ apic_id = reg_00.bits.ID;
-+ }
-+
-+ /*
-+ * Every APIC in a system must have a unique ID or we get lots of nice
-+ * 'stuck on smp_invalidate_needed IPI wait' messages.
-+ */
-+ if (check_apicid_used(apic_id_map, apic_id)) {
-+
-+ for (i = 0; i < get_physical_broadcast(); i++) {
-+ if (!check_apicid_used(apic_id_map, i))
-+ break;
-+ }
-+
-+ if (i == get_physical_broadcast())
-+ panic("Max apic_id exceeded!\n");
-+
-+ printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
-+ "trying %d\n", ioapic, apic_id, i);
-+
-+ apic_id = i;
-+ }
-+
-+ tmp = apicid_to_cpu_present(apic_id);
-+ physids_or(apic_id_map, apic_id_map, tmp);
-+
-+ if (reg_00.bits.ID != apic_id) {
-+ reg_00.bits.ID = apic_id;
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ io_apic_write(ioapic, 0, reg_00.raw);
-+ reg_00.raw = io_apic_read(ioapic, 0);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ /* Sanity check */
-+ if (reg_00.bits.ID != apic_id) {
-+ printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
-+ return -1;
-+ }
-+ }
-+
-+ apic_printk(APIC_VERBOSE, KERN_INFO
-+ "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
-+#endif /* !CONFIG_XEN */
-+
-+ return apic_id;
-+}
-+
-+
-+int __init io_apic_get_version (int ioapic)
-+{
-+ union IO_APIC_reg_01 reg_01;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ reg_01.raw = io_apic_read(ioapic, 1);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ return reg_01.bits.version;
-+}
-+
-+
-+int __init io_apic_get_redir_entries (int ioapic)
-+{
-+ union IO_APIC_reg_01 reg_01;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ reg_01.raw = io_apic_read(ioapic, 1);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ return reg_01.bits.entries;
-+}
-+
-+
-+int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
-+{
-+ struct IO_APIC_route_entry entry;
-+ unsigned long flags;
-+
-+ if (!IO_APIC_IRQ(irq)) {
-+ printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
-+ ioapic);
-+ return -EINVAL;
-+ }
-+
-+ /*
-+ * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
-+ * Note that we mask (disable) IRQs now -- these get enabled when the
-+ * corresponding device driver registers for this IRQ.
-+ */
-+
-+ memset(&entry,0,sizeof(entry));
-+
-+ entry.delivery_mode = INT_DELIVERY_MODE;
-+ entry.dest_mode = INT_DEST_MODE;
-+ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
-+ entry.trigger = edge_level;
-+ entry.polarity = active_high_low;
-+ entry.mask = 1;
-+
-+ /*
-+ * IRQs < 16 are already in the irq_2_pin[] map
-+ */
-+ if (irq >= 16)
-+ add_pin_to_irq(irq, ioapic, pin);
-+
-+ entry.vector = assign_irq_vector(irq);
-+
-+ apic_printk(APIC_DEBUG, KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry "
-+ "(%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i)\n", ioapic,
-+ mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
-+ edge_level, active_high_low);
-+
-+ ioapic_register_intr(irq, entry.vector, edge_level);
-+
-+ if (!ioapic && (irq < 16))
-+ disable_8259A_irq(irq);
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
-+ io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
-+ set_native_irq_info(use_pci_vector() ? entry.vector : irq, TARGET_CPUS);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ return 0;
-+}
-+
-+#endif /* CONFIG_ACPI */
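
For reference, a small sketch of the register pairing that the io_apic_write() calls rely on throughout this file: each pin's 64-bit redirection entry is exposed as two 32-bit registers, at index 0x10+2*pin (low half, holding the vector and the mask bit) and 0x11+2*pin (high half, the destination). The field values below are made up, and the union layout assumes a little-endian host.

#include <stdio.h>
#include <stdint.h>

union redir_entry {
	uint64_t raw;
	struct {
		uint32_t lo;	/* register 0x10 + 2*pin: vector, mask bit, ... */
		uint32_t hi;	/* register 0x11 + 2*pin: destination */
	} w;
};

static void show_pin(int pin, union redir_entry e)
{
	/* the patch writes the high half first, so the low half (with
	 * the mask/unmask bit) takes effect last */
	printf("pin %2d: reg 0x%02x <= %08x (hi), reg 0x%02x <= %08x (lo)\n",
	       pin, 0x11 + 2 * pin, (unsigned)e.w.hi,
	       0x10 + 2 * pin, (unsigned)e.w.lo);
}

int main(void)
{
	union redir_entry e;

	e.w.lo = (1u << 16) | 0x31;	/* masked, vector 0x31 (made-up values) */
	e.w.hi = 1u << 24;		/* physical destination APIC ID 1 */
	show_pin(0, e);
	show_pin(9, e);
	return 0;
}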
-diff --git a/arch/i386/kernel/ioport-xen.c b/arch/i386/kernel/ioport-xen.c
-new file mode 100644
-index 0000000..fe395b1
---- /dev/null
-+++ b/arch/i386/kernel/ioport-xen.c
-@@ -0,0 +1,122 @@
-+/*
-+ * linux/arch/i386/kernel/ioport.c
-+ *
-+ * This contains the io-permission bitmap code - written by obz, with changes
-+ * by Linus.
-+ */
-+
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/capability.h>
-+#include <linux/errno.h>
-+#include <linux/types.h>
-+#include <linux/ioport.h>
-+#include <linux/smp.h>
-+#include <linux/smp_lock.h>
-+#include <linux/stddef.h>
-+#include <linux/slab.h>
-+#include <linux/thread_info.h>
-+#include <xen/interface/physdev.h>
-+
-+/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
-+static void set_bitmap(unsigned long *bitmap, unsigned int base, unsigned int extent, int new_value)
-+{
-+ unsigned long mask;
-+ unsigned long *bitmap_base = bitmap + (base / BITS_PER_LONG);
-+ unsigned int low_index = base & (BITS_PER_LONG-1);
-+ int length = low_index + extent;
-+
-+ if (low_index != 0) {
-+ mask = (~0UL << low_index);
-+ if (length < BITS_PER_LONG)
-+ mask &= ~(~0UL << length);
-+ if (new_value)
-+ *bitmap_base++ |= mask;
-+ else
-+ *bitmap_base++ &= ~mask;
-+ length -= BITS_PER_LONG;
-+ }
-+
-+ mask = (new_value ? ~0UL : 0UL);
-+ while (length >= BITS_PER_LONG) {
-+ *bitmap_base++ = mask;
-+ length -= BITS_PER_LONG;
-+ }
-+
-+ if (length > 0) {
-+ mask = ~(~0UL << length);
-+ if (new_value)
-+ *bitmap_base++ |= mask;
-+ else
-+ *bitmap_base++ &= ~mask;
-+ }
-+}
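
set_bitmap() above is plain bit manipulation with no kernel dependencies, so it can be exercised verbatim in user space. A sketch assuming 64-bit unsigned long; it walks the same three phases as the original: a partial leading word, whole words, then a partial trailing word.

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG ((int)(sizeof(unsigned long) * CHAR_BIT))

static void set_bitmap(unsigned long *bitmap, unsigned int base,
		       unsigned int extent, int new_value)
{
	unsigned long mask;
	unsigned long *bitmap_base = bitmap + (base / BITS_PER_LONG);
	unsigned int low_index = base & (BITS_PER_LONG - 1);
	int length = low_index + extent;

	if (low_index != 0) {			/* partial leading word */
		mask = (~0UL << low_index);
		if (length < BITS_PER_LONG)
			mask &= ~(~0UL << length);
		if (new_value)
			*bitmap_base++ |= mask;
		else
			*bitmap_base++ &= ~mask;
		length -= BITS_PER_LONG;
	}

	mask = (new_value ? ~0UL : 0UL);	/* whole words */
	while (length >= BITS_PER_LONG) {
		*bitmap_base++ = mask;
		length -= BITS_PER_LONG;
	}

	if (length > 0) {			/* partial trailing word */
		mask = ~(~0UL << length);
		if (new_value)
			*bitmap_base++ |= mask;
		else
			*bitmap_base++ &= ~mask;
	}
}

int main(void)
{
	unsigned long map[2] = { 0, 0 };

	set_bitmap(map, 3, 70, 1);	/* set bits 3..72 */
	set_bitmap(map, 8, 4, 0);	/* clear bits 8..11 again */
	/* expect: 00000000000001ff fffffffffffff0f8 */
	printf("%016lx %016lx\n", map[1], map[0]);
	return 0;
}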
-+
-+
-+/*
-+ * this changes the io permissions bitmap in the current task.
-+ */
-+asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
-+{
-+ struct thread_struct * t = &current->thread;
-+ unsigned long *bitmap;
-+ physdev_op_t op;
-+
-+ if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
-+ return -EINVAL;
-+ if (turn_on && !capable(CAP_SYS_RAWIO))
-+ return -EPERM;
-+
-+ /*
-+ * If it's the first ioperm() call in this thread's lifetime, set the
-+ * IO bitmap up. ioperm() is much less timing critical than clone(),
-+	 * which is why we delay this operation until now:
-+ */
-+ if (!t->io_bitmap_ptr) {
-+ bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
-+ if (!bitmap)
-+ return -ENOMEM;
-+
-+ memset(bitmap, 0xff, IO_BITMAP_BYTES);
-+ t->io_bitmap_ptr = bitmap;
-+
-+ op.cmd = PHYSDEVOP_SET_IOBITMAP;
-+ op.u.set_iobitmap.bitmap = (char *)bitmap;
-+ op.u.set_iobitmap.nr_ports = IO_BITMAP_BITS;
-+ HYPERVISOR_physdev_op(&op);
-+ }
-+
-+ set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
-+
-+ return 0;
-+}
-+
-+/*
-+ * sys_iopl has to be used when you want to access the IO ports
-+ * beyond the 0x3ff range: to get the full 65536 ports bitmapped
-+ * you'd need 8kB of bitmaps/process, which is a bit excessive.
-+ *
-+ * Here we just change the eflags value on the stack: we allow
-+ * only the super-user to do it. This depends on the stack-layout
-+ * on system-call entry - see also fork() and the signal handling
-+ * code.
-+ */
-+
-+asmlinkage long sys_iopl(unsigned long unused)
-+{
-+ volatile struct pt_regs * regs = (struct pt_regs *) &unused;
-+ unsigned int level = regs->ebx;
-+ struct thread_struct *t = &current->thread;
-+ unsigned int old = (t->iopl >> 12) & 3;
-+
-+ if (level > 3)
-+ return -EINVAL;
-+ /* Trying to gain more privileges? */
-+ if (level > old) {
-+ if (!capable(CAP_SYS_RAWIO))
-+ return -EPERM;
-+ }
-+ t->iopl = level << 12;
-+ set_iopl_mask(t->iopl);
-+ return 0;
-+}
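
A small sketch of the EFLAGS packing that sys_iopl() above relies on: the two-bit I/O privilege level occupies bits 12-13, hence the level << 12 store and the (iopl >> 12) & 3 unpack.

#include <stdio.h>

#define IOPL_SHIFT 12
#define IOPL_MASK  (3u << IOPL_SHIFT)

int main(void)
{
	unsigned int level;

	for (level = 0; level <= 3; level++) {
		unsigned int field = level << IOPL_SHIFT;	 /* as stored in t->iopl */
		unsigned int back = (field & IOPL_MASK) >> IOPL_SHIFT;

		printf("level %u -> eflags bits 0x%05x -> unpacked %u\n",
		       level, field, back);
	}
	return 0;
}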
-diff --git a/arch/i386/kernel/irq-xen.c b/arch/i386/kernel/irq-xen.c
-new file mode 100644
-index 0000000..d4b9753
---- /dev/null
-+++ b/arch/i386/kernel/irq-xen.c
-@@ -0,0 +1,306 @@
-+/*
-+ * linux/arch/i386/kernel/irq.c
-+ *
-+ * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
-+ *
-+ * This file contains the lowest level x86-specific interrupt
-+ * entry, irq-stacks and irq statistics code. All the remaining
-+ * irq logic is done by the generic kernel/irq/ code and
-+ * by the x86-specific irq controller code. (e.g. i8259.c and
-+ * io_apic.c.)
-+ */
-+
-+#include <asm/uaccess.h>
-+#include <linux/module.h>
-+#include <linux/seq_file.h>
-+#include <linux/interrupt.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/notifier.h>
-+#include <linux/cpu.h>
-+#include <linux/delay.h>
-+
-+DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
-+EXPORT_PER_CPU_SYMBOL(irq_stat);
-+
-+#ifndef CONFIG_X86_LOCAL_APIC
-+/*
-+ * 'what should we do if we get a hw irq event on an illegal vector'.
-+ * Each architecture has to answer this itself.
-+ */
-+void ack_bad_irq(unsigned int irq)
-+{
-+ printk("unexpected IRQ trap at vector %02x\n", irq);
-+}
-+#endif
-+
-+#ifdef CONFIG_4KSTACKS
-+/*
-+ * per-CPU IRQ handling contexts (thread information and stack)
-+ */
-+union irq_ctx {
-+ struct thread_info tinfo;
-+ u32 stack[THREAD_SIZE/sizeof(u32)];
-+};
-+
-+static union irq_ctx *hardirq_ctx[NR_CPUS];
-+static union irq_ctx *softirq_ctx[NR_CPUS];
-+#endif
-+
-+/*
-+ * do_IRQ handles all normal device IRQ's (the special
-+ * SMP cross-CPU interrupts have their own specific
-+ * handlers).
-+ */
-+fastcall unsigned int do_IRQ(struct pt_regs *regs)
-+{
-+ /* high bits used in ret_from_ code */
-+ int irq = regs->orig_eax & __IRQ_MASK(HARDIRQ_BITS);
-+#ifdef CONFIG_4KSTACKS
-+ union irq_ctx *curctx, *irqctx;
-+ u32 *isp;
-+#endif
-+
-+ irq_enter();
-+#ifdef CONFIG_DEBUG_STACKOVERFLOW
-+ /* Debugging check for stack overflow: is there less than 1KB free? */
-+ {
-+ long esp;
-+
-+ __asm__ __volatile__("andl %%esp,%0" :
-+ "=r" (esp) : "0" (THREAD_SIZE - 1));
-+ if (unlikely(esp < (sizeof(struct thread_info) + STACK_WARN))) {
-+ printk("do_IRQ: stack overflow: %ld\n",
-+ esp - sizeof(struct thread_info));
-+ dump_stack();
-+ }
-+ }
-+#endif
-+
-+#ifdef CONFIG_4KSTACKS
-+
-+ curctx = (union irq_ctx *) current_thread_info();
-+ irqctx = hardirq_ctx[smp_processor_id()];
-+
-+ /*
-+ * this is where we switch to the IRQ stack. However, if we are
-+ * already using the IRQ stack (because we interrupted a hardirq
-+ * handler) we can't do that and just have to keep using the
-+ * current stack (which is the irq stack already after all)
-+ */
-+ if (curctx != irqctx) {
-+ int arg1, arg2, ebx;
-+
-+ /* build the stack frame on the IRQ stack */
-+ isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
-+ irqctx->tinfo.task = curctx->tinfo.task;
-+ irqctx->tinfo.previous_esp = current_stack_pointer;
-+
-+ asm volatile(
-+ " xchgl %%ebx,%%esp \n"
-+ " call __do_IRQ \n"
-+ " movl %%ebx,%%esp \n"
-+ : "=a" (arg1), "=d" (arg2), "=b" (ebx)
-+ : "0" (irq), "1" (regs), "2" (isp)
-+ : "memory", "cc", "ecx"
-+ );
-+ } else
-+#endif
-+ __do_IRQ(irq, regs);
-+
-+ irq_exit();
-+
-+ return 1;
-+}
-+
-+#ifdef CONFIG_4KSTACKS
-+
-+/*
-+ * These should really be __section__(".bss.page_aligned") as well, but
-+ * gcc 3.0 and earlier don't handle that correctly.
-+ */
-+static char softirq_stack[NR_CPUS * THREAD_SIZE]
-+ __attribute__((__aligned__(THREAD_SIZE)));
-+
-+static char hardirq_stack[NR_CPUS * THREAD_SIZE]
-+ __attribute__((__aligned__(THREAD_SIZE)));
-+
-+/*
-+ * allocate per-cpu stacks for hardirq and for softirq processing
-+ */
-+void irq_ctx_init(int cpu)
-+{
-+ union irq_ctx *irqctx;
-+
-+ if (hardirq_ctx[cpu])
-+ return;
-+
-+ irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE];
-+ irqctx->tinfo.task = NULL;
-+ irqctx->tinfo.exec_domain = NULL;
-+ irqctx->tinfo.cpu = cpu;
-+ irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
-+ irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
-+
-+ hardirq_ctx[cpu] = irqctx;
-+
-+ irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE];
-+ irqctx->tinfo.task = NULL;
-+ irqctx->tinfo.exec_domain = NULL;
-+ irqctx->tinfo.cpu = cpu;
-+ irqctx->tinfo.preempt_count = SOFTIRQ_OFFSET;
-+ irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
-+
-+ softirq_ctx[cpu] = irqctx;
-+
-+ printk("CPU %u irqstacks, hard=%p soft=%p\n",
-+ cpu,hardirq_ctx[cpu],softirq_ctx[cpu]);
-+}
-+
-+void irq_ctx_exit(int cpu)
-+{
-+ hardirq_ctx[cpu] = NULL;
-+}
-+
-+extern asmlinkage void __do_softirq(void);
-+
-+asmlinkage void do_softirq(void)
-+{
-+ unsigned long flags;
-+ struct thread_info *curctx;
-+ union irq_ctx *irqctx;
-+ u32 *isp;
-+
-+ if (in_interrupt())
-+ return;
-+
-+ local_irq_save(flags);
-+
-+ if (local_softirq_pending()) {
-+ curctx = current_thread_info();
-+ irqctx = softirq_ctx[smp_processor_id()];
-+ irqctx->tinfo.task = curctx->task;
-+ irqctx->tinfo.previous_esp = current_stack_pointer;
-+
-+ /* build the stack frame on the softirq stack */
-+ isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
-+
-+ asm volatile(
-+ " xchgl %%ebx,%%esp \n"
-+ " call __do_softirq \n"
-+ " movl %%ebx,%%esp \n"
-+ : "=b"(isp)
-+ : "0"(isp)
-+ : "memory", "cc", "edx", "ecx", "eax"
-+ );
-+ }
-+
-+ local_irq_restore(flags);
-+}
-+
-+EXPORT_SYMBOL(do_softirq);
-+#endif
-+
-+/*
-+ * Interrupt statistics:
-+ */
-+
-+atomic_t irq_err_count;
-+
-+/*
-+ * /proc/interrupts printing:
-+ */
-+
-+int show_interrupts(struct seq_file *p, void *v)
-+{
-+ int i = *(loff_t *) v, j;
-+ struct irqaction * action;
-+ unsigned long flags;
-+
-+ if (i == 0) {
-+ seq_printf(p, " ");
-+ for_each_online_cpu(j)
-+ seq_printf(p, "CPU%d ",j);
-+ seq_putc(p, '\n');
-+ }
-+
-+ if (i < NR_IRQS) {
-+ spin_lock_irqsave(&irq_desc[i].lock, flags);
-+ action = irq_desc[i].action;
-+ if (!action)
-+ goto skip;
-+ seq_printf(p, "%3d: ",i);
-+#ifndef CONFIG_SMP
-+ seq_printf(p, "%10u ", kstat_irqs(i));
-+#else
-+ for_each_online_cpu(j)
-+ seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
-+#endif
-+ seq_printf(p, " %14s", irq_desc[i].handler->typename);
-+ seq_printf(p, " %s", action->name);
-+
-+ for (action=action->next; action; action = action->next)
-+ seq_printf(p, ", %s", action->name);
-+
-+ seq_putc(p, '\n');
-+skip:
-+ spin_unlock_irqrestore(&irq_desc[i].lock, flags);
-+ } else if (i == NR_IRQS) {
-+ seq_printf(p, "NMI: ");
-+ for_each_online_cpu(j)
-+ seq_printf(p, "%10u ", nmi_count(j));
-+ seq_putc(p, '\n');
-+#ifdef CONFIG_X86_LOCAL_APIC
-+ seq_printf(p, "LOC: ");
-+ for_each_online_cpu(j)
-+ seq_printf(p, "%10u ",
-+ per_cpu(irq_stat,j).apic_timer_irqs);
-+ seq_putc(p, '\n');
-+#endif
-+ seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
-+#if defined(CONFIG_X86_IO_APIC)
-+ seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
-+#endif
-+ }
-+ return 0;
-+}
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+
-+void fixup_irqs(cpumask_t map)
-+{
-+ unsigned int irq;
-+ static int warned;
-+
-+ for (irq = 0; irq < NR_IRQS; irq++) {
-+ cpumask_t mask;
-+ if (irq == 2)
-+ continue;
-+
-+ cpus_and(mask, irq_affinity[irq], map);
-+ if (any_online_cpu(mask) == NR_CPUS) {
-+ /*printk("Breaking affinity for irq %i\n", irq);*/
-+ mask = map;
-+ }
-+ if (irq_desc[irq].handler->set_affinity)
-+ irq_desc[irq].handler->set_affinity(irq, mask);
-+ else if (irq_desc[irq].action && !(warned++))
-+ printk("Cannot set affinity for irq %i\n", irq);
-+ }
-+
-+#if 0
-+ barrier();
-+ /* Ingo Molnar says: "after the IO-APIC masks have been redirected
-+ [note the nop - the interrupt-enable boundary on x86 is two
-+ instructions from sti] - to flush out pending hardirqs and
-+ IPIs. After this point nothing is supposed to reach this CPU." */
-+ __asm__ __volatile__("sti; nop; cli");
-+ barrier();
-+#else
-+ /* That doesn't seem sufficient. Give it 1ms. */
-+ local_irq_enable();
-+ mdelay(1);
-+ local_irq_disable();
-+#endif
-+}
-+#endif
-+
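A simplified sketch of the fix-up rule in fixup_irqs() above, with cpumask_t reduced to a plain bitmask and made-up affinity values: intersect each IRQ's affinity with the surviving CPUs, and fall back to all remaining online CPUs when the intersection comes up empty.

#include <stdio.h>

int main(void)
{
	unsigned int online = 0x5;			/* CPUs 0 and 2 still online */
	unsigned int affinity[4] = { 0x1, 0x2, 0x4, 0xf };
	int irq;

	for (irq = 0; irq < 4; irq++) {
		unsigned int mask = affinity[irq] & online;

		if (!mask)		/* no surviving CPU in the mask: */
			mask = online;	/* break affinity, allow any online CPU */
		printf("irq %d: affinity %#x -> %#x\n", irq, affinity[irq], mask);
	}
	return 0;
}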
-diff --git a/arch/i386/kernel/ldt-xen.c b/arch/i386/kernel/ldt-xen.c
-new file mode 100644
-index 0000000..06970d9
---- /dev/null
-+++ b/arch/i386/kernel/ldt-xen.c
-@@ -0,0 +1,269 @@
-+/*
-+ * linux/kernel/ldt.c
-+ *
-+ * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
-+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
-+ */
-+
-+#include <linux/errno.h>
-+#include <linux/sched.h>
-+#include <linux/string.h>
-+#include <linux/mm.h>
-+#include <linux/smp.h>
-+#include <linux/smp_lock.h>
-+#include <linux/vmalloc.h>
-+#include <linux/slab.h>
-+
-+#include <asm/uaccess.h>
-+#include <asm/system.h>
-+#include <asm/ldt.h>
-+#include <asm/desc.h>
-+#include <asm/mmu_context.h>
-+
-+#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
-+static void flush_ldt(void *null)
-+{
-+ if (current->active_mm)
-+ load_LDT(&current->active_mm->context);
-+}
-+#endif
-+
-+static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
-+{
-+ void *oldldt;
-+ void *newldt;
-+ int oldsize;
-+
-+ if (mincount <= pc->size)
-+ return 0;
-+ oldsize = pc->size;
-+ mincount = (mincount+511)&(~511);
-+ if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
-+ newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
-+ else
-+ newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
-+
-+ if (!newldt)
-+ return -ENOMEM;
-+
-+ if (oldsize)
-+ memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
-+ oldldt = pc->ldt;
-+ memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
-+ pc->ldt = newldt;
-+ wmb();
-+ pc->size = mincount;
-+ wmb();
-+
-+ if (reload) {
-+#ifdef CONFIG_SMP
-+ cpumask_t mask;
-+ preempt_disable();
-+#endif
-+ make_pages_readonly(
-+ pc->ldt,
-+ (pc->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
-+ XENFEAT_writable_descriptor_tables);
-+ load_LDT(pc);
-+#ifdef CONFIG_SMP
-+ mask = cpumask_of_cpu(smp_processor_id());
-+ if (!cpus_equal(current->mm->cpu_vm_mask, mask))
-+ smp_call_function(flush_ldt, NULL, 1, 1);
-+ preempt_enable();
-+#endif
-+ }
-+ if (oldsize) {
-+ make_pages_writable(
-+ oldldt,
-+ (oldsize * LDT_ENTRY_SIZE) / PAGE_SIZE,
-+ XENFEAT_writable_descriptor_tables);
-+ if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
-+ vfree(oldldt);
-+ else
-+ kfree(oldldt);
-+ }
-+ return 0;
-+}
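
alloc_ldt() above rounds the requested count up to a multiple of 512 entries; at 8 bytes per LDT entry that is exactly one 4 KiB page, which is why anything past the first page switches from kmalloc() to vmalloc(). A sketch of the rounding expression:

#include <stdio.h>

static int round_up_512(int mincount)
{
	return (mincount + 511) & ~511;	/* same as the alloc_ldt() expression */
}

int main(void)
{
	int samples[] = { 1, 511, 512, 513, 1000 };
	int i;

	for (i = 0; i < 5; i++)
		printf("%4d -> %4d entries (%d bytes)\n", samples[i],
		       round_up_512(samples[i]), round_up_512(samples[i]) * 8);
	return 0;	/* 512-entry granularity keeps reallocations rare */
}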
-+
-+static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
-+{
-+ int err = alloc_ldt(new, old->size, 0);
-+ if (err < 0)
-+ return err;
-+ memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
-+ make_pages_readonly(
-+ new->ldt,
-+ (new->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
-+ XENFEAT_writable_descriptor_tables);
-+ return 0;
-+}
-+
-+/*
-+ * we do not have to muck with descriptors here, that is
-+ * done in switch_mm() as needed.
-+ */
-+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
-+{
-+ struct mm_struct * old_mm;
-+ int retval = 0;
-+
-+ init_MUTEX(&mm->context.sem);
-+ mm->context.size = 0;
-+ old_mm = current->mm;
-+ if (old_mm && old_mm->context.size > 0) {
-+ down(&old_mm->context.sem);
-+ retval = copy_ldt(&mm->context, &old_mm->context);
-+ up(&old_mm->context.sem);
-+ }
-+ return retval;
-+}
-+
-+/*
-+ * No need to lock the MM as we are the last user
-+ */
-+void destroy_context(struct mm_struct *mm)
-+{
-+ if (mm->context.size) {
-+ if (mm == current->active_mm)
-+ clear_LDT();
-+ make_pages_writable(
-+ mm->context.ldt,
-+ (mm->context.size * LDT_ENTRY_SIZE) / PAGE_SIZE,
-+ XENFEAT_writable_descriptor_tables);
-+ if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
-+ vfree(mm->context.ldt);
-+ else
-+ kfree(mm->context.ldt);
-+ mm->context.size = 0;
-+ }
-+}
-+
-+static int read_ldt(void __user * ptr, unsigned long bytecount)
-+{
-+ int err;
-+ unsigned long size;
-+ struct mm_struct * mm = current->mm;
-+
-+ if (!mm->context.size)
-+ return 0;
-+ if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
-+ bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
-+
-+ down(&mm->context.sem);
-+ size = mm->context.size*LDT_ENTRY_SIZE;
-+ if (size > bytecount)
-+ size = bytecount;
-+
-+ err = 0;
-+ if (copy_to_user(ptr, mm->context.ldt, size))
-+ err = -EFAULT;
-+ up(&mm->context.sem);
-+ if (err < 0)
-+ goto error_return;
-+ if (size != bytecount) {
-+ /* zero-fill the rest */
-+ if (clear_user(ptr+size, bytecount-size) != 0) {
-+ err = -EFAULT;
-+ goto error_return;
-+ }
-+ }
-+ return bytecount;
-+error_return:
-+ return err;
-+}
-+
-+static int read_default_ldt(void __user * ptr, unsigned long bytecount)
-+{
-+ int err;
-+ unsigned long size;
-+ void *address;
-+
-+ err = 0;
-+ address = &default_ldt[0];
-+ size = 5*sizeof(struct desc_struct);
-+ if (size > bytecount)
-+ size = bytecount;
-+
-+ err = size;
-+ if (copy_to_user(ptr, address, size))
-+ err = -EFAULT;
-+
-+ return err;
-+}
-+
-+static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
-+{
-+ struct mm_struct * mm = current->mm;
-+ __u32 entry_1, entry_2;
-+ int error;
-+ struct user_desc ldt_info;
-+
-+ error = -EINVAL;
-+ if (bytecount != sizeof(ldt_info))
-+ goto out;
-+ error = -EFAULT;
-+ if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
-+ goto out;
-+
-+ error = -EINVAL;
-+ if (ldt_info.entry_number >= LDT_ENTRIES)
-+ goto out;
-+ if (ldt_info.contents == 3) {
-+ if (oldmode)
-+ goto out;
-+ if (ldt_info.seg_not_present == 0)
-+ goto out;
-+ }
-+
-+ down(&mm->context.sem);
-+ if (ldt_info.entry_number >= mm->context.size) {
-+ error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
-+ if (error < 0)
-+ goto out_unlock;
-+ }
-+
-+ /* Allow LDTs to be cleared by the user. */
-+ if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
-+ if (oldmode || LDT_empty(&ldt_info)) {
-+ entry_1 = 0;
-+ entry_2 = 0;
-+ goto install;
-+ }
-+ }
-+
-+ entry_1 = LDT_entry_a(&ldt_info);
-+ entry_2 = LDT_entry_b(&ldt_info);
-+ if (oldmode)
-+ entry_2 &= ~(1 << 20);
-+
-+ /* Install the new entry ... */
-+install:
-+ error = write_ldt_entry(mm->context.ldt, ldt_info.entry_number,
-+ entry_1, entry_2);
-+
-+out_unlock:
-+ up(&mm->context.sem);
-+out:
-+ return error;
-+}
-+
-+asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
-+{
-+ int ret = -ENOSYS;
-+
-+ switch (func) {
-+ case 0:
-+ ret = read_ldt(ptr, bytecount);
-+ break;
-+ case 1:
-+ ret = write_ldt(ptr, bytecount, 1);
-+ break;
-+ case 2:
-+ ret = read_default_ldt(ptr, bytecount);
-+ break;
-+ case 0x11:
-+ ret = write_ldt(ptr, bytecount, 0);
-+ break;
-+ }
-+ return ret;
-+}
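
The dispatcher above backs the modify_ldt(2) system call. A minimal user-space caller, assuming a Linux/x86 host; func 0 asks the kernel to copy out the current LDT, which is normally empty, so the call usually returns 0 bytes.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	unsigned char buf[8 * 16];	/* room for 16 eight-byte descriptors */
	long n = syscall(SYS_modify_ldt, 0, buf, sizeof(buf));	/* func 0: read_ldt */

	if (n < 0) {
		perror("modify_ldt");
		return 1;
	}
	printf("read %ld bytes of LDT (%ld entries)\n", n, n / 8);
	return 0;
}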
-diff --git a/arch/i386/kernel/microcode-xen.c b/arch/i386/kernel/microcode-xen.c
-new file mode 100644
-index 0000000..a0e1487
---- /dev/null
-+++ b/arch/i386/kernel/microcode-xen.c
-@@ -0,0 +1,164 @@
-+/*
-+ * Intel CPU Microcode Update Driver for Linux
-+ *
-+ * Copyright (C) 2000-2004 Tigran Aivazian
-+ *
-+ * This driver allows upgrading the microcode on Intel processors
-+ * belonging to IA-32 family - PentiumPro, Pentium II,
-+ * Pentium III, Xeon, Pentium 4, etc.
-+ *
-+ * Reference: Section 8.10 of Volume III, Intel Pentium 4 Manual,
-+ * Order Number 245472 or free download from:
-+ *
-+ * http://developer.intel.com/design/pentium4/manuals/245472.htm
-+ *
-+ * For more information, go to http://www.urbanmyth.org/microcode
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation; either version
-+ * 2 of the License, or (at your option) any later version.
-+ */
-+
-+//#define DEBUG /* pr_debug */
-+#include <linux/capability.h>
-+#include <linux/kernel.h>
-+#include <linux/init.h>
-+#include <linux/sched.h>
-+#include <linux/module.h>
-+#include <linux/slab.h>
-+#include <linux/vmalloc.h>
-+#include <linux/miscdevice.h>
-+#include <linux/spinlock.h>
-+#include <linux/mm.h>
-+#include <linux/syscalls.h>
-+
-+#include <asm/msr.h>
-+#include <asm/uaccess.h>
-+#include <asm/processor.h>
-+
-+MODULE_DESCRIPTION("Intel CPU (IA-32) Microcode Update Driver");
-+MODULE_AUTHOR("Tigran Aivazian <tigran@veritas.com>");
-+MODULE_LICENSE("GPL");
-+
-+#define MICROCODE_VERSION "1.14-xen"
-+
-+#define DEFAULT_UCODE_DATASIZE (2000) /* 2000 bytes */
-+#define MC_HEADER_SIZE (sizeof (microcode_header_t)) /* 48 bytes */
-+#define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE) /* 2048 bytes */
-+
-+/* no concurrent ->write()s are allowed on /dev/cpu/microcode */
-+static DECLARE_MUTEX(microcode_sem);
-+
-+static void __user *user_buffer; /* user area microcode data buffer */
-+static unsigned int user_buffer_size;	/* its size */
-+
-+static int microcode_open (struct inode *unused1, struct file *unused2)
-+{
-+ return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
-+}
-+
-+
-+static int do_microcode_update (void)
-+{
-+ int err;
-+ dom0_op_t op;
-+
-+ err = sys_mlock((unsigned long)user_buffer, user_buffer_size);
-+ if (err != 0)
-+ return err;
-+
-+ op.cmd = DOM0_MICROCODE;
-+ op.u.microcode.data = user_buffer;
-+ op.u.microcode.length = user_buffer_size;
-+ err = HYPERVISOR_dom0_op(&op);
-+
-+ (void)sys_munlock((unsigned long)user_buffer, user_buffer_size);
-+
-+ return err;
-+}
-+
-+static ssize_t microcode_write (struct file *file, const char __user *buf, size_t len, loff_t *ppos)
-+{
-+ ssize_t ret;
-+
-+ if (len < DEFAULT_UCODE_TOTALSIZE) {
-+ printk(KERN_ERR "microcode: not enough data\n");
-+ return -EINVAL;
-+ }
-+
-+ if ((len >> PAGE_SHIFT) > num_physpages) {
-+ printk(KERN_ERR "microcode: too much data (max %ld pages)\n", num_physpages);
-+ return -EINVAL;
-+ }
-+
-+ down(&microcode_sem);
-+
-+ user_buffer = (void __user *) buf;
-+ user_buffer_size = (int) len;
-+
-+ ret = do_microcode_update();
-+ if (!ret)
-+ ret = (ssize_t)len;
-+
-+ up(&microcode_sem);
-+
-+ return ret;
-+}
-+
-+static int microcode_ioctl (struct inode *inode, struct file *file,
-+ unsigned int cmd, unsigned long arg)
-+{
-+ switch (cmd) {
-+ /*
-+ * XXX: will be removed after microcode_ctl
-+ * is updated to ignore failure of this ioctl()
-+ */
-+ case MICROCODE_IOCFREE:
-+ return 0;
-+ default:
-+ return -EINVAL;
-+ }
-+ return -EINVAL;
-+}
-+
-+static struct file_operations microcode_fops = {
-+ .owner = THIS_MODULE,
-+ .write = microcode_write,
-+ .ioctl = microcode_ioctl,
-+ .open = microcode_open,
-+};
-+
-+static struct miscdevice microcode_dev = {
-+ .minor = MICROCODE_MINOR,
-+ .name = "microcode",
-+ .devfs_name = "cpu/microcode",
-+ .fops = &microcode_fops,
-+};
-+
-+static int __init microcode_init (void)
-+{
-+ int error;
-+
-+ error = misc_register(&microcode_dev);
-+ if (error) {
-+ printk(KERN_ERR
-+ "microcode: can't misc_register on minor=%d\n",
-+ MICROCODE_MINOR);
-+ return error;
-+ }
-+
-+ printk(KERN_INFO
-+ "IA-32 Microcode Update Driver: v" MICROCODE_VERSION " <tigran@veritas.com>\n");
-+ return 0;
-+}
-+
-+static void __exit microcode_exit (void)
-+{
-+ misc_deregister(&microcode_dev);
-+ printk(KERN_INFO "IA-32 Microcode Update Driver v" MICROCODE_VERSION " unregistered\n");
-+}
-+
-+module_init(microcode_init)
-+module_exit(microcode_exit)
-+MODULE_ALIAS_MISCDEV(MICROCODE_MINOR);
-diff --git a/arch/i386/kernel/mpparse-xen.c b/arch/i386/kernel/mpparse-xen.c
-new file mode 100644
-index 0000000..f5daedb
---- /dev/null
-+++ b/arch/i386/kernel/mpparse-xen.c
-@@ -0,0 +1,1188 @@
-+/*
-+ * Intel Multiprocessor Specification 1.1 and 1.4
-+ * compliant MP-table parsing routines.
-+ *
-+ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
-+ * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
-+ *
-+ * Fixes
-+ * Erich Boleyn : MP v1.4 and additional changes.
-+ * Alan Cox : Added EBDA scanning
-+ * Ingo Molnar : various cleanups and rewrites
-+ * Maciej W. Rozycki: Bits for default MP configurations
-+ * Paul Diefenbaugh: Added full ACPI support
-+ */
-+
-+#include <linux/mm.h>
-+#include <linux/init.h>
-+#include <linux/acpi.h>
-+#include <linux/delay.h>
-+#include <linux/config.h>
-+#include <linux/bootmem.h>
-+#include <linux/smp_lock.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/bitops.h>
-+
-+#include <asm/smp.h>
-+#include <asm/acpi.h>
-+#include <asm/mtrr.h>
-+#include <asm/mpspec.h>
-+#include <asm/io_apic.h>
-+
-+#include <mach_apic.h>
-+#include <mach_mpparse.h>
-+#include <bios_ebda.h>
-+
-+/* Have we found an MP table */
-+int smp_found_config;
-+unsigned int __initdata maxcpus = NR_CPUS;
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+#define CPU_HOTPLUG_ENABLED (1)
-+#else
-+#define CPU_HOTPLUG_ENABLED (0)
-+#endif
-+
-+/*
-+ * Various Linux-internal data structures created from the
-+ * MP-table.
-+ */
-+int apic_version [MAX_APICS];
-+int mp_bus_id_to_type [MAX_MP_BUSSES];
-+int mp_bus_id_to_node [MAX_MP_BUSSES];
-+int mp_bus_id_to_local [MAX_MP_BUSSES];
-+int quad_local_to_mp_bus_id [NR_CPUS/4][4];
-+int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
-+static int mp_current_pci_id;
-+
-+/* I/O APIC entries */
-+struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
-+
-+/* # of MP IRQ source entries */
-+struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
-+
-+/* MP IRQ source entries */
-+int mp_irq_entries;
-+
-+int nr_ioapics;
-+
-+int pic_mode;
-+unsigned long mp_lapic_addr;
-+
-+unsigned int def_to_bigsmp = 0;
-+
-+/* Processor that is doing the boot up */
-+unsigned int boot_cpu_physical_apicid = -1U;
-+/* Internal processor count */
-+static unsigned int __devinitdata num_processors;
-+
-+/* Bitmask of physically existing CPUs */
-+physid_mask_t phys_cpu_present_map;
-+
-+u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
-+
-+/*
-+ * Intel MP BIOS table parsing routines:
-+ */
-+
-+
-+/*
-+ * Checksum an MP configuration block.
-+ */
-+
-+static int __init mpf_checksum(unsigned char *mp, int len)
-+{
-+ int sum = 0;
-+
-+ while (len--)
-+ sum += *mp++;
-+
-+ return sum & 0xFF;
-+}
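
The MP structures are validated by the bytewise sum above, which must come to zero modulo 256; in other words, the table's checksum byte is chosen to cancel the rest. A standalone sketch (the sample table bytes are illustrative):

#include <stdio.h>

static int mpf_checksum(unsigned char *mp, int len)
{
	int sum = 0;

	while (len--)
		sum += *mp++;
	return sum & 0xFF;
}

int main(void)
{
	unsigned char table[16] = { '_', 'M', 'P', '_', 1, 16, 4, 0 };
	int partial = mpf_checksum(table, 16);

	table[7] = (unsigned char)(0x100 - partial);	/* fix up the checksum byte */
	printf("checksum after fix-up: %d (0 means valid)\n",
	       mpf_checksum(table, 16));
	return 0;
}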
-+
-+/*
-+ * Have to match translation table entries to main table entries by counter
-+ * hence the mpc_record variable .... can't see a less disgusting way of
-+ * doing this ....
-+ */
-+
-+static int mpc_record;
-+static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __initdata;
-+
-+#ifdef CONFIG_X86_NUMAQ
-+static int MP_valid_apicid(int apicid, int version)
-+{
-+ return hweight_long(apicid & 0xf) == 1 && (apicid >> 4) != 0xf;
-+}
-+#elif !defined(CONFIG_XEN)
-+static int MP_valid_apicid(int apicid, int version)
-+{
-+ if (version >= 0x14)
-+ return apicid < 0xff;
-+ else
-+ return apicid < 0xf;
-+}
-+#endif
-+
-+#ifndef CONFIG_XEN
-+static void __devinit MP_processor_info (struct mpc_config_processor *m)
-+{
-+ int ver, apicid;
-+ physid_mask_t phys_cpu;
-+
-+ if (!(m->mpc_cpuflag & CPU_ENABLED))
-+ return;
-+
-+ apicid = mpc_apic_id(m, translation_table[mpc_record]);
-+
-+ if (m->mpc_featureflag&(1<<0))
-+ Dprintk(" Floating point unit present.\n");
-+ if (m->mpc_featureflag&(1<<7))
-+ Dprintk(" Machine Exception supported.\n");
-+ if (m->mpc_featureflag&(1<<8))
-+ Dprintk(" 64 bit compare & exchange supported.\n");
-+ if (m->mpc_featureflag&(1<<9))
-+ Dprintk(" Internal APIC present.\n");
-+ if (m->mpc_featureflag&(1<<11))
-+ Dprintk(" SEP present.\n");
-+ if (m->mpc_featureflag&(1<<12))
-+ Dprintk(" MTRR present.\n");
-+ if (m->mpc_featureflag&(1<<13))
-+ Dprintk(" PGE present.\n");
-+ if (m->mpc_featureflag&(1<<14))
-+ Dprintk(" MCA present.\n");
-+ if (m->mpc_featureflag&(1<<15))
-+ Dprintk(" CMOV present.\n");
-+ if (m->mpc_featureflag&(1<<16))
-+ Dprintk(" PAT present.\n");
-+ if (m->mpc_featureflag&(1<<17))
-+ Dprintk(" PSE present.\n");
-+ if (m->mpc_featureflag&(1<<18))
-+ Dprintk(" PSN present.\n");
-+ if (m->mpc_featureflag&(1<<19))
-+ Dprintk(" Cache Line Flush Instruction present.\n");
-+ /* 20 Reserved */
-+ if (m->mpc_featureflag&(1<<21))
-+ Dprintk(" Debug Trace and EMON Store present.\n");
-+ if (m->mpc_featureflag&(1<<22))
-+ Dprintk(" ACPI Thermal Throttle Registers present.\n");
-+ if (m->mpc_featureflag&(1<<23))
-+ Dprintk(" MMX present.\n");
-+ if (m->mpc_featureflag&(1<<24))
-+ Dprintk(" FXSR present.\n");
-+ if (m->mpc_featureflag&(1<<25))
-+ Dprintk(" XMM present.\n");
-+ if (m->mpc_featureflag&(1<<26))
-+ Dprintk(" Willamette New Instructions present.\n");
-+ if (m->mpc_featureflag&(1<<27))
-+ Dprintk(" Self Snoop present.\n");
-+ if (m->mpc_featureflag&(1<<28))
-+ Dprintk(" HT present.\n");
-+ if (m->mpc_featureflag&(1<<29))
-+ Dprintk(" Thermal Monitor present.\n");
-+ /* 30, 31 Reserved */
-+
-+
-+ if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
-+ Dprintk(" Bootup CPU\n");
-+ boot_cpu_physical_apicid = m->mpc_apicid;
-+ }
-+
-+ ver = m->mpc_apicver;
-+
-+ if (!MP_valid_apicid(apicid, ver)) {
-+ printk(KERN_WARNING "Processor #%d INVALID. (Max ID: %d).\n",
-+ m->mpc_apicid, MAX_APICS);
-+ return;
-+ }
-+
-+ /*
-+ * Validate version
-+ */
-+ if (ver == 0x0) {
-+ printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! "
-+ "fixing up to 0x10. (tell your hw vendor)\n",
-+ m->mpc_apicid);
-+ ver = 0x10;
-+ }
-+ apic_version[m->mpc_apicid] = ver;
-+
-+ phys_cpu = apicid_to_cpu_present(apicid);
-+ physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu);
-+
-+ if (num_processors >= NR_CPUS) {
-+ printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
-+ " Processor ignored.\n", NR_CPUS);
-+ return;
-+ }
-+
-+ if (num_processors >= maxcpus) {
-+ printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
-+ " Processor ignored.\n", maxcpus);
-+ return;
-+ }
-+
-+ cpu_set(num_processors, cpu_possible_map);
-+ num_processors++;
-+
-+ if (CPU_HOTPLUG_ENABLED || (num_processors > 8)) {
-+ switch (boot_cpu_data.x86_vendor) {
-+ case X86_VENDOR_INTEL:
-+ if (!APIC_XAPIC(ver)) {
-+ def_to_bigsmp = 0;
-+ break;
-+ }
-+ /* If P4 and above fall through */
-+ case X86_VENDOR_AMD:
-+ def_to_bigsmp = 1;
-+ }
-+ }
-+ bios_cpu_apicid[num_processors - 1] = m->mpc_apicid;
-+}
-+#else
-+void __init MP_processor_info (struct mpc_config_processor *m)
-+{
-+ num_processors++;
-+}
-+#endif /* CONFIG_XEN */
-+
-+static void __init MP_bus_info (struct mpc_config_bus *m)
-+{
-+ char str[7];
-+
-+ memcpy(str, m->mpc_bustype, 6);
-+ str[6] = 0;
-+
-+ mpc_oem_bus_info(m, str, translation_table[mpc_record]);
-+
-+ if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) {
-+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
-+ } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) {
-+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
-+ } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) {
-+ mpc_oem_pci_bus(m, translation_table[mpc_record]);
-+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
-+ mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
-+ mp_current_pci_id++;
-+ } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) {
-+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
-+ } else if (strncmp(str, BUSTYPE_NEC98, sizeof(BUSTYPE_NEC98)-1) == 0) {
-+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_NEC98;
-+ } else {
-+ printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
-+ }
-+}
-+
-+static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
-+{
-+ if (!(m->mpc_flags & MPC_APIC_USABLE))
-+ return;
-+
-+ printk(KERN_INFO "I/O APIC #%d Version %d at 0x%lX.\n",
-+ m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
-+ if (nr_ioapics >= MAX_IO_APICS) {
-+ printk(KERN_CRIT "Max # of I/O APICs (%d) exceeded (found %d).\n",
-+ MAX_IO_APICS, nr_ioapics);
-+ panic("Recompile kernel with bigger MAX_IO_APICS!.\n");
-+ }
-+ if (!m->mpc_apicaddr) {
-+ printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
-+ " found in MP table, skipping!\n");
-+ return;
-+ }
-+ mp_ioapics[nr_ioapics] = *m;
-+ nr_ioapics++;
-+}
-+
-+static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
-+{
-+ mp_irqs [mp_irq_entries] = *m;
-+ Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
-+ " IRQ %02x, APIC ID %x, APIC INT %02x\n",
-+ m->mpc_irqtype, m->mpc_irqflag & 3,
-+ (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
-+ m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
-+ if (++mp_irq_entries == MAX_IRQ_SOURCES)
-+ panic("Max # of irq sources exceeded!!\n");
-+}
-+
-+static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
-+{
-+ Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
-+ " IRQ %02x, APIC ID %x, APIC LINT %02x\n",
-+ m->mpc_irqtype, m->mpc_irqflag & 3,
-+ (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
-+ m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
-+ /*
-+	 * Well, it seems all SMP boards in existence
-+	 * use ExtINT/LVT1 == LINT0 and
-+	 * NMI/LVT2 == LINT1 - the following check
-+	 * will show us if this assumption is false.
-+ * Until then we do not have to add baggage.
-+ */
-+ if ((m->mpc_irqtype == mp_ExtINT) &&
-+ (m->mpc_destapiclint != 0))
-+ BUG();
-+ if ((m->mpc_irqtype == mp_NMI) &&
-+ (m->mpc_destapiclint != 1))
-+ BUG();
-+}
-+
-+#ifdef CONFIG_X86_NUMAQ
-+static void __init MP_translation_info (struct mpc_config_translation *m)
-+{
-+ printk(KERN_INFO "Translation: record %d, type %d, quad %d, global %d, local %d\n", mpc_record, m->trans_type, m->trans_quad, m->trans_global, m->trans_local);
-+
-+ if (mpc_record >= MAX_MPC_ENTRY)
-+ printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n");
-+ else
-+ translation_table[mpc_record] = m; /* stash this for later */
-+ if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad))
-+ node_set_online(m->trans_quad);
-+}
-+
-+/*
-+ * Read/parse the MPC oem tables
-+ */
-+
-+static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, \
-+ unsigned short oemsize)
-+{
-+ int count = sizeof (*oemtable); /* the header size */
-+ unsigned char *oemptr = ((unsigned char *)oemtable)+count;
-+
-+ mpc_record = 0;
-+ printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n", oemtable);
-+ if (memcmp(oemtable->oem_signature,MPC_OEM_SIGNATURE,4))
-+ {
-+ printk(KERN_WARNING "SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
-+ oemtable->oem_signature[0],
-+ oemtable->oem_signature[1],
-+ oemtable->oem_signature[2],
-+ oemtable->oem_signature[3]);
-+ return;
-+ }
-+ if (mpf_checksum((unsigned char *)oemtable,oemtable->oem_length))
-+ {
-+ printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
-+ return;
-+ }
-+ while (count < oemtable->oem_length) {
-+ switch (*oemptr) {
-+ case MP_TRANSLATION:
-+ {
-+ struct mpc_config_translation *m=
-+ (struct mpc_config_translation *)oemptr;
-+ MP_translation_info(m);
-+ oemptr += sizeof(*m);
-+ count += sizeof(*m);
-+ ++mpc_record;
-+ break;
-+ }
-+ default:
-+ {
-+ printk(KERN_WARNING "Unrecognised OEM table entry type! - %d\n", (int) *oemptr);
-+ return;
-+ }
-+ }
-+ }
-+}
-+
-+static inline void mps_oem_check(struct mp_config_table *mpc, char *oem,
-+ char *productid)
-+{
-+ if (strncmp(oem, "IBM NUMA", 8))
-+ printk("Warning! May not be a NUMA-Q system!\n");
-+ if (mpc->mpc_oemptr)
-+ smp_read_mpc_oem((struct mp_config_oemtable *) mpc->mpc_oemptr,
-+ mpc->mpc_oemsize);
-+}
-+#endif /* CONFIG_X86_NUMAQ */
-+
-+/*
-+ * Read/parse the MPC
-+ */
-+
-+static int __init smp_read_mpc(struct mp_config_table *mpc)
-+{
-+ char str[16];
-+ char oem[10];
-+ int count=sizeof(*mpc);
-+ unsigned char *mpt=((unsigned char *)mpc)+count;
-+
-+ if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
-+ printk(KERN_ERR "SMP mptable: bad signature [0x%x]!\n",
-+ *(u32 *)mpc->mpc_signature);
-+ return 0;
-+ }
-+ if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
-+ printk(KERN_ERR "SMP mptable: checksum error!\n");
-+ return 0;
-+ }
-+ if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
-+ printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
-+ mpc->mpc_spec);
-+ return 0;
-+ }
-+ if (!mpc->mpc_lapic) {
-+ printk(KERN_ERR "SMP mptable: null local APIC address!\n");
-+ return 0;
-+ }
-+ memcpy(oem,mpc->mpc_oem,8);
-+ oem[8]=0;
-+ printk(KERN_INFO "OEM ID: %s ",oem);
-+
-+ memcpy(str,mpc->mpc_productid,12);
-+ str[12]=0;
-+ printk("Product ID: %s ",str);
-+
-+ mps_oem_check(mpc, oem, str);
-+
-+ printk("APIC at: 0x%lX\n",mpc->mpc_lapic);
-+
-+ /*
-+ * Save the local APIC address (it might be non-default) -- but only
-+ * if we're not using ACPI.
-+ */
-+ if (!acpi_lapic)
-+ mp_lapic_addr = mpc->mpc_lapic;
-+
-+ /*
-+ * Now process the configuration blocks.
-+ */
-+ mpc_record = 0;
-+ while (count < mpc->mpc_length) {
-+ switch(*mpt) {
-+ case MP_PROCESSOR:
-+ {
-+ struct mpc_config_processor *m=
-+ (struct mpc_config_processor *)mpt;
-+ /* ACPI may have already provided this data */
-+ if (!acpi_lapic)
-+ MP_processor_info(m);
-+ mpt += sizeof(*m);
-+ count += sizeof(*m);
-+ break;
-+ }
-+ case MP_BUS:
-+ {
-+ struct mpc_config_bus *m=
-+ (struct mpc_config_bus *)mpt;
-+ MP_bus_info(m);
-+ mpt += sizeof(*m);
-+ count += sizeof(*m);
-+ break;
-+ }
-+ case MP_IOAPIC:
-+ {
-+ struct mpc_config_ioapic *m=
-+ (struct mpc_config_ioapic *)mpt;
-+ MP_ioapic_info(m);
-+ mpt+=sizeof(*m);
-+ count+=sizeof(*m);
-+ break;
-+ }
-+ case MP_INTSRC:
-+ {
-+ struct mpc_config_intsrc *m=
-+ (struct mpc_config_intsrc *)mpt;
-+
-+ MP_intsrc_info(m);
-+ mpt+=sizeof(*m);
-+ count+=sizeof(*m);
-+ break;
-+ }
-+ case MP_LINTSRC:
-+ {
-+ struct mpc_config_lintsrc *m=
-+ (struct mpc_config_lintsrc *)mpt;
-+ MP_lintsrc_info(m);
-+ mpt+=sizeof(*m);
-+ count+=sizeof(*m);
-+ break;
-+ }
-+ default:
-+ {
-+ count = mpc->mpc_length;
-+ break;
-+ }
-+ }
-+ ++mpc_record;
-+ }
-+ clustered_apic_check();
-+ if (!num_processors)
-+ printk(KERN_ERR "SMP mptable: no processors registered!\n");
-+ return num_processors;
-+}
-+
-+static int __init ELCR_trigger(unsigned int irq)
-+{
-+ unsigned int port;
-+
-+ port = 0x4d0 + (irq >> 3);
-+ return (inb(port) >> (irq & 7)) & 1;
-+}
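
A sketch of the ELCR indexing above: the trigger-mode bits for IRQs 0-7 live in port 0x4d0 and for IRQs 8-15 in port 0x4d1, one bit per IRQ. No port I/O is done here, only the index arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int irq;

	for (irq = 0; irq < 16; irq++) {
		unsigned int port = 0x4d0 + (irq >> 3);	/* which ELCR byte */
		unsigned int bit = irq & 7;		/* which bit within it */

		printf("IRQ %2u -> ELCR port 0x%03x, bit %u\n", irq, port, bit);
	}
	return 0;
}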
-+
-+static void __init construct_default_ioirq_mptable(int mpc_default_type)
-+{
-+ struct mpc_config_intsrc intsrc;
-+ int i;
-+ int ELCR_fallback = 0;
-+
-+ intsrc.mpc_type = MP_INTSRC;
-+ intsrc.mpc_irqflag = 0; /* conforming */
-+ intsrc.mpc_srcbus = 0;
-+ intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
-+
-+ intsrc.mpc_irqtype = mp_INT;
-+
-+ /*
-+ * If true, we have an ISA/PCI system with no IRQ entries
-+ * in the MP table. To prevent the PCI interrupts from being set up
-+ * incorrectly, we try to use the ELCR. The sanity check to see if
-+ * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
-+ * never be level sensitive, so we simply see if the ELCR agrees.
-+ * If it does, we assume it's valid.
-+ */
-+ if (mpc_default_type == 5) {
-+ printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
-+
-+ if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
-+ printk(KERN_WARNING "ELCR contains invalid data... not using ELCR\n");
-+ else {
-+ printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
-+ ELCR_fallback = 1;
-+ }
-+ }
-+
-+ for (i = 0; i < 16; i++) {
-+ switch (mpc_default_type) {
-+ case 2:
-+ if (i == 0 || i == 13)
-+ continue; /* IRQ0 & IRQ13 not connected */
-+ /* fall through */
-+ default:
-+ if (i == 2)
-+ continue; /* IRQ2 is never connected */
-+ }
-+
-+ if (ELCR_fallback) {
-+ /*
-+ * If the ELCR indicates a level-sensitive interrupt, we
-+ * copy that information over to the MP table in the
-+ * irqflag field (level sensitive, active high polarity).
-+ */
-+ if (ELCR_trigger(i))
-+ intsrc.mpc_irqflag = 13;
-+ else
-+ intsrc.mpc_irqflag = 0;
-+ }
-+
-+ intsrc.mpc_srcbusirq = i;
-+ intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */
-+ MP_intsrc_info(&intsrc);
-+ }
-+
-+ intsrc.mpc_irqtype = mp_ExtINT;
-+ intsrc.mpc_srcbusirq = 0;
-+ intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */
-+ MP_intsrc_info(&intsrc);
-+}
-+
-+static inline void __init construct_default_ISA_mptable(int mpc_default_type)
-+{
-+ struct mpc_config_processor processor;
-+ struct mpc_config_bus bus;
-+ struct mpc_config_ioapic ioapic;
-+ struct mpc_config_lintsrc lintsrc;
-+ int linttypes[2] = { mp_ExtINT, mp_NMI };
-+ int i;
-+
-+ /*
-+ * local APIC has default address
-+ */
-+ mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
-+
-+ /*
-+ * 2 CPUs, numbered 0 & 1.
-+ */
-+ processor.mpc_type = MP_PROCESSOR;
-+ /* Either an integrated APIC or a discrete 82489DX. */
-+ processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
-+ processor.mpc_cpuflag = CPU_ENABLED;
-+ processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
-+ (boot_cpu_data.x86_model << 4) |
-+ boot_cpu_data.x86_mask;
-+ processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
-+ processor.mpc_reserved[0] = 0;
-+ processor.mpc_reserved[1] = 0;
-+ for (i = 0; i < 2; i++) {
-+ processor.mpc_apicid = i;
-+ MP_processor_info(&processor);
-+ }
-+
-+ bus.mpc_type = MP_BUS;
-+ bus.mpc_busid = 0;
-+ switch (mpc_default_type) {
-+ default:
-+ printk("???\n");
-+ printk(KERN_ERR "Unknown standard configuration %d\n",
-+ mpc_default_type);
-+ /* fall through */
-+ case 1:
-+ case 5:
-+ memcpy(bus.mpc_bustype, "ISA ", 6);
-+ break;
-+ case 2:
-+ case 6:
-+ case 3:
-+ memcpy(bus.mpc_bustype, "EISA ", 6);
-+ break;
-+ case 4:
-+ case 7:
-+ memcpy(bus.mpc_bustype, "MCA ", 6);
-+ }
-+ MP_bus_info(&bus);
-+ if (mpc_default_type > 4) {
-+ bus.mpc_busid = 1;
-+ memcpy(bus.mpc_bustype, "PCI ", 6);
-+ MP_bus_info(&bus);
-+ }
-+
-+ ioapic.mpc_type = MP_IOAPIC;
-+ ioapic.mpc_apicid = 2;
-+ ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
-+ ioapic.mpc_flags = MPC_APIC_USABLE;
-+ ioapic.mpc_apicaddr = 0xFEC00000;
-+ MP_ioapic_info(&ioapic);
-+
-+ /*
-+ * We set up most of the low 16 IO-APIC pins according to MPS rules.
-+ */
-+ construct_default_ioirq_mptable(mpc_default_type);
-+
-+ lintsrc.mpc_type = MP_LINTSRC;
-+ lintsrc.mpc_irqflag = 0; /* conforming */
-+ lintsrc.mpc_srcbusid = 0;
-+ lintsrc.mpc_srcbusirq = 0;
-+ lintsrc.mpc_destapic = MP_APIC_ALL;
-+ for (i = 0; i < 2; i++) {
-+ lintsrc.mpc_irqtype = linttypes[i];
-+ lintsrc.mpc_destapiclint = i;
-+ MP_lintsrc_info(&lintsrc);
-+ }
-+}
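
The mpc_cpufeature word assembled above uses the CPUID signature layout: family in bits 11:8, model in bits 7:4, stepping in bits 3:0. A self-contained sketch of the packing; the sample values are made up:

#include <stdio.h>

int main(void)
{
	unsigned int family = 6, model = 8, stepping = 3;	/* hypothetical CPU */
	unsigned int sig = (family << 8) | (model << 4) | stepping;

	printf("signature = 0x%03x\n", sig);	/* prints 0x683 */
	return 0;
}
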
-+
-+static struct intel_mp_floating *mpf_found;
-+
-+/*
-+ * Scan the memory blocks for an SMP configuration block.
-+ */
-+void __init get_smp_config (void)
-+{
-+ struct intel_mp_floating *mpf = mpf_found;
-+
-+ /*
-+ * ACPI supports both logical (e.g. Hyper-Threading) and physical
-+	 * processors, whereas MPS supports only physical ones.
-+ */
-+ if (acpi_lapic && acpi_ioapic) {
-+ printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
-+ return;
-+ }
-+ else if (acpi_lapic)
-+ printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");
-+
-+ printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
-+ if (mpf->mpf_feature2 & (1<<7)) {
-+ printk(KERN_INFO " IMCR and PIC compatibility mode.\n");
-+ pic_mode = 1;
-+ } else {
-+ printk(KERN_INFO " Virtual Wire compatibility mode.\n");
-+ pic_mode = 0;
-+ }
-+
-+ /*
-+ * Now see if we need to read further.
-+ */
-+ if (mpf->mpf_feature1 != 0) {
-+
-+ printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
-+ construct_default_ISA_mptable(mpf->mpf_feature1);
-+
-+ } else if (mpf->mpf_physptr) {
-+
-+ /*
-+ * Read the physical hardware table. Anything here will
-+ * override the defaults.
-+ */
-+ if (!smp_read_mpc(isa_bus_to_virt(mpf->mpf_physptr))) {
-+ smp_found_config = 0;
-+ printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
-+ printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
-+ return;
-+ }
-+ /*
-+ * If there are no explicit MP IRQ entries, then we are
-+ * broken. We set up most of the low 16 IO-APIC pins to
-+ * ISA defaults and hope it will work.
-+ */
-+ if (!mp_irq_entries) {
-+ struct mpc_config_bus bus;
-+
-+ printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
-+
-+ bus.mpc_type = MP_BUS;
-+ bus.mpc_busid = 0;
-+ memcpy(bus.mpc_bustype, "ISA ", 6);
-+ MP_bus_info(&bus);
-+
-+ construct_default_ioirq_mptable(0);
-+ }
-+
-+ } else
-+ BUG();
-+
-+ printk(KERN_INFO "Processors: %d\n", num_processors);
-+ /*
-+ * Only use the first configuration found.
-+ */
-+}
-+
-+static int __init smp_scan_config (unsigned long base, unsigned long length)
-+{
-+ unsigned long *bp = isa_bus_to_virt(base);
-+ struct intel_mp_floating *mpf;
-+
-+ Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
-+ if (sizeof(*mpf) != 16)
-+ printk("Error: MPF size\n");
-+
-+ while (length > 0) {
-+ mpf = (struct intel_mp_floating *)bp;
-+ if ((*bp == SMP_MAGIC_IDENT) &&
-+ (mpf->mpf_length == 1) &&
-+ !mpf_checksum((unsigned char *)bp, 16) &&
-+ ((mpf->mpf_specification == 1)
-+ || (mpf->mpf_specification == 4)) ) {
-+
-+ smp_found_config = 1;
-+#ifndef CONFIG_XEN
-+ printk(KERN_INFO "found SMP MP-table at %08lx\n",
-+ virt_to_phys(mpf));
-+ reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE);
-+ if (mpf->mpf_physptr) {
-+ /*
-+				 * We cannot access the MPC table to compute
-+				 * its size yet, as only a few megabytes from
-+				 * the bottom of memory are mapped at this
-+				 * point. The PC-9800 places its MPC table at
-+				 * the very end of physical memory, so blindly
-+				 * reserving PAGE_SIZE from mpf->mpf_physptr
-+				 * would trigger the BUG() in reserve_bootmem.
-+ */
-+ unsigned long size = PAGE_SIZE;
-+ unsigned long end = max_low_pfn * PAGE_SIZE;
-+ if (mpf->mpf_physptr + size > end)
-+ size = end - mpf->mpf_physptr;
-+ reserve_bootmem(mpf->mpf_physptr, size);
-+ }
-+#else
-+ printk(KERN_INFO "found SMP MP-table at %08lx\n",
-+ ((unsigned long)bp - (unsigned long)isa_bus_to_virt(base)) + base);
-+#endif
-+
-+ mpf_found = mpf;
-+ return 1;
-+ }
-+ bp += 4;
-+ length -= 16;
-+ }
-+ return 0;
-+}
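
mpf_checksum() is defined elsewhere in this file. The MP 1.4 specification requires the 16 bytes of the floating pointer structure to sum to zero modulo 256, which is what the check above relies on. A standalone sketch of that rule; the structure contents and the checksum offset of 10 are assumptions for illustration:

#include <stdio.h>

static int bytes_sum_to_zero(const unsigned char *p, int len)
{
	unsigned int sum = 0;

	while (len--)
		sum += *p++;
	return (sum & 0xff) == 0;
}

int main(void)
{
	unsigned char mpf[16] = { '_', 'M', 'P', '_' };	/* fake "_MP_" structure */

	mpf[10] = (unsigned char)-('_' + 'M' + 'P' + '_');	/* balance the sum */
	printf("checksum ok: %d\n", bytes_sum_to_zero(mpf, 16));	/* prints 1 */
	return 0;
}
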
-+
-+void __init find_smp_config (void)
-+{
-+#ifndef CONFIG_XEN
-+ unsigned int address;
-+#endif
-+
-+ /*
-+ * FIXME: Linux assumes you have 640K of base ram..
-+ * this continues the error...
-+ *
-+ * 1) Scan the bottom 1K for a signature
-+ * 2) Scan the top 1K of base RAM
-+ * 3) Scan the 64K of bios
-+ */
-+ if (smp_scan_config(0x0,0x400) ||
-+ smp_scan_config(639*0x400,0x400) ||
-+ smp_scan_config(0xF0000,0x10000))
-+ return;
-+ /*
-+ * If it is an SMP machine we should know now, unless the
-+ * configuration is in an EISA/MCA bus machine with an
-+ * extended bios data area.
-+ *
-+ * there is a real-mode segmented pointer pointing to the
-+ * 4K EBDA area at 0x40E, calculate and scan it here.
-+ *
-+ * NOTE! There are Linux loaders that will corrupt the EBDA
-+ * area, and as such this kind of SMP config may be less
-+ * trustworthy, simply because the SMP table may have been
-+ * stomped on during early boot. These loaders are buggy and
-+ * should be fixed.
-+ *
-+ * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
-+ */
-+
-+#ifndef CONFIG_XEN
-+ address = get_bios_ebda();
-+ if (address)
-+ smp_scan_config(address, 0x400);
-+#endif
-+}
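
get_bios_ebda() is not part of this hunk. By PC convention the 16-bit word at physical address 0x40E holds the real-mode segment of the Extended BIOS Data Area, so the physical address is that segment shifted left by four bits. A standalone sketch of the arithmetic; the segment value is hypothetical:

#include <stdio.h>

int main(void)
{
	unsigned short ebda_seg = 0x9fc0;	/* value read from 0x40E; made up */
	unsigned long ebda_addr = (unsigned long)ebda_seg << 4;

	printf("EBDA at 0x%05lx\n", ebda_addr);	/* 0x9fc00, just under 640K */
	return 0;
}
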
-+
-+/* --------------------------------------------------------------------------
-+ ACPI-based MP Configuration
-+ -------------------------------------------------------------------------- */
-+
-+#ifdef CONFIG_ACPI
-+
-+void __init mp_register_lapic_address (
-+ u64 address)
-+{
-+#ifndef CONFIG_XEN
-+ mp_lapic_addr = (unsigned long) address;
-+
-+ set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
-+
-+ if (boot_cpu_physical_apicid == -1U)
-+ boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
-+
-+ Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
-+#endif
-+}
-+
-+
-+void __devinit mp_register_lapic (
-+ u8 id,
-+ u8 enabled)
-+{
-+ struct mpc_config_processor processor;
-+ int boot_cpu = 0;
-+
-+ if (MAX_APICS - id <= 0) {
-+ printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
-+ id, MAX_APICS);
-+ return;
-+ }
-+
-+ if (id == boot_cpu_physical_apicid)
-+ boot_cpu = 1;
-+
-+#ifndef CONFIG_XEN
-+ processor.mpc_type = MP_PROCESSOR;
-+ processor.mpc_apicid = id;
-+ processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR));
-+ processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
-+ processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
-+ processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
-+ (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
-+ processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
-+ processor.mpc_reserved[0] = 0;
-+ processor.mpc_reserved[1] = 0;
-+#endif
-+
-+ MP_processor_info(&processor);
-+}
-+
-+#ifdef CONFIG_X86_IO_APIC
-+
-+#define MP_ISA_BUS 0
-+#define MP_MAX_IOAPIC_PIN 127
-+
-+static struct mp_ioapic_routing {
-+ int apic_id;
-+ int gsi_base;
-+ int gsi_end;
-+ u32 pin_programmed[4];
-+} mp_ioapic_routing[MAX_IO_APICS];
-+
-+
-+static int mp_find_ioapic (
-+ int gsi)
-+{
-+ int i = 0;
-+
-+ /* Find the IOAPIC that manages this GSI. */
-+ for (i = 0; i < nr_ioapics; i++) {
-+ if ((gsi >= mp_ioapic_routing[i].gsi_base)
-+ && (gsi <= mp_ioapic_routing[i].gsi_end))
-+ return i;
-+ }
-+
-+ printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
-+
-+ return -1;
-+}
-+
-+
-+void __init mp_register_ioapic (
-+ u8 id,
-+ u32 address,
-+ u32 gsi_base)
-+{
-+ int idx = 0;
-+ int tmpid;
-+
-+ if (nr_ioapics >= MAX_IO_APICS) {
-+ printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
-+ "(found %d)\n", MAX_IO_APICS, nr_ioapics);
-+ panic("Recompile kernel with bigger MAX_IO_APICS!\n");
-+ }
-+ if (!address) {
-+ printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
-+ " found in MADT table, skipping!\n");
-+ return;
-+ }
-+
-+ idx = nr_ioapics++;
-+
-+ mp_ioapics[idx].mpc_type = MP_IOAPIC;
-+ mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
-+ mp_ioapics[idx].mpc_apicaddr = address;
-+
-+#ifndef CONFIG_XEN
-+ set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
-+#endif
-+ if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 < 15))
-+ tmpid = io_apic_get_unique_id(idx, id);
-+ else
-+ tmpid = id;
-+ if (tmpid == -1) {
-+ nr_ioapics--;
-+ return;
-+ }
-+ mp_ioapics[idx].mpc_apicid = tmpid;
-+ mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
-+
-+ /*
-+ * Build basic GSI lookup table to facilitate gsi->io_apic lookups
-+ * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
-+ */
-+ mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
-+ mp_ioapic_routing[idx].gsi_base = gsi_base;
-+ mp_ioapic_routing[idx].gsi_end = gsi_base +
-+ io_apic_get_redir_entries(idx);
-+
-+ printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, "
-+ "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
-+ mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
-+ mp_ioapic_routing[idx].gsi_base,
-+ mp_ioapic_routing[idx].gsi_end);
-+
-+ return;
-+}
-+
-+
-+void __init mp_override_legacy_irq (
-+ u8 bus_irq,
-+ u8 polarity,
-+ u8 trigger,
-+ u32 gsi)
-+{
-+ struct mpc_config_intsrc intsrc;
-+ int ioapic = -1;
-+ int pin = -1;
-+
-+ /*
-+ * Convert 'gsi' to 'ioapic.pin'.
-+ */
-+ ioapic = mp_find_ioapic(gsi);
-+ if (ioapic < 0)
-+ return;
-+ pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
-+
-+ /*
-+ * TBD: This check is for faulty timer entries, where the override
-+ * erroneously sets the trigger to level, resulting in a HUGE
-+ * increase of timer interrupts!
-+ */
-+ if ((bus_irq == 0) && (trigger == 3))
-+ trigger = 1;
-+
-+ intsrc.mpc_type = MP_INTSRC;
-+ intsrc.mpc_irqtype = mp_INT;
-+ intsrc.mpc_irqflag = (trigger << 2) | polarity;
-+ intsrc.mpc_srcbus = MP_ISA_BUS;
-+ intsrc.mpc_srcbusirq = bus_irq; /* IRQ */
-+ intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */
-+ intsrc.mpc_dstirq = pin; /* INTIN# */
-+
-+ Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
-+ intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
-+ (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
-+ intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
-+
-+ mp_irqs[mp_irq_entries] = intsrc;
-+ if (++mp_irq_entries == MAX_IRQ_SOURCES)
-+ panic("Max # of irq sources exceeded!\n");
-+
-+ return;
-+}
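
The irqflag word built above packs the MP-spec polarity field into bits 1:0 and the trigger field into bits 3:2; the Dprintk() decodes it back with & 3 and >> 2. A standalone round-trip sketch; trigger 3 (level) with polarity 1 (active high) reproduces the irqflag value of 13 used by the ELCR fallback earlier in this file:

#include <stdio.h>

int main(void)
{
	unsigned int trigger = 3, polarity = 1;	/* MP spec: 3 = level, 1 = active high */
	unsigned int irqflag = (trigger << 2) | polarity;

	printf("flag=%u pol=%u trig=%u\n",
	       irqflag, irqflag & 3, (irqflag >> 2) & 3);	/* flag=13 pol=1 trig=3 */
	return 0;
}
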
-+
-+int es7000_plat;
-+
-+void __init mp_config_acpi_legacy_irqs (void)
-+{
-+ struct mpc_config_intsrc intsrc;
-+ int i = 0;
-+ int ioapic = -1;
-+
-+ /*
-+ * Fabricate the legacy ISA bus (bus #31).
-+ */
-+ mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
-+ Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);
-+
-+ /*
-+ * Older generations of ES7000 have no legacy identity mappings
-+ */
-+ if (es7000_plat == 1)
-+ return;
-+
-+ /*
-+ * Locate the IOAPIC that manages the ISA IRQs (0-15).
-+ */
-+ ioapic = mp_find_ioapic(0);
-+ if (ioapic < 0)
-+ return;
-+
-+ intsrc.mpc_type = MP_INTSRC;
-+ intsrc.mpc_irqflag = 0; /* Conforming */
-+ intsrc.mpc_srcbus = MP_ISA_BUS;
-+ intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
-+
-+ /*
-+	 * Use the default configuration for IRQs 0-15, unless
-+	 * overridden by (MADT) interrupt source override entries.
-+ */
-+ for (i = 0; i < 16; i++) {
-+ int idx;
-+
-+ for (idx = 0; idx < mp_irq_entries; idx++) {
-+ struct mpc_config_intsrc *irq = mp_irqs + idx;
-+
-+ /* Do we already have a mapping for this ISA IRQ? */
-+ if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
-+ break;
-+
-+			/* Do we already have a mapping for this IOAPIC pin? */
-+ if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
-+ (irq->mpc_dstirq == i))
-+ break;
-+ }
-+
-+ if (idx != mp_irq_entries) {
-+ printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
-+ continue; /* IRQ already used */
-+ }
-+
-+ intsrc.mpc_irqtype = mp_INT;
-+ intsrc.mpc_srcbusirq = i; /* Identity mapped */
-+ intsrc.mpc_dstirq = i;
-+
-+ Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
-+ "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
-+ (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
-+ intsrc.mpc_srcbusirq, intsrc.mpc_dstapic,
-+ intsrc.mpc_dstirq);
-+
-+ mp_irqs[mp_irq_entries] = intsrc;
-+ if (++mp_irq_entries == MAX_IRQ_SOURCES)
-+ panic("Max # of irq sources exceeded!\n");
-+ }
-+}
-+
-+#define MAX_GSI_NUM 4096
-+
-+int mp_register_gsi (u32 gsi, int triggering, int polarity)
-+{
-+ int ioapic = -1;
-+ int ioapic_pin = 0;
-+ int idx, bit = 0;
-+ static int pci_irq = 16;
-+ /*
-+	 * Mapping between Global System Interrupts, which
-+ * represent all possible interrupts, and IRQs
-+ * assigned to actual devices.
-+ */
-+ static int gsi_to_irq[MAX_GSI_NUM];
-+
-+ /* Don't set up the ACPI SCI because it's already set up */
-+ if (acpi_fadt.sci_int == gsi)
-+ return gsi;
-+
-+ ioapic = mp_find_ioapic(gsi);
-+ if (ioapic < 0) {
-+ printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
-+ return gsi;
-+ }
-+
-+ ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
-+
-+ if (ioapic_renumber_irq)
-+ gsi = ioapic_renumber_irq(ioapic, gsi);
-+
-+ /*
-+ * Avoid pin reprogramming. PRTs typically include entries
-+ * with redundant pin->gsi mappings (but unique PCI devices);
-+ * we only program the IOAPIC on the first.
-+ */
-+ bit = ioapic_pin % 32;
-+ idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
-+ if (idx > 3) {
-+ printk(KERN_ERR "Invalid reference to IOAPIC pin "
-+ "%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
-+ ioapic_pin);
-+ return gsi;
-+ }
-+ if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
-+ Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
-+ mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
-+ return gsi_to_irq[gsi];
-+ }
-+
-+ mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
-+
-+ if (triggering == ACPI_LEVEL_SENSITIVE) {
-+ /*
-+ * For PCI devices assign IRQs in order, avoiding gaps
-+ * due to unused I/O APIC pins.
-+ */
-+ int irq = gsi;
-+ if (gsi < MAX_GSI_NUM) {
-+ if (gsi > 15)
-+ gsi = pci_irq++;
-+ /*
-+ * Don't assign IRQ used by ACPI SCI
-+ */
-+ if (gsi == acpi_fadt.sci_int)
-+ gsi = pci_irq++;
-+ gsi_to_irq[irq] = gsi;
-+ } else {
-+ printk(KERN_ERR "GSI %u is too high\n", gsi);
-+ return gsi;
-+ }
-+ }
-+
-+ io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
-+ triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
-+ polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
-+ return gsi;
-+}
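
The pin_programmed bookkeeping above keeps one bit per IOAPIC pin across four 32-bit words; note that (ioapic_pin < 32) ? 0 : (ioapic_pin / 32) is simply ioapic_pin / 32 written defensively. A standalone sketch of the test-and-set, with a made-up pin number:

#include <stdio.h>

int main(void)
{
	unsigned int pin_programmed[4] = { 0 };	/* covers pins 0..127 */
	unsigned int pin = 45;			/* hypothetical IOAPIC pin */
	unsigned int idx = pin / 32, bit = pin % 32;

	if (!(pin_programmed[idx] & (1u << bit))) {
		pin_programmed[idx] |= 1u << bit;
		printf("programmed pin %u (word %u, bit %u)\n", pin, idx, bit);
	} else {
		printf("pin %u already programmed, skipping\n", pin);
	}
	return 0;
}
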
-+
-+#endif /* CONFIG_X86_IO_APIC */
-+#endif /* CONFIG_ACPI */
-diff --git a/arch/i386/kernel/pci-dma-xen.c b/arch/i386/kernel/pci-dma-xen.c
-new file mode 100644
-index 0000000..a707f24
---- /dev/null
-+++ b/arch/i386/kernel/pci-dma-xen.c
-@@ -0,0 +1,326 @@
-+/*
-+ * Dynamic DMA mapping support.
-+ *
-+ * On i386 there is no hardware dynamic DMA address translation,
-+ * so consistent alloc/free are merely page allocation/freeing.
-+ * The rest of the dynamic DMA mapping interface is implemented
-+ * in asm/pci.h.
-+ */
-+
-+#include <linux/types.h>
-+#include <linux/mm.h>
-+#include <linux/string.h>
-+#include <linux/pci.h>
-+#include <linux/module.h>
-+#include <linux/version.h>
-+#include <asm/io.h>
-+#include <xen/balloon.h>
-+#include <asm/tlbflush.h>
-+#include <asm-i386/swiotlb.h>
-+#include <asm/bug.h>
-+
-+#ifdef __x86_64__
-+int iommu_merge __read_mostly = 0;
-+EXPORT_SYMBOL(iommu_merge);
-+
-+dma_addr_t bad_dma_address __read_mostly;
-+EXPORT_SYMBOL(bad_dma_address);
-+
-+/* This tells the BIO block layer to assume merging. Default to off
-+ because we cannot guarantee merging later. */
-+int iommu_bio_merge __read_mostly = 0;
-+EXPORT_SYMBOL(iommu_bio_merge);
-+
-+__init int iommu_setup(char *p)
-+{
-+ return 1;
-+}
-+#endif
-+
-+struct dma_coherent_mem {
-+ void *virt_base;
-+ u32 device_base;
-+ int size;
-+ int flags;
-+ unsigned long *bitmap;
-+};
-+
-+#define IOMMU_BUG_ON(test) \
-+do { \
-+ if (unlikely(test)) { \
-+ printk(KERN_ALERT "Fatal DMA error! " \
-+ "Please use 'swiotlb=force'\n"); \
-+ BUG(); \
-+ } \
-+} while (0)
-+
-+int
-+dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-+ enum dma_data_direction direction)
-+{
-+ int i, rc;
-+
-+ if (direction == DMA_NONE)
-+ BUG();
-+ WARN_ON(nents == 0 || sg[0].length == 0);
-+
-+ if (swiotlb) {
-+ rc = swiotlb_map_sg(hwdev, sg, nents, direction);
-+ } else {
-+ for (i = 0; i < nents; i++ ) {
-+ sg[i].dma_address =
-+ page_to_phys(sg[i].page) + sg[i].offset;
-+ sg[i].dma_length = sg[i].length;
-+ BUG_ON(!sg[i].page);
-+ IOMMU_BUG_ON(address_needs_mapping(
-+ hwdev, sg[i].dma_address));
-+ }
-+ rc = nents;
-+ }
-+
-+ flush_write_buffers();
-+ return rc;
-+}
-+EXPORT_SYMBOL(dma_map_sg);
-+
-+void
-+dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-+ enum dma_data_direction direction)
-+{
-+ BUG_ON(direction == DMA_NONE);
-+ if (swiotlb)
-+ swiotlb_unmap_sg(hwdev, sg, nents, direction);
-+}
-+EXPORT_SYMBOL(dma_unmap_sg);
-+
-+dma_addr_t
-+dma_map_page(struct device *dev, struct page *page, unsigned long offset,
-+ size_t size, enum dma_data_direction direction)
-+{
-+ dma_addr_t dma_addr;
-+
-+ BUG_ON(direction == DMA_NONE);
-+
-+ if (swiotlb) {
-+ dma_addr = swiotlb_map_page(
-+ dev, page, offset, size, direction);
-+ } else {
-+ dma_addr = page_to_phys(page) + offset;
-+ IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
-+ }
-+
-+ return dma_addr;
-+}
-+EXPORT_SYMBOL(dma_map_page);
-+
-+void
-+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-+ enum dma_data_direction direction)
-+{
-+ BUG_ON(direction == DMA_NONE);
-+ if (swiotlb)
-+ swiotlb_unmap_page(dev, dma_address, size, direction);
-+}
-+EXPORT_SYMBOL(dma_unmap_page);
-+
-+int
-+dma_mapping_error(dma_addr_t dma_addr)
-+{
-+ if (swiotlb)
-+ return swiotlb_dma_mapping_error(dma_addr);
-+ return 0;
-+}
-+EXPORT_SYMBOL(dma_mapping_error);
-+
-+int
-+dma_supported(struct device *dev, u64 mask)
-+{
-+ if (swiotlb)
-+ return swiotlb_dma_supported(dev, mask);
-+ /*
-+	 * By default we BUG() when an infeasible DMA mapping is requested,
-+	 * and ask the user to boot with swiotlb=force (see IOMMU_BUG_ON).
-+ */
-+ return 1;
-+}
-+EXPORT_SYMBOL(dma_supported);
-+
-+void *dma_alloc_coherent(struct device *dev, size_t size,
-+ dma_addr_t *dma_handle, gfp_t gfp)
-+{
-+ void *ret;
-+ struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
-+ unsigned int order = get_order(size);
-+ unsigned long vstart;
-+ /* ignore region specifiers */
-+ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
-+
-+ if (mem) {
-+ int page = bitmap_find_free_region(mem->bitmap, mem->size,
-+ order);
-+ if (page >= 0) {
-+ *dma_handle = mem->device_base + (page << PAGE_SHIFT);
-+ ret = mem->virt_base + (page << PAGE_SHIFT);
-+ memset(ret, 0, size);
-+ return ret;
-+ }
-+ if (mem->flags & DMA_MEMORY_EXCLUSIVE)
-+ return NULL;
-+ }
-+
-+ if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
-+ gfp |= GFP_DMA;
-+
-+ vstart = __get_free_pages(gfp, order);
-+ ret = (void *)vstart;
-+
-+ if (ret != NULL) {
-+ /* NB. Hardcode 31 address bits for now: aacraid limitation. */
-+ if (xen_create_contiguous_region(vstart, order, 31) != 0) {
-+ free_pages(vstart, order);
-+ return NULL;
-+ }
-+ memset(ret, 0, size);
-+ *dma_handle = virt_to_bus(ret);
-+ }
-+ return ret;
-+}
-+EXPORT_SYMBOL(dma_alloc_coherent);
-+
-+void dma_free_coherent(struct device *dev, size_t size,
-+ void *vaddr, dma_addr_t dma_handle)
-+{
-+ struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
-+ int order = get_order(size);
-+
-+ if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
-+ int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
-+
-+ bitmap_release_region(mem->bitmap, page, order);
-+ } else {
-+ xen_destroy_contiguous_region((unsigned long)vaddr, order);
-+ free_pages((unsigned long)vaddr, order);
-+ }
-+}
-+EXPORT_SYMBOL(dma_free_coherent);
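
A hedged sketch of how a driver would consume the coherent API implemented above; the function names and the one-page size are illustrative, not taken from this patch. dma_alloc_coherent() returns a CPU virtual address and fills in a bus address the device can DMA to, and under Xen the backing pages are additionally made machine-contiguous by xen_create_contiguous_region() above.

/* Illustrative driver-side usage; "example_setup" and friends are
 * hypothetical names, not part of this patch. */
#include <linux/dma-mapping.h>

static void *buf;
static dma_addr_t buf_bus;

static int example_setup(struct device *dev)
{
	buf = dma_alloc_coherent(dev, PAGE_SIZE, &buf_bus, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	/* buf: CPU virtual address; buf_bus: address the device uses. */
	return 0;
}

static void example_teardown(struct device *dev)
{
	dma_free_coherent(dev, PAGE_SIZE, buf, buf_bus);
}
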
-+
-+int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
-+ dma_addr_t device_addr, size_t size, int flags)
-+{
-+ void __iomem *mem_base;
-+ int pages = size >> PAGE_SHIFT;
-+ int bitmap_size = (pages + 31)/32;
-+
-+ if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
-+ goto out;
-+ if (!size)
-+ goto out;
-+ if (dev->dma_mem)
-+ goto out;
-+
-+ /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
-+
-+ mem_base = ioremap(bus_addr, size);
-+ if (!mem_base)
-+ goto out;
-+
-+ dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
-+ if (!dev->dma_mem)
-+ goto out;
-+ memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
-+ dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
-+ if (!dev->dma_mem->bitmap)
-+ goto free1_out;
-+ memset(dev->dma_mem->bitmap, 0, bitmap_size);
-+
-+ dev->dma_mem->virt_base = mem_base;
-+ dev->dma_mem->device_base = device_addr;
-+ dev->dma_mem->size = pages;
-+ dev->dma_mem->flags = flags;
-+
-+ if (flags & DMA_MEMORY_MAP)
-+ return DMA_MEMORY_MAP;
-+
-+ return DMA_MEMORY_IO;
-+
-+ free1_out:
-+ kfree(dev->dma_mem->bitmap);
-+ out:
-+ return 0;
-+}
-+EXPORT_SYMBOL(dma_declare_coherent_memory);
-+
-+void dma_release_declared_memory(struct device *dev)
-+{
-+ struct dma_coherent_mem *mem = dev->dma_mem;
-+
-+ if(!mem)
-+ return;
-+ dev->dma_mem = NULL;
-+ iounmap(mem->virt_base);
-+ kfree(mem->bitmap);
-+ kfree(mem);
-+}
-+EXPORT_SYMBOL(dma_release_declared_memory);
-+
-+void *dma_mark_declared_memory_occupied(struct device *dev,
-+ dma_addr_t device_addr, size_t size)
-+{
-+ struct dma_coherent_mem *mem = dev->dma_mem;
-+ int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-+ int pos, err;
-+
-+ if (!mem)
-+ return ERR_PTR(-EINVAL);
-+
-+ pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
-+ err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
-+ if (err != 0)
-+ return ERR_PTR(err);
-+ return mem->virt_base + (pos << PAGE_SHIFT);
-+}
-+EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
-+
-+dma_addr_t
-+dma_map_single(struct device *dev, void *ptr, size_t size,
-+ enum dma_data_direction direction)
-+{
-+ dma_addr_t dma;
-+
-+ if (direction == DMA_NONE)
-+ BUG();
-+ WARN_ON(size == 0);
-+
-+ if (swiotlb) {
-+ dma = swiotlb_map_single(dev, ptr, size, direction);
-+ } else {
-+ dma = virt_to_bus(ptr);
-+ IOMMU_BUG_ON(range_straddles_page_boundary(ptr, size));
-+ IOMMU_BUG_ON(address_needs_mapping(dev, dma));
-+ }
-+
-+ flush_write_buffers();
-+ return dma;
-+}
-+EXPORT_SYMBOL(dma_map_single);
-+
-+void
-+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-+ enum dma_data_direction direction)
-+{
-+ if (direction == DMA_NONE)
-+ BUG();
-+ if (swiotlb)
-+ swiotlb_unmap_single(dev, dma_addr, size, direction);
-+}
-+EXPORT_SYMBOL(dma_unmap_single);
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/arch/i386/kernel/process-xen.c b/arch/i386/kernel/process-xen.c
-new file mode 100644
-index 0000000..47cde96
---- /dev/null
-+++ b/arch/i386/kernel/process-xen.c
-@@ -0,0 +1,833 @@
-+/*
-+ * linux/arch/i386/kernel/process.c
-+ *
-+ * Copyright (C) 1995 Linus Torvalds
-+ *
-+ * Pentium III FXSR, SSE support
-+ * Gareth Hughes <gareth@valinux.com>, May 2000
-+ */
-+
-+/*
-+ * This file handles the architecture-dependent parts of process handling..
-+ */
-+
-+#include <stdarg.h>
-+
-+#include <linux/cpu.h>
-+#include <linux/errno.h>
-+#include <linux/sched.h>
-+#include <linux/fs.h>
-+#include <linux/kernel.h>
-+#include <linux/mm.h>
-+#include <linux/elfcore.h>
-+#include <linux/smp.h>
-+#include <linux/smp_lock.h>
-+#include <linux/stddef.h>
-+#include <linux/slab.h>
-+#include <linux/vmalloc.h>
-+#include <linux/user.h>
-+#include <linux/a.out.h>
-+#include <linux/interrupt.h>
-+#include <linux/config.h>
-+#include <linux/utsname.h>
-+#include <linux/delay.h>
-+#include <linux/reboot.h>
-+#include <linux/init.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/module.h>
-+#include <linux/kallsyms.h>
-+#include <linux/ptrace.h>
-+#include <linux/random.h>
-+#include <linux/kprobes.h>
-+
-+#include <asm/uaccess.h>
-+#include <asm/pgtable.h>
-+#include <asm/system.h>
-+#include <asm/io.h>
-+#include <asm/ldt.h>
-+#include <asm/processor.h>
-+#include <asm/i387.h>
-+#include <asm/desc.h>
-+#include <asm/vm86.h>
-+#ifdef CONFIG_MATH_EMULATION
-+#include <asm/math_emu.h>
-+#endif
-+
-+#include <xen/interface/physdev.h>
-+#include <xen/interface/vcpu.h>
-+
-+#include <linux/err.h>
-+
-+#include <asm/tlbflush.h>
-+#include <asm/cpu.h>
-+
-+asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
-+
-+static int hlt_counter;
-+
-+unsigned long boot_option_idle_override = 0;
-+EXPORT_SYMBOL(boot_option_idle_override);
-+
-+/*
-+ * Return saved PC of a blocked thread.
-+ */
-+unsigned long thread_saved_pc(struct task_struct *tsk)
-+{
-+ return ((unsigned long *)tsk->thread.esp)[3];
-+}
-+
-+/*
-+ * Power-management idle function, if any.
-+ */
-+void (*pm_idle)(void);
-+EXPORT_SYMBOL(pm_idle);
-+static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
-+
-+void disable_hlt(void)
-+{
-+ hlt_counter++;
-+}
-+
-+EXPORT_SYMBOL(disable_hlt);
-+
-+void enable_hlt(void)
-+{
-+ hlt_counter--;
-+}
-+
-+EXPORT_SYMBOL(enable_hlt);
-+
-+/* XXX XEN doesn't use default_idle(), poll_idle(). Use xen_idle() instead. */
-+extern void stop_hz_timer(void);
-+extern void start_hz_timer(void);
-+void xen_idle(void)
-+{
-+ local_irq_disable();
-+
-+ if (need_resched())
-+ local_irq_enable();
-+ else {
-+ clear_thread_flag(TIF_POLLING_NRFLAG);
-+ smp_mb__after_clear_bit();
-+ stop_hz_timer();
-+ /* Blocking includes an implicit local_irq_enable(). */
-+ HYPERVISOR_sched_op(SCHEDOP_block, 0);
-+ start_hz_timer();
-+ set_thread_flag(TIF_POLLING_NRFLAG);
-+ }
-+}
-+#ifdef CONFIG_APM_MODULE
-+EXPORT_SYMBOL(default_idle);
-+#endif
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+extern cpumask_t cpu_initialized;
-+static inline void play_dead(void)
-+{
-+ idle_task_exit();
-+ local_irq_disable();
-+ cpu_clear(smp_processor_id(), cpu_initialized);
-+ preempt_enable_no_resched();
-+ HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
-+ /* Same as drivers/xen/core/smpboot.c:cpu_bringup(). */
-+ cpu_init();
-+ touch_softlockup_watchdog();
-+ preempt_disable();
-+ local_irq_enable();
-+}
-+#else
-+static inline void play_dead(void)
-+{
-+ BUG();
-+}
-+#endif /* CONFIG_HOTPLUG_CPU */
-+
-+/*
-+ * The idle thread. There's no useful work to be
-+ * done, so just try to conserve power and have a
-+ * low exit latency (i.e. sit in a loop waiting for
-+ * somebody to say that they'd like to reschedule)
-+ */
-+void cpu_idle(void)
-+{
-+ int cpu = smp_processor_id();
-+
-+ set_thread_flag(TIF_POLLING_NRFLAG);
-+
-+ /* endless idle loop with no priority at all */
-+ while (1) {
-+ while (!need_resched()) {
-+
-+ if (__get_cpu_var(cpu_idle_state))
-+ __get_cpu_var(cpu_idle_state) = 0;
-+
-+ rmb();
-+
-+ if (cpu_is_offline(cpu))
-+ play_dead();
-+
-+ __get_cpu_var(irq_stat).idle_timestamp = jiffies;
-+ xen_idle();
-+ }
-+ preempt_enable_no_resched();
-+ schedule();
-+ preempt_disable();
-+ }
-+}
-+
-+void cpu_idle_wait(void)
-+{
-+ unsigned int cpu, this_cpu = get_cpu();
-+ cpumask_t map;
-+
-+ set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
-+ put_cpu();
-+
-+ cpus_clear(map);
-+ for_each_online_cpu(cpu) {
-+ per_cpu(cpu_idle_state, cpu) = 1;
-+ cpu_set(cpu, map);
-+ }
-+
-+ __get_cpu_var(cpu_idle_state) = 0;
-+
-+ wmb();
-+ do {
-+ ssleep(1);
-+ for_each_online_cpu(cpu) {
-+ if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
-+ cpu_clear(cpu, map);
-+ }
-+ cpus_and(map, map, cpu_online_map);
-+ } while (!cpus_empty(map));
-+}
-+EXPORT_SYMBOL_GPL(cpu_idle_wait);
-+
-+/* XXX XEN doesn't use mwait_idle(), select_idle_routine(), idle_setup(). */
-+/* Always use xen_idle() instead. */
-+void __devinit select_idle_routine(const struct cpuinfo_x86 *c) {}
-+
-+void show_regs(struct pt_regs * regs)
-+{
-+ unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
-+
-+ printk("\n");
-+ printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
-+ printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id());
-+ print_symbol("EIP is at %s\n", regs->eip);
-+
-+ if (user_mode(regs))
-+ printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
-+ printk(" EFLAGS: %08lx %s (%s %.*s)\n",
-+ regs->eflags, print_tainted(), system_utsname.release,
-+ (int)strcspn(system_utsname.version, " "),
-+ system_utsname.version);
-+ printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
-+ regs->eax,regs->ebx,regs->ecx,regs->edx);
-+ printk("ESI: %08lx EDI: %08lx EBP: %08lx",
-+ regs->esi, regs->edi, regs->ebp);
-+ printk(" DS: %04x ES: %04x\n",
-+ 0xffff & regs->xds,0xffff & regs->xes);
-+
-+ cr0 = read_cr0();
-+ cr2 = read_cr2();
-+ cr3 = read_cr3();
-+ cr4 = read_cr4_safe();
-+ printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
-+ show_trace(NULL, &regs->esp);
-+}
-+
-+/*
-+ * This gets run with %ebx containing the
-+ * function to call, and %edx containing
-+ * the "args".
-+ */
-+extern void kernel_thread_helper(void);
-+__asm__(".section .text\n"
-+ ".align 4\n"
-+ "kernel_thread_helper:\n\t"
-+ "movl %edx,%eax\n\t"
-+ "pushl %edx\n\t"
-+ "call *%ebx\n\t"
-+ "pushl %eax\n\t"
-+ "call do_exit\n"
-+ ".previous");
-+
-+/*
-+ * Create a kernel thread
-+ */
-+int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
-+{
-+ struct pt_regs regs;
-+
-+ memset(&regs, 0, sizeof(regs));
-+
-+ regs.ebx = (unsigned long) fn;
-+ regs.edx = (unsigned long) arg;
-+
-+ regs.xds = __USER_DS;
-+ regs.xes = __USER_DS;
-+ regs.orig_eax = -1;
-+ regs.eip = (unsigned long) kernel_thread_helper;
-+ regs.xcs = GET_KERNEL_CS();
-+ regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;
-+
-+ /* Ok, create the new process.. */
-+ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
-+}
-+EXPORT_SYMBOL(kernel_thread);
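
A hedged sketch of a typical 2.6-era caller of kernel_thread(); the worker function and the clone flags are illustrative, not taken from this patch. The return value is the new thread's PID, or a negative errno on failure.

/* Illustrative caller; "example_worker" is a hypothetical name. */
#include <linux/sched.h>
#include <linux/kernel.h>

static int example_worker(void *arg)
{
	daemonize("example-worker");	/* detach from the spawning context */
	/* ... do the actual work ... */
	return 0;
}

static void example_start_worker(void)
{
	int pid = kernel_thread(example_worker, NULL, CLONE_FS | CLONE_FILES);

	if (pid < 0)
		printk(KERN_ERR "example: kernel_thread failed: %d\n", pid);
}
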
-+
-+/*
-+ * Free current thread data structures etc..
-+ */
-+void exit_thread(void)
-+{
-+ struct task_struct *tsk = current;
-+ struct thread_struct *t = &tsk->thread;
-+
-+ /*
-+ * Remove function-return probe instances associated with this task
-+ * and put them back on the free list. Do not insert an exit probe for
-+ * this function, it will be disabled by kprobe_flush_task if you do.
-+ */
-+ kprobe_flush_task(tsk);
-+
-+ /* The process may have allocated an io port bitmap... nuke it. */
-+ if (unlikely(NULL != t->io_bitmap_ptr)) {
-+ physdev_op_t op = { 0 };
-+ op.cmd = PHYSDEVOP_SET_IOBITMAP;
-+ HYPERVISOR_physdev_op(&op);
-+ kfree(t->io_bitmap_ptr);
-+ t->io_bitmap_ptr = NULL;
-+ }
-+}
-+
-+void flush_thread(void)
-+{
-+ struct task_struct *tsk = current;
-+
-+ memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
-+ memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
-+ /*
-+ * Forget coprocessor state..
-+ */
-+ clear_fpu(tsk);
-+ clear_used_math();
-+}
-+
-+void release_thread(struct task_struct *dead_task)
-+{
-+ BUG_ON(dead_task->mm);
-+ release_vm86_irqs(dead_task);
-+}
-+
-+/*
-+ * This gets called before we allocate a new thread and copy
-+ * the current task into it.
-+ */
-+void prepare_to_copy(struct task_struct *tsk)
-+{
-+ unlazy_fpu(tsk);
-+}
-+
-+int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
-+ unsigned long unused,
-+ struct task_struct * p, struct pt_regs * regs)
-+{
-+ struct pt_regs * childregs;
-+ struct task_struct *tsk;
-+ int err;
-+
-+ childregs = task_pt_regs(p);
-+ *childregs = *regs;
-+ childregs->eax = 0;
-+ childregs->esp = esp;
-+
-+ p->thread.esp = (unsigned long) childregs;
-+ p->thread.esp0 = (unsigned long) (childregs+1);
-+
-+ p->thread.eip = (unsigned long) ret_from_fork;
-+
-+ savesegment(fs,p->thread.fs);
-+ savesegment(gs,p->thread.gs);
-+
-+ tsk = current;
-+ if (unlikely(NULL != tsk->thread.io_bitmap_ptr)) {
-+ p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
-+ if (!p->thread.io_bitmap_ptr) {
-+ p->thread.io_bitmap_max = 0;
-+ return -ENOMEM;
-+ }
-+ memcpy(p->thread.io_bitmap_ptr, tsk->thread.io_bitmap_ptr,
-+ IO_BITMAP_BYTES);
-+ }
-+
-+ /*
-+ * Set a new TLS for the child thread?
-+ */
-+ if (clone_flags & CLONE_SETTLS) {
-+ struct desc_struct *desc;
-+ struct user_desc info;
-+ int idx;
-+
-+ err = -EFAULT;
-+ if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info)))
-+ goto out;
-+ err = -EINVAL;
-+ if (LDT_empty(&info))
-+ goto out;
-+
-+ idx = info.entry_number;
-+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
-+ goto out;
-+
-+ desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
-+ desc->a = LDT_entry_a(&info);
-+ desc->b = LDT_entry_b(&info);
-+ }
-+
-+ p->thread.iopl = current->thread.iopl;
-+
-+ err = 0;
-+ out:
-+ if (err && p->thread.io_bitmap_ptr) {
-+ kfree(p->thread.io_bitmap_ptr);
-+ p->thread.io_bitmap_max = 0;
-+ }
-+ return err;
-+}
-+
-+/*
-+ * fill in the user structure for a core dump..
-+ */
-+void dump_thread(struct pt_regs * regs, struct user * dump)
-+{
-+ int i;
-+
-+/* changed the size calculations - should hopefully work better. lbt */
-+ dump->magic = CMAGIC;
-+ dump->start_code = 0;
-+ dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
-+ dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
-+ dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
-+ dump->u_dsize -= dump->u_tsize;
-+ dump->u_ssize = 0;
-+ for (i = 0; i < 8; i++)
-+ dump->u_debugreg[i] = current->thread.debugreg[i];
-+
-+ if (dump->start_stack < TASK_SIZE)
-+ dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
-+
-+ dump->regs.ebx = regs->ebx;
-+ dump->regs.ecx = regs->ecx;
-+ dump->regs.edx = regs->edx;
-+ dump->regs.esi = regs->esi;
-+ dump->regs.edi = regs->edi;
-+ dump->regs.ebp = regs->ebp;
-+ dump->regs.eax = regs->eax;
-+ dump->regs.ds = regs->xds;
-+ dump->regs.es = regs->xes;
-+ savesegment(fs,dump->regs.fs);
-+ savesegment(gs,dump->regs.gs);
-+ dump->regs.orig_eax = regs->orig_eax;
-+ dump->regs.eip = regs->eip;
-+ dump->regs.cs = regs->xcs;
-+ dump->regs.eflags = regs->eflags;
-+ dump->regs.esp = regs->esp;
-+ dump->regs.ss = regs->xss;
-+
-+ dump->u_fpvalid = dump_fpu (regs, &dump->i387);
-+}
-+EXPORT_SYMBOL(dump_thread);
-+
-+/*
-+ * Capture the user space registers if the task is not running (in user space)
-+ */
-+int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
-+{
-+ struct pt_regs ptregs = *task_pt_regs(tsk);
-+ ptregs.xcs &= 0xffff;
-+ ptregs.xds &= 0xffff;
-+ ptregs.xes &= 0xffff;
-+ ptregs.xss &= 0xffff;
-+
-+ elf_core_copy_regs(regs, &ptregs);
-+
-+ return 1;
-+}
-+
-+/*
-+ * This function selects if the context switch from prev to next
-+ * has to tweak the TSC disable bit in the cr4.
-+ */
-+static inline void disable_tsc(struct task_struct *prev_p,
-+ struct task_struct *next_p)
-+{
-+ struct thread_info *prev, *next;
-+
-+ /*
-+ * gcc should eliminate the ->thread_info dereference if
-+ * has_secure_computing returns 0 at compile time (SECCOMP=n).
-+ */
-+ prev = task_thread_info(prev_p);
-+ next = task_thread_info(next_p);
-+
-+ if (has_secure_computing(prev) || has_secure_computing(next)) {
-+ /* slow path here */
-+ if (has_secure_computing(prev) &&
-+ !has_secure_computing(next)) {
-+ write_cr4(read_cr4() & ~X86_CR4_TSD);
-+ } else if (!has_secure_computing(prev) &&
-+ has_secure_computing(next))
-+ write_cr4(read_cr4() | X86_CR4_TSD);
-+ }
-+}
-+
-+/*
-+ * switch_to(x,y) should switch tasks from x to y.
-+ *
-+ * We fsave/fwait so that an exception goes off at the right time
-+ * (as a call from the fsave or fwait in effect) rather than to
-+ * the wrong process. Lazy FP saving no longer makes any sense
-+ * with modern CPU's, and this simplifies a lot of things (SMP
-+ * and UP become the same).
-+ *
-+ * NOTE! We used to use the x86 hardware context switching. The
-+ * reason for not using it any more becomes apparent when you
-+ * try to recover gracefully from saved state that is no longer
-+ * valid (stale segment register values in particular). With the
-+ * hardware task-switch, there is no way to fix up bad state in
-+ * a reasonable manner.
-+ *
-+ * The fact that Intel documents the hardware task-switching to
-+ * be slow is a fairly red herring - this code is not noticeably
-+ * faster. However, there _is_ some room for improvement here,
-+ * so the performance issues may eventually be a valid point.
-+ * More important, however, is the fact that this allows us much
-+ * more flexibility.
-+ *
-+ * The return value (in %eax) will be the "prev" task after
-+ * the task-switch, and shows up in ret_from_fork in entry.S,
-+ * for example.
-+ */
-+struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
-+{
-+ struct thread_struct *prev = &prev_p->thread,
-+ *next = &next_p->thread;
-+ int cpu = smp_processor_id();
-+#ifndef CONFIG_X86_NO_TSS
-+ struct tss_struct *tss = &per_cpu(init_tss, cpu);
-+#endif
-+ physdev_op_t iopl_op, iobmp_op;
-+ multicall_entry_t _mcl[8], *mcl = _mcl;
-+
-+ /* XEN NOTE: FS/GS saved in switch_mm(), not here. */
-+
-+ /*
-+ * This is basically '__unlazy_fpu', except that we queue a
-+ * multicall to indicate FPU task switch, rather than
-+ * synchronously trapping to Xen.
-+ */
-+ if (prev_p->thread_info->status & TS_USEDFPU) {
-+ __save_init_fpu(prev_p); /* _not_ save_init_fpu() */
-+ mcl->op = __HYPERVISOR_fpu_taskswitch;
-+ mcl->args[0] = 1;
-+ mcl++;
-+ }
-+#if 0 /* lazy fpu sanity check */
-+ else BUG_ON(!(read_cr0() & 8));
-+#endif
-+
-+ /*
-+ * Reload esp0.
-+ * This is load_esp0(tss, next) with a multicall.
-+ */
-+ mcl->op = __HYPERVISOR_stack_switch;
-+ mcl->args[0] = __KERNEL_DS;
-+ mcl->args[1] = next->esp0;
-+ mcl++;
-+
-+ /*
-+ * Load the per-thread Thread-Local Storage descriptor.
-+ * This is load_TLS(next, cpu) with multicalls.
-+ */
-+#define C(i) do { \
-+ if (unlikely(next->tls_array[i].a != prev->tls_array[i].a || \
-+ next->tls_array[i].b != prev->tls_array[i].b)) { \
-+ mcl->op = __HYPERVISOR_update_descriptor; \
-+ *(u64 *)&mcl->args[0] = virt_to_machine( \
-+ &get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]);\
-+ *(u64 *)&mcl->args[2] = *(u64 *)&next->tls_array[i]; \
-+ mcl++; \
-+ } \
-+} while (0)
-+ C(0); C(1); C(2);
-+#undef C
-+
-+ if (unlikely(prev->iopl != next->iopl)) {
-+ iopl_op.cmd = PHYSDEVOP_SET_IOPL;
-+ iopl_op.u.set_iopl.iopl = (next->iopl == 0) ? 1 :
-+ (next->iopl >> 12) & 3;
-+ mcl->op = __HYPERVISOR_physdev_op;
-+ mcl->args[0] = (unsigned long)&iopl_op;
-+ mcl++;
-+ }
-+
-+ if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
-+ iobmp_op.cmd =
-+ PHYSDEVOP_SET_IOBITMAP;
-+ iobmp_op.u.set_iobitmap.bitmap =
-+ (char *)next->io_bitmap_ptr;
-+ iobmp_op.u.set_iobitmap.nr_ports =
-+ next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
-+ mcl->op = __HYPERVISOR_physdev_op;
-+ mcl->args[0] = (unsigned long)&iobmp_op;
-+ mcl++;
-+ }
-+
-+ (void)HYPERVISOR_multicall(_mcl, mcl - _mcl);
-+
-+ /*
-+ * Restore %fs and %gs if needed.
-+ *
-+	 * Glibc normally leaves %fs zero, and uses %gs for one of
-+	 * the TLS segments.
-+ */
-+ if (unlikely(next->fs))
-+ loadsegment(fs, next->fs);
-+
-+ if (next->gs)
-+ loadsegment(gs, next->gs);
-+
-+ /*
-+ * Now maybe reload the debug registers
-+ */
-+ if (unlikely(next->debugreg[7])) {
-+ set_debugreg(next->debugreg[0], 0);
-+ set_debugreg(next->debugreg[1], 1);
-+ set_debugreg(next->debugreg[2], 2);
-+ set_debugreg(next->debugreg[3], 3);
-+ /* no 4 and 5 */
-+ set_debugreg(next->debugreg[6], 6);
-+ set_debugreg(next->debugreg[7], 7);
-+ }
-+
-+ disable_tsc(prev_p, next_p);
-+
-+ return prev_p;
-+}
-+
-+asmlinkage int sys_fork(struct pt_regs regs)
-+{
-+ return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
-+}
-+
-+asmlinkage int sys_clone(struct pt_regs regs)
-+{
-+ unsigned long clone_flags;
-+ unsigned long newsp;
-+ int __user *parent_tidptr, *child_tidptr;
-+
-+ clone_flags = regs.ebx;
-+ newsp = regs.ecx;
-+ parent_tidptr = (int __user *)regs.edx;
-+ child_tidptr = (int __user *)regs.edi;
-+ if (!newsp)
-+ newsp = regs.esp;
-+ return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
-+}
-+
-+/*
-+ * This is trivial, and on the face of it looks like it
-+ * could equally well be done in user mode.
-+ *
-+ * Not so, for quite unobvious reasons - register pressure.
-+ * In user mode vfork() cannot have a stack frame, and if
-+ * done by calling the "clone()" system call directly, you
-+ * do not have enough call-clobbered registers to hold all
-+ * the information you need.
-+ */
-+asmlinkage int sys_vfork(struct pt_regs regs)
-+{
-+ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
-+}
-+
-+/*
-+ * sys_execve() executes a new program.
-+ */
-+asmlinkage int sys_execve(struct pt_regs regs)
-+{
-+ int error;
-+ char * filename;
-+
-+ filename = getname((char __user *) regs.ebx);
-+ error = PTR_ERR(filename);
-+ if (IS_ERR(filename))
-+ goto out;
-+ error = do_execve(filename,
-+ (char __user * __user *) regs.ecx,
-+ (char __user * __user *) regs.edx,
-+ &regs);
-+ if (error == 0) {
-+ task_lock(current);
-+ current->ptrace &= ~PT_DTRACE;
-+ task_unlock(current);
-+ /* Make sure we don't return using sysenter.. */
-+ set_thread_flag(TIF_IRET);
-+ }
-+ putname(filename);
-+out:
-+ return error;
-+}
-+
-+#define top_esp (THREAD_SIZE - sizeof(unsigned long))
-+#define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long))
-+
-+unsigned long get_wchan(struct task_struct *p)
-+{
-+ unsigned long ebp, esp, eip;
-+ unsigned long stack_page;
-+ int count = 0;
-+ if (!p || p == current || p->state == TASK_RUNNING)
-+ return 0;
-+ stack_page = (unsigned long)task_stack_page(p);
-+ esp = p->thread.esp;
-+ if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
-+ return 0;
-+ /* include/asm-i386/system.h:switch_to() pushes ebp last. */
-+ ebp = *(unsigned long *) esp;
-+ do {
-+ if (ebp < stack_page || ebp > top_ebp+stack_page)
-+ return 0;
-+ eip = *(unsigned long *) (ebp+4);
-+ if (!in_sched_functions(eip))
-+ return eip;
-+ ebp = *(unsigned long *) ebp;
-+ } while (count++ < 16);
-+ return 0;
-+}
-+EXPORT_SYMBOL(get_wchan);
-+
-+/*
-+ * sys_alloc_thread_area: get a yet unused TLS descriptor index.
-+ */
-+static int get_free_idx(void)
-+{
-+ struct thread_struct *t = &current->thread;
-+ int idx;
-+
-+ for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
-+ if (desc_empty(t->tls_array + idx))
-+ return idx + GDT_ENTRY_TLS_MIN;
-+ return -ESRCH;
-+}
-+
-+/*
-+ * Set a given TLS descriptor:
-+ */
-+asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
-+{
-+ struct thread_struct *t = &current->thread;
-+ struct user_desc info;
-+ struct desc_struct *desc;
-+ int cpu, idx;
-+
-+ if (copy_from_user(&info, u_info, sizeof(info)))
-+ return -EFAULT;
-+ idx = info.entry_number;
-+
-+ /*
-+ * index -1 means the kernel should try to find and
-+ * allocate an empty descriptor:
-+ */
-+ if (idx == -1) {
-+ idx = get_free_idx();
-+ if (idx < 0)
-+ return idx;
-+ if (put_user(idx, &u_info->entry_number))
-+ return -EFAULT;
-+ }
-+
-+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
-+ return -EINVAL;
-+
-+ desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
-+
-+ /*
-+ * We must not get preempted while modifying the TLS.
-+ */
-+ cpu = get_cpu();
-+
-+ if (LDT_empty(&info)) {
-+ desc->a = 0;
-+ desc->b = 0;
-+ } else {
-+ desc->a = LDT_entry_a(&info);
-+ desc->b = LDT_entry_b(&info);
-+ }
-+ load_TLS(t, cpu);
-+
-+ put_cpu();
-+
-+ return 0;
-+}
-+
-+/*
-+ * Get the current Thread-Local Storage area:
-+ */
-+
-+#define GET_BASE(desc) ( \
-+ (((desc)->a >> 16) & 0x0000ffff) | \
-+ (((desc)->b << 16) & 0x00ff0000) | \
-+ ( (desc)->b & 0xff000000) )
-+
-+#define GET_LIMIT(desc) ( \
-+ ((desc)->a & 0x0ffff) | \
-+ ((desc)->b & 0xf0000) )
-+
-+#define GET_32BIT(desc) (((desc)->b >> 22) & 1)
-+#define GET_CONTENTS(desc) (((desc)->b >> 10) & 3)
-+#define GET_WRITABLE(desc) (((desc)->b >> 9) & 1)
-+#define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1)
-+#define GET_PRESENT(desc) (((desc)->b >> 15) & 1)
-+#define GET_USEABLE(desc) (((desc)->b >> 20) & 1)
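
GET_BASE() and GET_LIMIT() reassemble the fields that a GDT descriptor scatters across its two 32-bit words: base bits 15:0 live in the top half of word a, bits 23:16 and 31:24 in word b, and limit bits 19:16 sit in bits 19:16 of word b. A self-contained round-trip sketch with made-up base and limit values:

#include <stdio.h>

#define GET_BASE(a, b)	((((a) >> 16) & 0x0000ffff) | \
			 (((b) << 16) & 0x00ff0000) | \
			 ((b) & 0xff000000))
#define GET_LIMIT(a, b)	(((a) & 0x0ffff) | ((b) & 0xf0000))

int main(void)
{
	unsigned int base = 0x12345678, limit = 0xabcde;	/* made-up values */
	unsigned int a = (limit & 0xffff) | ((base & 0xffff) << 16);
	unsigned int b = ((base >> 16) & 0xff) | (base & 0xff000000)
			 | (limit & 0xf0000);

	printf("base  0x%08x\n", GET_BASE(a, b));	/* 0x12345678 */
	printf("limit 0x%05x\n", GET_LIMIT(a, b));	/* 0xabcde */
	return 0;
}
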
-+
-+asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
-+{
-+ struct user_desc info;
-+ struct desc_struct *desc;
-+ int idx;
-+
-+ if (get_user(idx, &u_info->entry_number))
-+ return -EFAULT;
-+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
-+ return -EINVAL;
-+
-+ memset(&info, 0, sizeof(info));
-+
-+ desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
-+
-+ info.entry_number = idx;
-+ info.base_addr = GET_BASE(desc);
-+ info.limit = GET_LIMIT(desc);
-+ info.seg_32bit = GET_32BIT(desc);
-+ info.contents = GET_CONTENTS(desc);
-+ info.read_exec_only = !GET_WRITABLE(desc);
-+ info.limit_in_pages = GET_LIMIT_PAGES(desc);
-+ info.seg_not_present = !GET_PRESENT(desc);
-+ info.useable = GET_USEABLE(desc);
-+
-+ if (copy_to_user(u_info, &info, sizeof(info)))
-+ return -EFAULT;
-+ return 0;
-+}
-+
-+unsigned long arch_align_stack(unsigned long sp)
-+{
-+ if (randomize_va_space)
-+ sp -= get_random_int() % 8192;
-+ return sp & ~0xf;
-+}
-diff --git a/arch/i386/kernel/quirks-xen.c b/arch/i386/kernel/quirks-xen.c
-new file mode 100644
-index 0000000..39d9ed1
---- /dev/null
-+++ b/arch/i386/kernel/quirks-xen.c
-@@ -0,0 +1,48 @@
-+/*
-+ * This file contains work-arounds for x86 and x86_64 platform bugs.
-+ */
-+#include <linux/config.h>
-+#include <linux/pci.h>
-+#include <linux/irq.h>
-+
-+#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
-+
-+static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
-+{
-+ u8 config, rev;
-+ u32 word;
-+
-+ /* BIOS may enable hardware IRQ balancing for
-+ * E7520/E7320/E7525(revision ID 0x9 and below)
-+ * based platforms.
-+ * Disable SW irqbalance/affinity on those platforms.
-+ */
-+ pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
-+ if (rev > 0x9)
-+ return;
-+
-+ printk(KERN_INFO "Intel E7520/7320/7525 detected.");
-+
-+ /* enable access to config space*/
-+ pci_read_config_byte(dev, 0xf4, &config);
-+ pci_write_config_byte(dev, 0xf4, config|0x2);
-+
-+ /* read xTPR register */
-+ raw_pci_ops->read(0, 0, 0x40, 0x4c, 2, &word);
-+
-+ if (!(word & (1 << 13))) {
-+ dom0_op_t op;
-+ printk(KERN_INFO "Disabling irq balancing and affinity\n");
-+ op.cmd = DOM0_PLATFORM_QUIRK;
-+ op.u.platform_quirk.quirk_id = QUIRK_NOIRQBALANCING;
-+ (void)HYPERVISOR_dom0_op(&op);
-+ }
-+
-+ /* put back the original value for config space*/
-+ if (!(config & 0x2))
-+ pci_write_config_byte(dev, 0xf4, config);
-+}
-+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_intel_irqbalance);
-+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_intel_irqbalance);
-+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_intel_irqbalance);
-+#endif
-diff --git a/arch/i386/kernel/setup-xen.c b/arch/i386/kernel/setup-xen.c
-new file mode 100644
-index 0000000..5368d2f
---- /dev/null
-+++ b/arch/i386/kernel/setup-xen.c
-@@ -0,0 +1,1874 @@
-+/*
-+ * linux/arch/i386/kernel/setup.c
-+ *
-+ * Copyright (C) 1995 Linus Torvalds
-+ *
-+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
-+ *
-+ * Memory region support
-+ * David Parsons <orc@pell.chi.il.us>, July-August 1999
-+ *
-+ * Added E820 sanitization routine (removes overlapping memory regions);
-+ * Brian Moyle <bmoyle@mvista.com>, February 2001
-+ *
-+ * Moved CPU detection code to cpu/${cpu}.c
-+ * Patrick Mochel <mochel@osdl.org>, March 2002
-+ *
-+ * Provisions for empty E820 memory regions (reported by certain BIOSes).
-+ * Alex Achenbach <xela@slit.de>, December 2002.
-+ *
-+ */
-+
-+/*
-+ * This file handles the architecture-dependent parts of initialization
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/sched.h>
-+#include <linux/mm.h>
-+#include <linux/mmzone.h>
-+#include <linux/tty.h>
-+#include <linux/ioport.h>
-+#include <linux/acpi.h>
-+#include <linux/apm_bios.h>
-+#include <linux/initrd.h>
-+#include <linux/bootmem.h>
-+#include <linux/seq_file.h>
-+#include <linux/console.h>
-+#include <linux/mca.h>
-+#include <linux/root_dev.h>
-+#include <linux/highmem.h>
-+#include <linux/module.h>
-+#include <linux/efi.h>
-+#include <linux/init.h>
-+#include <linux/edd.h>
-+#include <linux/nodemask.h>
-+#include <linux/kernel.h>
-+#include <linux/percpu.h>
-+#include <linux/notifier.h>
-+#include <linux/kexec.h>
-+#include <linux/crash_dump.h>
-+#include <linux/dmi.h>
-+
-+#include <video/edid.h>
-+
-+#include <asm/apic.h>
-+#include <asm/e820.h>
-+#include <asm/mpspec.h>
-+#include <asm/setup.h>
-+#include <asm/arch_hooks.h>
-+#include <asm/sections.h>
-+#include <asm/io_apic.h>
-+#include <asm/ist.h>
-+#include <asm/io.h>
-+#include <asm/hypervisor.h>
-+#include <xen/interface/physdev.h>
-+#include <xen/interface/memory.h>
-+#include <xen/features.h>
-+#include "setup_arch_pre.h"
-+#include <bios_ebda.h>
-+
-+/* Forward Declaration. */
-+void __init find_max_pfn(void);
-+
-+/* Allows setting of maximum possible memory size */
-+static unsigned long xen_override_max_pfn;
-+
-+static int xen_panic_event(struct notifier_block *, unsigned long, void *);
-+static struct notifier_block xen_panic_block = {
-+ xen_panic_event, NULL, 0 /* try to go last */
-+};
-+
-+extern char hypercall_page[PAGE_SIZE];
-+EXPORT_SYMBOL(hypercall_page);
-+
-+int disable_pse __devinitdata = 0;
-+
-+/*
-+ * Machine setup..
-+ */
-+
-+#ifdef CONFIG_EFI
-+int efi_enabled = 0;
-+EXPORT_SYMBOL(efi_enabled);
-+#endif
-+
-+/* cpu data as detected by the assembly code in head.S */
-+struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
-+/* common cpu data for all cpus */
-+struct cpuinfo_x86 boot_cpu_data __read_mostly = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
-+EXPORT_SYMBOL(boot_cpu_data);
-+
-+unsigned long mmu_cr4_features;
-+
-+#ifdef CONFIG_ACPI
-+ int acpi_disabled = 0;
-+#else
-+ int acpi_disabled = 1;
-+#endif
-+EXPORT_SYMBOL(acpi_disabled);
-+
-+#ifdef CONFIG_ACPI
-+int __initdata acpi_force = 0;
-+extern acpi_interrupt_flags acpi_sci_flags;
-+#endif
-+
-+/* for MCA, but anyone else can use it if they want */
-+unsigned int machine_id;
-+#ifdef CONFIG_MCA
-+EXPORT_SYMBOL(machine_id);
-+#endif
-+unsigned int machine_submodel_id;
-+unsigned int BIOS_revision;
-+unsigned int mca_pentium_flag;
-+
-+/* For PCI or other memory-mapped resources */
-+unsigned long pci_mem_start = 0x10000000;
-+#ifdef CONFIG_PCI
-+EXPORT_SYMBOL(pci_mem_start);
-+#endif
-+
-+/* Boot loader ID as an integer, for the benefit of proc_dointvec */
-+int bootloader_type = 0x4e4558; /* XXX get proper XEN boot loader ID */
-+
-+/* user-defined highmem size */
-+static unsigned int highmem_pages = -1;
-+
-+/*
-+ * Setup options
-+ */
-+#ifdef DRIVE_INFO
-+struct drive_info_struct { char dummy[32]; } drive_info;
-+#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || \
-+ defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
-+EXPORT_SYMBOL(drive_info);
-+#endif
-+#endif
-+struct screen_info screen_info;
-+EXPORT_SYMBOL(screen_info);
-+#ifdef APM_BIOS_INFO
-+struct apm_info apm_info;
-+EXPORT_SYMBOL(apm_info);
-+#endif
-+struct sys_desc_table_struct {
-+ unsigned short length;
-+ unsigned char table[0];
-+};
-+#ifdef EDID_INFO
-+struct edid_info edid_info;
-+EXPORT_SYMBOL_GPL(edid_info);
-+#endif
-+#ifdef IST_INFO
-+struct ist_info ist_info;
-+#if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
-+ defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
-+EXPORT_SYMBOL(ist_info);
-+#endif
-+#endif
-+struct e820map e820;
-+
-+extern void early_cpu_init(void);
-+extern void generic_apic_probe(char *);
-+extern int root_mountflags;
-+
-+#define RAMDISK_IMAGE_START_MASK 0x07FF
-+#define RAMDISK_PROMPT_FLAG 0x8000
-+#define RAMDISK_LOAD_FLAG 0x4000
-+
-+#ifndef MOUNT_ROOT_RDONLY
-+#define MOUNT_ROOT_RDONLY 1
-+#endif
-+
-+static char command_line[COMMAND_LINE_SIZE];
-+
-+static struct resource data_resource = {
-+ .name = "Kernel data",
-+ .start = 0,
-+ .end = 0,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
-+};
-+
-+static struct resource code_resource = {
-+ .name = "Kernel code",
-+ .start = 0,
-+ .end = 0,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
-+};
-+
-+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-+static struct resource system_rom_resource = {
-+ .name = "System ROM",
-+ .start = 0xf0000,
-+ .end = 0xfffff,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+};
-+
-+static struct resource extension_rom_resource = {
-+ .name = "Extension ROM",
-+ .start = 0xe0000,
-+ .end = 0xeffff,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+};
-+
-+static struct resource adapter_rom_resources[] = { {
-+ .name = "Adapter ROM",
-+ .start = 0xc8000,
-+ .end = 0,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+}, {
-+ .name = "Adapter ROM",
-+ .start = 0,
-+ .end = 0,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+}, {
-+ .name = "Adapter ROM",
-+ .start = 0,
-+ .end = 0,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+}, {
-+ .name = "Adapter ROM",
-+ .start = 0,
-+ .end = 0,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+}, {
-+ .name = "Adapter ROM",
-+ .start = 0,
-+ .end = 0,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+}, {
-+ .name = "Adapter ROM",
-+ .start = 0,
-+ .end = 0,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+} };
-+
-+#define ADAPTER_ROM_RESOURCES \
-+ (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
-+
-+static struct resource video_rom_resource = {
-+ .name = "Video ROM",
-+ .start = 0xc0000,
-+ .end = 0xc7fff,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+};
-+#endif
-+
-+static struct resource video_ram_resource = {
-+ .name = "Video RAM area",
-+ .start = 0xa0000,
-+ .end = 0xbffff,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
-+};
-+
-+static struct resource standard_io_resources[] = { {
-+ .name = "dma1",
-+ .start = 0x0000,
-+ .end = 0x001f,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+ .name = "pic1",
-+ .start = 0x0020,
-+ .end = 0x0021,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+ .name = "timer0",
-+ .start = 0x0040,
-+ .end = 0x0043,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+ .name = "timer1",
-+ .start = 0x0050,
-+ .end = 0x0053,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+ .name = "keyboard",
-+ .start = 0x0060,
-+ .end = 0x006f,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+ .name = "dma page reg",
-+ .start = 0x0080,
-+ .end = 0x008f,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+ .name = "pic2",
-+ .start = 0x00a0,
-+ .end = 0x00a1,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+ .name = "dma2",
-+ .start = 0x00c0,
-+ .end = 0x00df,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+ .name = "fpu",
-+ .start = 0x00f0,
-+ .end = 0x00ff,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
-+} };
-+
-+#define STANDARD_IO_RESOURCES \
-+ (sizeof standard_io_resources / sizeof standard_io_resources[0])
-+
-+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-+#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
-+
-+static int __init romchecksum(unsigned char *rom, unsigned long length)
-+{
-+ unsigned char *p, sum = 0;
-+
-+ for (p = rom; p < rom + length; p++)
-+ sum += *p;
-+ return sum == 0;
-+}
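
Expansion-ROM scanning relies on two conventions used by probe_roms() just below: the image begins with the little-endian signature 0xAA55 (byte 0 is 0x55, byte 1 is 0xAA), byte 2 gives the length in 512-byte units, and a valid image's bytes sum to zero modulo 256. A standalone sketch that builds and validates a fake one-block ROM:

#include <stdio.h>

static int checksum_ok(const unsigned char *rom, unsigned long length)
{
	unsigned char sum = 0;

	while (length--)
		sum += *rom++;
	return sum == 0;
}

int main(void)
{
	static unsigned char rom[512];		/* fake one-block ROM image */
	unsigned char sum = 0;
	unsigned long i;

	rom[0] = 0x55; rom[1] = 0xaa;		/* 0xaa55 signature, little endian */
	rom[2] = 1;				/* length in 512-byte units */
	for (i = 0; i < 511; i++)
		sum += rom[i];
	rom[511] = (unsigned char)-sum;		/* make all bytes sum to zero */

	printf("signature ok: %d\n", rom[0] == 0x55 && rom[1] == 0xaa);	/* 1 */
	printf("checksum  ok: %d\n", checksum_ok(rom, rom[2] * 512));	/* 1 */
	return 0;
}
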
-+
-+static void __init probe_roms(void)
-+{
-+ unsigned long start, length, upper;
-+ unsigned char *rom;
-+ int i;
-+
-+ /* Nothing to do if not running in dom0. */
-+ if (!(xen_start_info->flags & SIF_INITDOMAIN))
-+ return;
-+
-+ /* video rom */
-+ upper = adapter_rom_resources[0].start;
-+ for (start = video_rom_resource.start; start < upper; start += 2048) {
-+ rom = isa_bus_to_virt(start);
-+ if (!romsignature(rom))
-+ continue;
-+
-+ video_rom_resource.start = start;
-+
-+ /* 0 < length <= 0x7f * 512, historically */
-+ length = rom[2] * 512;
-+
-+ /* if checksum okay, trust length byte */
-+ if (length && romchecksum(rom, length))
-+ video_rom_resource.end = start + length - 1;
-+
-+ request_resource(&iomem_resource, &video_rom_resource);
-+ break;
-+ }
-+
-+ start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
-+ if (start < upper)
-+ start = upper;
-+
-+ /* system rom */
-+ request_resource(&iomem_resource, &system_rom_resource);
-+ upper = system_rom_resource.start;
-+
-+ /* check for extension rom (ignore length byte!) */
-+ rom = isa_bus_to_virt(extension_rom_resource.start);
-+ if (romsignature(rom)) {
-+ length = extension_rom_resource.end - extension_rom_resource.start + 1;
-+ if (romchecksum(rom, length)) {
-+ request_resource(&iomem_resource, &extension_rom_resource);
-+ upper = extension_rom_resource.start;
-+ }
-+ }
-+
-+ /* check for adapter roms on 2k boundaries */
-+ for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
-+ rom = isa_bus_to_virt(start);
-+ if (!romsignature(rom))
-+ continue;
-+
-+ /* 0 < length <= 0x7f * 512, historically */
-+ length = rom[2] * 512;
-+
-+ /* but accept any length that fits if checksum okay */
-+ if (!length || start + length > upper || !romchecksum(rom, length))
-+ continue;
-+
-+ adapter_rom_resources[i].start = start;
-+ adapter_rom_resources[i].end = start + length - 1;
-+ request_resource(&iomem_resource, &adapter_rom_resources[i]);
-+
-+ start = adapter_rom_resources[i++].end & ~2047UL;
-+ }
-+}
-+#endif
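
The probe above accepts a candidate ROM only if it starts with the 0xaa55 signature (bytes 0x55, 0xaa on little-endian) and its bytes, including the trailing checksum byte, sum to zero modulo 256. A minimal standalone sketch of that validity rule, with a fabricated buffer (illustrative only, not the kernel code):

    #include <stdio.h>
    #include <string.h>

    static int rom_valid(const unsigned char *rom, unsigned long length)
    {
        unsigned char sum = 0;
        unsigned long i;

        if (rom[0] != 0x55 || rom[1] != 0xaa)   /* little-endian 0xaa55 */
            return 0;
        for (i = 0; i < length; i++)
            sum += rom[i];
        return sum == 0;            /* checksum byte makes the sum 0 */
    }

    int main(void)
    {
        unsigned char rom[512];
        unsigned char sum = 0;
        unsigned long i, len = sizeof(rom);

        memset(rom, 0, sizeof(rom));
        rom[0] = 0x55;
        rom[1] = 0xaa;
        rom[2] = 1;                             /* length: 1 * 512 bytes */
        for (i = 0; i < len - 1; i++)
            sum += rom[i];
        rom[len - 1] = (unsigned char)(0x100 - sum);  /* fix up checksum */

        printf("rom valid: %d\n", rom_valid(rom, len));
        return 0;
    }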
-+
-+/*
-+ * Point at the empty zero page to start with. We map the real shared_info
-+ * page as soon as fixmap is up and running.
-+ */
-+shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
-+EXPORT_SYMBOL(HYPERVISOR_shared_info);
-+
-+unsigned long *phys_to_machine_mapping;
-+unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[16];
-+EXPORT_SYMBOL(phys_to_machine_mapping);
-+
-+/* Raw start-of-day parameters from the hypervisor. */
-+start_info_t *xen_start_info;
-+EXPORT_SYMBOL(xen_start_info);
-+
-+static void __init limit_regions(unsigned long long size)
-+{
-+ unsigned long long current_addr = 0;
-+ int i;
-+
-+ if (efi_enabled) {
-+ efi_memory_desc_t *md;
-+ void *p;
-+
-+ for (p = memmap.map, i = 0; p < memmap.map_end;
-+ p += memmap.desc_size, i++) {
-+ md = p;
-+ current_addr = md->phys_addr + (md->num_pages << 12);
-+ if (md->type == EFI_CONVENTIONAL_MEMORY) {
-+ if (current_addr >= size) {
-+ md->num_pages -=
-+ (((current_addr-size) + PAGE_SIZE-1) >> PAGE_SHIFT);
-+ memmap.nr_map = i + 1;
-+ return;
-+ }
-+ }
-+ }
-+ }
-+ for (i = 0; i < e820.nr_map; i++) {
-+ current_addr = e820.map[i].addr + e820.map[i].size;
-+ if (current_addr < size)
-+ continue;
-+
-+ if (e820.map[i].type != E820_RAM)
-+ continue;
-+
-+ if (e820.map[i].addr >= size) {
-+ /*
-+ * This region starts past the end of the
-+ * requested size, skip it completely.
-+ */
-+ e820.nr_map = i;
-+ } else {
-+ e820.nr_map = i + 1;
-+ e820.map[i].size -= current_addr - size;
-+ }
-+ return;
-+ }
-+}
-+
-+static void __init add_memory_region(unsigned long long start,
-+ unsigned long long size, int type)
-+{
-+ int x;
-+
-+ if (!efi_enabled) {
-+ x = e820.nr_map;
-+
-+ if (x == E820MAX) {
-+ printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
-+ return;
-+ }
-+
-+ e820.map[x].addr = start;
-+ e820.map[x].size = size;
-+ e820.map[x].type = type;
-+ e820.nr_map++;
-+ }
-+} /* add_memory_region */
-+
-+#define E820_DEBUG 1
-+
-+static void __init print_memory_map(char *who)
-+{
-+ int i;
-+
-+ for (i = 0; i < e820.nr_map; i++) {
-+ printk(" %s: %016Lx - %016Lx ", who,
-+ e820.map[i].addr,
-+ e820.map[i].addr + e820.map[i].size);
-+ switch (e820.map[i].type) {
-+ case E820_RAM: printk("(usable)\n");
-+ break;
-+ case E820_RESERVED:
-+ printk("(reserved)\n");
-+ break;
-+ case E820_ACPI:
-+ printk("(ACPI data)\n");
-+ break;
-+ case E820_NVS:
-+ printk("(ACPI NVS)\n");
-+ break;
-+ default: printk("type %lu\n", e820.map[i].type);
-+ break;
-+ }
-+ }
-+}
-+
-+#if 0
-+/*
-+ * Sanitize the BIOS e820 map.
-+ *
-+ * Some e820 responses include overlapping entries. The following
-+ * replaces the original e820 map with a new one, removing overlaps.
-+ *
-+ */
-+struct change_member {
-+ struct e820entry *pbios; /* pointer to original bios entry */
-+ unsigned long long addr; /* address for this change point */
-+};
-+static struct change_member change_point_list[2*E820MAX] __initdata;
-+static struct change_member *change_point[2*E820MAX] __initdata;
-+static struct e820entry *overlap_list[E820MAX] __initdata;
-+static struct e820entry new_bios[E820MAX] __initdata;
-+
-+static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
-+{
-+ struct change_member *change_tmp;
-+ unsigned long current_type, last_type;
-+ unsigned long long last_addr;
-+ int chgidx, still_changing;
-+ int overlap_entries;
-+ int new_bios_entry;
-+ int old_nr, new_nr, chg_nr;
-+ int i;
-+
-+ /*
-+ Visually we're performing the following (1,2,3,4 = memory types)...
-+
-+ Sample memory map (w/overlaps):
-+ ____22__________________
-+ ______________________4_
-+ ____1111________________
-+ _44_____________________
-+ 11111111________________
-+ ____________________33__
-+ ___________44___________
-+ __________33333_________
-+ ______________22________
-+ ___________________2222_
-+ _________111111111______
-+ _____________________11_
-+ _________________4______
-+
-+ Sanitized equivalent (no overlap):
-+ 1_______________________
-+ _44_____________________
-+ ___1____________________
-+ ____22__________________
-+ ______11________________
-+ _________1______________
-+ __________3_____________
-+ ___________44___________
-+ _____________33_________
-+ _______________2________
-+ ________________1_______
-+ _________________4______
-+ ___________________2____
-+ ____________________33__
-+ ______________________4_
-+ */
-+
-+ /* if there's only one memory region, don't bother */
-+ if (*pnr_map < 2)
-+ return -1;
-+
-+ old_nr = *pnr_map;
-+
-+ /* bail out if we find any unreasonable addresses in bios map */
-+ for (i=0; i<old_nr; i++)
-+ if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
-+ return -1;
-+
-+ /* create pointers for initial change-point information (for sorting) */
-+ for (i=0; i < 2*old_nr; i++)
-+ change_point[i] = &change_point_list[i];
-+
-+ /* record all known change-points (starting and ending addresses),
-+ omitting those that are for empty memory regions */
-+ chgidx = 0;
-+ for (i=0; i < old_nr; i++) {
-+ if (biosmap[i].size != 0) {
-+ change_point[chgidx]->addr = biosmap[i].addr;
-+ change_point[chgidx++]->pbios = &biosmap[i];
-+ change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
-+ change_point[chgidx++]->pbios = &biosmap[i];
-+ }
-+ }
-+ chg_nr = chgidx; /* true number of change-points */
-+
-+ /* sort change-point list by memory addresses (low -> high) */
-+ still_changing = 1;
-+ while (still_changing) {
-+ still_changing = 0;
-+ for (i=1; i < chg_nr; i++) {
-+ /* if <current_addr> > <last_addr>, swap */
-+ /* or, if current=<start_addr> & last=<end_addr>, swap */
-+ if ((change_point[i]->addr < change_point[i-1]->addr) ||
-+ ((change_point[i]->addr == change_point[i-1]->addr) &&
-+ (change_point[i]->addr == change_point[i]->pbios->addr) &&
-+ (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
-+ )
-+ {
-+ change_tmp = change_point[i];
-+ change_point[i] = change_point[i-1];
-+ change_point[i-1] = change_tmp;
-+ still_changing=1;
-+ }
-+ }
-+ }
-+
-+ /* create a new bios memory map, removing overlaps */
-+ overlap_entries=0; /* number of entries in the overlap table */
-+ new_bios_entry=0; /* index for creating new bios map entries */
-+ last_type = 0; /* start with undefined memory type */
-+ last_addr = 0; /* start with 0 as last starting address */
-+ /* loop through change-points, determining effect on the new bios map */
-+ for (chgidx=0; chgidx < chg_nr; chgidx++)
-+ {
-+ /* keep track of all overlapping bios entries */
-+ if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
-+ {
-+ /* add map entry to overlap list (> 1 entry implies an overlap) */
-+ overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
-+ }
-+ else
-+ {
-+ /* remove entry from list (order independent, so swap with last) */
-+ for (i=0; i<overlap_entries; i++)
-+ {
-+ if (overlap_list[i] == change_point[chgidx]->pbios)
-+ overlap_list[i] = overlap_list[overlap_entries-1];
-+ }
-+ overlap_entries--;
-+ }
-+ /* if there are overlapping entries, decide which "type" to use */
-+ /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
-+ current_type = 0;
-+ for (i=0; i<overlap_entries; i++)
-+ if (overlap_list[i]->type > current_type)
-+ current_type = overlap_list[i]->type;
-+ /* continue building up new bios map based on this information */
-+ if (current_type != last_type) {
-+ if (last_type != 0) {
-+ new_bios[new_bios_entry].size =
-+ change_point[chgidx]->addr - last_addr;
-+ /* move forward only if the new size was non-zero */
-+ if (new_bios[new_bios_entry].size != 0)
-+ if (++new_bios_entry >= E820MAX)
-+ break; /* no more space left for new bios entries */
-+ }
-+ if (current_type != 0) {
-+ new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
-+ new_bios[new_bios_entry].type = current_type;
-+ last_addr=change_point[chgidx]->addr;
-+ }
-+ last_type = current_type;
-+ }
-+ }
-+ new_nr = new_bios_entry; /* retain count for new bios entries */
-+
-+ /* copy new bios mapping into original location */
-+ memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
-+ *pnr_map = new_nr;
-+
-+ return 0;
-+}
-+
-+/*
-+ * Copy the BIOS e820 map into a safe place.
-+ *
-+ * Sanity-check it while we're at it.
-+ *
-+ * If we're lucky and live on a modern system, the setup code
-+ * will have given us a memory map that we can use to properly
-+ * set up memory. If we aren't, we'll fake a memory map.
-+ *
-+ * We check to see that the memory map contains at least 2 elements
-+ * before we'll use it, because the detection code in setup.S may
-+ * not be perfect, and almost every PC known to man has two memory
-+ * regions: one from 0 to 640k, and one from 1mb up. (The IBM
-+ * thinkpad 560x, for example, does not cooperate with the memory
-+ * detection code.)
-+ */
-+static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
-+{
-+ /* Only one memory region (or negative)? Ignore it */
-+ if (nr_map < 2)
-+ return -1;
-+
-+ do {
-+ unsigned long long start = biosmap->addr;
-+ unsigned long long size = biosmap->size;
-+ unsigned long long end = start + size;
-+ unsigned long type = biosmap->type;
-+
-+ /* Overflow in 64 bits? Ignore the memory map. */
-+ if (start > end)
-+ return -1;
-+
-+ /*
-+ * Some BIOSes claim RAM in the 640k - 1M region.
-+ * Not right. Fix it up.
-+ */
-+ if (type == E820_RAM) {
-+ if (start < 0x100000ULL && end > 0xA0000ULL) {
-+ if (start < 0xA0000ULL)
-+ add_memory_region(start, 0xA0000ULL-start, type);
-+ if (end <= 0x100000ULL)
-+ continue;
-+ start = 0x100000ULL;
-+ size = end - start;
-+ }
-+ }
-+ add_memory_region(start, size, type);
-+ } while (biosmap++,--nr_map);
-+ return 0;
-+}
-+#endif
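
copy_e820_map() above clips RAM entries out of the legacy 640k-1M hole: anything a BIOS claims as RAM inside 0xA0000-0xFFFFF is dropped, keeping the part below 640k and the part at or above 1M. A small host-side sketch of the same splitting rule, with an invented sample entry:

    #include <stdio.h>

    /* Mirror of the 640k-1M clipping in copy_e820_map() (sketch only). */
    static void clip_ram(unsigned long long start, unsigned long long end)
    {
        if (start < 0x100000ULL && end > 0xA0000ULL) {
            if (start < 0xA0000ULL)
                printf("  keep RAM %#llx-%#llx\n", start, 0xA0000ULL);
            if (end > 0x100000ULL)
                printf("  keep RAM %#llx-%#llx\n", 0x100000ULL, end);
            return;
        }
        printf("  keep RAM %#llx-%#llx\n", start, end);
    }

    int main(void)
    {
        /* Hypothetical BIOS entry claiming RAM from 0 through 2MB. */
        clip_ram(0x0ULL, 0x200000ULL);
        return 0;
    }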
-+
-+#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
-+struct edd edd;
-+#ifdef CONFIG_EDD_MODULE
-+EXPORT_SYMBOL(edd);
-+#endif
-+/**
-+ * copy_edd() - Copy the BIOS EDD information
-+ * from boot_params into a safe place.
-+ *
-+ */
-+static inline void copy_edd(void)
-+{
-+ memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
-+ memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
-+ edd.mbr_signature_nr = EDD_MBR_SIG_NR;
-+ edd.edd_info_nr = EDD_NR;
-+}
-+#else
-+static inline void copy_edd(void)
-+{
-+}
-+#endif
-+
-+/*
-+ * Do NOT EVER look at the BIOS memory size location.
-+ * It does not work on many machines.
-+ */
-+#define LOWMEMSIZE() (0x9f000)
-+
-+static void __init parse_cmdline_early (char ** cmdline_p)
-+{
-+ char c = ' ', *to = command_line, *from = saved_command_line;
-+ int len = 0, max_cmdline;
-+ int userdef = 0;
-+
-+ if ((max_cmdline = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE)
-+ max_cmdline = COMMAND_LINE_SIZE;
-+ memcpy(saved_command_line, xen_start_info->cmd_line, max_cmdline);
-+ /* Save unparsed command line copy for /proc/cmdline */
-+ saved_command_line[max_cmdline-1] = '\0';
-+
-+ for (;;) {
-+ if (c != ' ')
-+ goto next_char;
-+ /*
-+ * "mem=nopentium" disables the 4MB page tables.
-+ * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM
-+ * to <mem>, overriding the bios size.
-+ * "memmap=XXX[KkmM]@XXX[KkmM]" defines a memory region from
-+ * <start> to <start>+<mem>, overriding the bios size.
-+ *
-+ * HPA tells me bootloaders need to parse mem=, so no new
-+ * option should be mem= [also see Documentation/i386/boot.txt]
-+ */
-+ if (!memcmp(from, "mem=", 4)) {
-+ if (to != command_line)
-+ to--;
-+ if (!memcmp(from+4, "nopentium", 9)) {
-+ from += 9+4;
-+ clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
-+ disable_pse = 1;
-+ } else {
-+ /* If the user specifies memory size, we
-+ * limit the BIOS-provided memory map to
-+ * that size. exactmap can be used to specify
-+ * the exact map. mem=number can be used to
-+ * trim the existing memory map.
-+ */
-+ unsigned long long mem_size;
-+
-+ mem_size = memparse(from+4, &from);
-+#if 0
-+ limit_regions(mem_size);
-+ userdef=1;
-+#else
-+ xen_override_max_pfn =
-+ (unsigned long)(mem_size>>PAGE_SHIFT);
-+#endif
-+ }
-+ }
-+
-+ else if (!memcmp(from, "memmap=", 7)) {
-+ if (to != command_line)
-+ to--;
-+ if (!memcmp(from+7, "exactmap", 8)) {
-+#ifdef CONFIG_CRASH_DUMP
-+ /* If we are doing a crash dump, we
-+ * still need to know the real mem
-+ * size before original memory map is
-+ * reset.
-+ */
-+ find_max_pfn();
-+ saved_max_pfn = max_pfn;
-+#endif
-+ from += 8+7;
-+ e820.nr_map = 0;
-+ userdef = 1;
-+ } else {
-+ /* If the user specifies memory size, we
-+ * limit the BIOS-provided memory map to
-+ * that size. exactmap can be used to specify
-+ * the exact map. mem=number can be used to
-+ * trim the existing memory map.
-+ */
-+ unsigned long long start_at, mem_size;
-+
-+ mem_size = memparse(from+7, &from);
-+ if (*from == '@') {
-+ start_at = memparse(from+1, &from);
-+ add_memory_region(start_at, mem_size, E820_RAM);
-+ } else if (*from == '#') {
-+ start_at = memparse(from+1, &from);
-+ add_memory_region(start_at, mem_size, E820_ACPI);
-+ } else if (*from == '$') {
-+ start_at = memparse(from+1, &from);
-+ add_memory_region(start_at, mem_size, E820_RESERVED);
-+ } else {
-+ limit_regions(mem_size);
-+ userdef=1;
-+ }
-+ }
-+ }
-+
-+ else if (!memcmp(from, "noexec=", 7))
-+ noexec_setup(from + 7);
-+
-+#ifdef CONFIG_X86_MPPARSE
-+ /*
-+ * If the BIOS enumerates physical processors before logical,
-+ * maxcpus=N at enumeration-time can be used to disable HT.
-+ */
-+ else if (!memcmp(from, "maxcpus=", 8)) {
-+ extern unsigned int maxcpus;
-+
-+ maxcpus = simple_strtoul(from + 8, NULL, 0);
-+ }
-+#endif
-+
-+#ifdef CONFIG_ACPI
-+ /* "acpi=off" disables both ACPI table parsing and interpreter */
-+ else if (!memcmp(from, "acpi=off", 8)) {
-+ disable_acpi();
-+ }
-+
-+ /* acpi=force to over-ride black-list */
-+ else if (!memcmp(from, "acpi=force", 10)) {
-+ acpi_force = 1;
-+ acpi_ht = 1;
-+ acpi_disabled = 0;
-+ }
-+
-+ /* acpi=strict disables out-of-spec workarounds */
-+ else if (!memcmp(from, "acpi=strict", 11)) {
-+ acpi_strict = 1;
-+ }
-+
-+ /* Limit ACPI just to boot-time to enable HT */
-+ else if (!memcmp(from, "acpi=ht", 7)) {
-+ if (!acpi_force)
-+ disable_acpi();
-+ acpi_ht = 1;
-+ }
-+
-+ /* "pci=noacpi" disable ACPI IRQ routing and PCI scan */
-+ else if (!memcmp(from, "pci=noacpi", 10)) {
-+ acpi_disable_pci();
-+ }
-+ /* "acpi=noirq" disables ACPI interrupt routing */
-+ else if (!memcmp(from, "acpi=noirq", 10)) {
-+ acpi_noirq_set();
-+ }
-+
-+ else if (!memcmp(from, "acpi_sci=edge", 13))
-+ acpi_sci_flags.trigger = 1;
-+
-+ else if (!memcmp(from, "acpi_sci=level", 14))
-+ acpi_sci_flags.trigger = 3;
-+
-+ else if (!memcmp(from, "acpi_sci=high", 13))
-+ acpi_sci_flags.polarity = 1;
-+
-+ else if (!memcmp(from, "acpi_sci=low", 12))
-+ acpi_sci_flags.polarity = 3;
-+
-+#ifdef CONFIG_X86_IO_APIC
-+ else if (!memcmp(from, "acpi_skip_timer_override", 24))
-+ acpi_skip_timer_override = 1;
-+
-+ if (!memcmp(from, "disable_timer_pin_1", 19))
-+ disable_timer_pin_1 = 1;
-+ if (!memcmp(from, "enable_timer_pin_1", 18))
-+ disable_timer_pin_1 = -1;
-+
-+ /* disable IO-APIC */
-+ else if (!memcmp(from, "noapic", 6))
-+ disable_ioapic_setup();
-+#endif /* CONFIG_X86_IO_APIC */
-+#endif /* CONFIG_ACPI */
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+ /* enable local APIC */
-+ else if (!memcmp(from, "lapic", 5))
-+ lapic_enable();
-+
-+ /* disable local APIC */
-+ else if (!memcmp(from, "nolapic", 6))
-+ lapic_disable();
-+#endif /* CONFIG_X86_LOCAL_APIC */
-+
-+#ifdef CONFIG_KEXEC
-+ /* crashkernel=size@addr specifies the location to reserve for
-+ * a crash kernel. By reserving this memory we guarantee
-+ * that linux never sets it up as a DMA target.
-+ * Useful for holding code to do something appropriate
-+ * after a kernel panic.
-+ */
-+ else if (!memcmp(from, "crashkernel=", 12)) {
-+ unsigned long size, base;
-+ size = memparse(from+12, &from);
-+ if (*from == '@') {
-+ base = memparse(from+1, &from);
-+ /* FIXME: Do I want a sanity check
-+ * to validate the memory range?
-+ */
-+ crashk_res.start = base;
-+ crashk_res.end = base + size - 1;
-+ }
-+ }
-+#endif
-+#ifdef CONFIG_PROC_VMCORE
-+ /* elfcorehdr= specifies the location of elf core header
-+ * stored by the crashed kernel.
-+ */
-+ else if (!memcmp(from, "elfcorehdr=", 11))
-+ elfcorehdr_addr = memparse(from+11, &from);
-+#endif
-+
-+ /*
-+ * highmem=size forces highmem to be exactly 'size' bytes.
-+ * This works even on boxes that have no highmem otherwise.
-+ * This also works to reduce highmem size on bigger boxes.
-+ */
-+ else if (!memcmp(from, "highmem=", 8))
-+ highmem_pages = memparse(from+8, &from) >> PAGE_SHIFT;
-+
-+ /*
-+ * vmalloc=size forces the vmalloc area to be exactly 'size'
-+ * bytes. This can be used to increase (or decrease) the
-+ * vmalloc area - the default is 128m.
-+ */
-+ else if (!memcmp(from, "vmalloc=", 8))
-+ __VMALLOC_RESERVE = memparse(from+8, &from);
-+
-+ next_char:
-+ c = *(from++);
-+ if (!c)
-+ break;
-+ if (COMMAND_LINE_SIZE <= ++len)
-+ break;
-+ *(to++) = c;
-+ }
-+ *to = '\0';
-+ *cmdline_p = command_line;
-+ if (userdef) {
-+ printk(KERN_INFO "user-defined physical RAM map:\n");
-+ print_memory_map("user");
-+ }
-+}
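
Most of the options above lean on the kernel's memparse() to read sizes like mem=64M or memmap=64M@16M. A rough standalone analogue, assuming only the K/M/G suffixes (a sketch, not the kernel helper):

    #include <stdio.h>
    #include <stdlib.h>

    static unsigned long long parse_size(const char *s, const char **retp)
    {
        char *end;
        unsigned long long v = strtoull(s, &end, 0);

        switch (*end) {
        case 'G': case 'g': v <<= 10;       /* fall through */
        case 'M': case 'm': v <<= 10;       /* fall through */
        case 'K': case 'k': v <<= 10; end++;
        default: break;
        }
        if (retp)
            *retp = end;
        return v;
    }

    int main(void)
    {
        const char *arg = "64M@16M", *rest;
        unsigned long long size = parse_size(arg, &rest);

        if (*rest == '@') {
            unsigned long long base = parse_size(rest + 1, &rest);
            printf("size=%llu bytes at base=%llu\n", size, base);
        }
        return 0;
    }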
-+
-+#if 0 /* !XEN */
-+/*
-+ * Callback for efi_memory_walk.
-+ */
-+static int __init
-+efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
-+{
-+ unsigned long *max_pfn = arg, pfn;
-+
-+ if (start < end) {
-+ pfn = PFN_UP(end -1);
-+ if (pfn > *max_pfn)
-+ *max_pfn = pfn;
-+ }
-+ return 0;
-+}
-+
-+static int __init
-+efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
-+{
-+ memory_present(0, start, end);
-+ return 0;
-+}
-+
-+/*
-+ * Find the highest page frame number we have available
-+ */
-+void __init find_max_pfn(void)
-+{
-+ int i;
-+
-+ max_pfn = 0;
-+ if (efi_enabled) {
-+ efi_memmap_walk(efi_find_max_pfn, &max_pfn);
-+ efi_memmap_walk(efi_memory_present_wrapper, NULL);
-+ return;
-+ }
-+
-+ for (i = 0; i < e820.nr_map; i++) {
-+ unsigned long start, end;
-+ /* RAM? */
-+ if (e820.map[i].type != E820_RAM)
-+ continue;
-+ start = PFN_UP(e820.map[i].addr);
-+ end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
-+ if (start >= end)
-+ continue;
-+ if (end > max_pfn)
-+ max_pfn = end;
-+ memory_present(0, start, end);
-+ }
-+}
-+#else
-+/* We don't use the fake e820 because we need to respond to user override. */
-+void __init find_max_pfn(void)
-+{
-+ if (xen_override_max_pfn == 0) {
-+ max_pfn = xen_start_info->nr_pages;
-+ /* Default 8MB slack (to balance backend allocations). */
-+ max_pfn += 8 << (20 - PAGE_SHIFT);
-+ } else if (xen_override_max_pfn > xen_start_info->nr_pages) {
-+ max_pfn = xen_override_max_pfn;
-+ } else {
-+ max_pfn = xen_start_info->nr_pages;
-+ }
-+}
-+#endif /* XEN */
-+
-+/*
-+ * Determine low and high memory ranges:
-+ */
-+unsigned long __init find_max_low_pfn(void)
-+{
-+ unsigned long max_low_pfn;
-+
-+ max_low_pfn = max_pfn;
-+ if (max_low_pfn > MAXMEM_PFN) {
-+ if (highmem_pages == -1)
-+ highmem_pages = max_pfn - MAXMEM_PFN;
-+ if (highmem_pages + MAXMEM_PFN < max_pfn)
-+ max_pfn = MAXMEM_PFN + highmem_pages;
-+ if (highmem_pages + MAXMEM_PFN > max_pfn) {
-+ printk("only %luMB highmem pages available, ignoring highmem size of %uMB.\n", pages_to_mb(max_pfn - MAXMEM_PFN), pages_to_mb(highmem_pages));
-+ highmem_pages = 0;
-+ }
-+ max_low_pfn = MAXMEM_PFN;
-+#ifndef CONFIG_HIGHMEM
-+ /* Maximum memory usable is what is directly addressable */
-+ printk(KERN_WARNING "Warning only %ldMB will be used.\n",
-+ MAXMEM>>20);
-+ if (max_pfn > MAX_NONPAE_PFN)
-+ printk(KERN_WARNING "Use a PAE enabled kernel.\n");
-+ else
-+ printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
-+ max_pfn = MAXMEM_PFN;
-+#else /* !CONFIG_HIGHMEM */
-+#ifndef CONFIG_X86_PAE
-+ if (max_pfn > MAX_NONPAE_PFN) {
-+ max_pfn = MAX_NONPAE_PFN;
-+ printk(KERN_WARNING "Warning only 4GB will be used.\n");
-+ printk(KERN_WARNING "Use a PAE enabled kernel.\n");
-+ }
-+#endif /* !CONFIG_X86_PAE */
-+#endif /* !CONFIG_HIGHMEM */
-+ } else {
-+ if (highmem_pages == -1)
-+ highmem_pages = 0;
-+#ifdef CONFIG_HIGHMEM
-+ if (highmem_pages >= max_pfn) {
-+ printk(KERN_ERR "highmem size specified (%uMB) is bigger than pages available (%luMB)!.\n", pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
-+ highmem_pages = 0;
-+ }
-+ if (highmem_pages) {
-+ if (max_low_pfn-highmem_pages < 64*1024*1024/PAGE_SIZE){
-+ printk(KERN_ERR "highmem size %uMB results in smaller than 64MB lowmem, ignoring it.\n", pages_to_mb(highmem_pages));
-+ highmem_pages = 0;
-+ }
-+ max_low_pfn -= highmem_pages;
-+ }
-+#else
-+ if (highmem_pages)
-+ printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
-+#endif
-+ }
-+ return max_low_pfn;
-+}
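
The split computed above caps lowmem at MAXMEM_PFN and turns the remainder into highmem. A toy calculation with made-up numbers (4KB pages and the usual ~896MB i386 lowmem ceiling) showing the arithmetic:

    #include <stdio.h>

    int main(void)
    {
        unsigned long page_shift = 12;                        /* 4KB pages */
        unsigned long maxmem_pfn = (896UL << 20) >> page_shift;
        unsigned long max_pfn = (2048UL << 20) >> page_shift; /* 2GB box */
        unsigned long highmem_pages, max_low_pfn;

        if (max_pfn > maxmem_pfn) {
            highmem_pages = max_pfn - maxmem_pfn;
            max_low_pfn = maxmem_pfn;
        } else {
            highmem_pages = 0;
            max_low_pfn = max_pfn;
        }
        printf("lowmem: %lu MB, highmem: %lu MB\n",
               (max_low_pfn << page_shift) >> 20,
               (highmem_pages << page_shift) >> 20);
        return 0;
    }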
-+
-+/*
-+ * Free all available memory for boot time allocation. Used
-+ * as a callback function by efi_memory_walk()
-+ */
-+
-+static int __init
-+free_available_memory(unsigned long start, unsigned long end, void *arg)
-+{
-+ /* check max_low_pfn */
-+ if (start >= ((max_low_pfn + 1) << PAGE_SHIFT))
-+ return 0;
-+ if (end >= ((max_low_pfn + 1) << PAGE_SHIFT))
-+ end = (max_low_pfn + 1) << PAGE_SHIFT;
-+ if (start < end)
-+ free_bootmem(start, end - start);
-+
-+ return 0;
-+}
-+/*
-+ * Register fully available low RAM pages with the bootmem allocator.
-+ */
-+static void __init register_bootmem_low_pages(unsigned long max_low_pfn)
-+{
-+ int i;
-+
-+ if (efi_enabled) {
-+ efi_memmap_walk(free_available_memory, NULL);
-+ return;
-+ }
-+ for (i = 0; i < e820.nr_map; i++) {
-+ unsigned long curr_pfn, last_pfn, size;
-+ /*
-+ * Reserve usable low memory
-+ */
-+ if (e820.map[i].type != E820_RAM)
-+ continue;
-+ /*
-+ * We are rounding up the start address of usable memory:
-+ */
-+ curr_pfn = PFN_UP(e820.map[i].addr);
-+ if (curr_pfn >= max_low_pfn)
-+ continue;
-+ /*
-+ * ... and at the end of the usable range downwards:
-+ */
-+ last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
-+
-+ if (last_pfn > max_low_pfn)
-+ last_pfn = max_low_pfn;
-+
-+ /*
-+ * .. finally, did all the rounding and playing
-+ * around just make the area go away?
-+ */
-+ if (last_pfn <= curr_pfn)
-+ continue;
-+
-+ size = last_pfn - curr_pfn;
-+ free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
-+ }
-+}
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * workaround for Dell systems that neglect to reserve EBDA
-+ */
-+static void __init reserve_ebda_region(void)
-+{
-+ unsigned int addr;
-+ addr = get_bios_ebda();
-+ if (addr)
-+ reserve_bootmem(addr, PAGE_SIZE);
-+}
-+#endif
-+
-+#ifndef CONFIG_NEED_MULTIPLE_NODES
-+void __init setup_bootmem_allocator(void);
-+static unsigned long __init setup_memory(void)
-+{
-+ /*
-+ * partially used pages are not usable - thus
-+ * we are rounding upwards:
-+ */
-+ min_low_pfn = PFN_UP(__pa(xen_start_info->pt_base)) +
-+ xen_start_info->nr_pt_frames;
-+
-+ find_max_pfn();
-+
-+ max_low_pfn = find_max_low_pfn();
-+
-+#ifdef CONFIG_HIGHMEM
-+ highstart_pfn = highend_pfn = max_pfn;
-+ if (max_pfn > max_low_pfn) {
-+ highstart_pfn = max_low_pfn;
-+ }
-+ printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
-+ pages_to_mb(highend_pfn - highstart_pfn));
-+#endif
-+ printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
-+ pages_to_mb(max_low_pfn));
-+
-+ setup_bootmem_allocator();
-+
-+ return max_low_pfn;
-+}
-+
-+void __init zone_sizes_init(void)
-+{
-+ unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
-+ unsigned int max_dma, low;
-+
-+ /*
-+ * XEN: Our notion of "DMA memory" is fake when running over Xen.
-+ * We simply put all RAM in the DMA zone so that those drivers which
-+ * needlessly specify GFP_DMA do not get starved of RAM unnecessarily.
-+ * Those drivers that *do* require lowmem are screwed anyway when
-+ * running over Xen!
-+ */
-+ max_dma = max_low_pfn;
-+ low = max_low_pfn;
-+
-+ if (low < max_dma)
-+ zones_size[ZONE_DMA] = low;
-+ else {
-+ zones_size[ZONE_DMA] = max_dma;
-+ zones_size[ZONE_NORMAL] = low - max_dma;
-+#ifdef CONFIG_HIGHMEM
-+ zones_size[ZONE_HIGHMEM] = highend_pfn - low;
-+#endif
-+ }
-+ free_area_init(zones_size);
-+}
-+#else
-+extern unsigned long __init setup_memory(void);
-+extern void zone_sizes_init(void);
-+#endif /* !CONFIG_NEED_MULTIPLE_NODES */
-+
-+void __init setup_bootmem_allocator(void)
-+{
-+ unsigned long bootmap_size;
-+ /*
-+ * Initialize the boot-time allocator (with low memory only):
-+ */
-+ bootmap_size = init_bootmem(min_low_pfn, max_low_pfn);
-+
-+ register_bootmem_low_pages(max_low_pfn);
-+
-+ /*
-+ * Reserve the bootmem bitmap itself as well. We do this in two
-+ * steps (first step was init_bootmem()) because this catches
-+ * the (very unlikely) case of us accidentally initializing the
-+ * bootmem allocator with an invalid RAM area.
-+ */
-+ reserve_bootmem(__PHYSICAL_START, (PFN_PHYS(min_low_pfn) +
-+ bootmap_size + PAGE_SIZE-1) - (__PHYSICAL_START));
-+
-+#ifndef CONFIG_XEN
-+ /*
-+ * reserve physical page 0 - it's a special BIOS page on many boxes,
-+ * enabling clean reboots, SMP operation, laptop functions.
-+ */
-+ reserve_bootmem(0, PAGE_SIZE);
-+
-+ /* reserve EBDA region, it's a 4K region */
-+ reserve_ebda_region();
-+
-+ /* could be an AMD 768MPX chipset. Reserve a page before VGA to prevent
-+ PCI prefetch into it (errata #56). Usually the page is reserved anyways,
-+ unless you have no PS/2 mouse plugged in. */
-+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
-+ boot_cpu_data.x86 == 6)
-+ reserve_bootmem(0xa0000 - 4096, 4096);
-+
-+#ifdef CONFIG_SMP
-+ /*
-+ * But first pinch a few for the stack/trampoline stuff
-+ * FIXME: Don't need the extra page at 4K, but need to fix
-+ * trampoline before removing it. (see the GDT stuff)
-+ */
-+ reserve_bootmem(PAGE_SIZE, PAGE_SIZE);
-+#endif
-+#ifdef CONFIG_ACPI_SLEEP
-+ /*
-+ * Reserve low memory region for sleep support.
-+ */
-+ acpi_reserve_bootmem();
-+#endif
-+#endif /* !CONFIG_XEN */
-+
-+#ifdef CONFIG_BLK_DEV_INITRD
-+ if (xen_start_info->mod_start) {
-+ if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
-+ /*reserve_bootmem(INITRD_START, INITRD_SIZE);*/
-+ initrd_start = INITRD_START + PAGE_OFFSET;
-+ initrd_end = initrd_start+INITRD_SIZE;
-+ initrd_below_start_ok = 1;
-+ }
-+ else {
-+ printk(KERN_ERR "initrd extends beyond end of memory "
-+ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
-+ INITRD_START + INITRD_SIZE,
-+ max_low_pfn << PAGE_SHIFT);
-+ initrd_start = 0;
-+ }
-+ }
-+#endif
-+
-+ if (!xen_feature(XENFEAT_auto_translated_physmap))
-+ phys_to_machine_mapping =
-+ (unsigned long *)xen_start_info->mfn_list;
-+}
-+
-+/*
-+ * The node 0 pgdat is initialized before all of these because
-+ * it's needed for bootmem. node>0 pgdats have their virtual
-+ * space allocated before the pagetables are in place to access
-+ * them, so they can't be cleared then.
-+ *
-+ * This should all compile down to nothing when NUMA is off.
-+ */
-+void __init remapped_pgdat_init(void)
-+{
-+ int nid;
-+
-+ for_each_online_node(nid) {
-+ if (nid != 0)
-+ memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
-+ }
-+}
-+
-+/*
-+ * Request address space for all standard RAM and ROM resources
-+ * and also for regions reported as reserved by the e820.
-+ */
-+static void __init
-+legacy_init_iomem_resources(struct resource *code_resource, struct resource *data_resource)
-+{
-+ int i;
-+#ifdef CONFIG_XEN
-+ dom0_op_t op;
-+ struct dom0_memory_map_entry *map;
-+ unsigned long gapstart, gapsize;
-+ unsigned long long last;
-+#endif
-+
-+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-+ probe_roms();
-+#endif
-+
-+#ifdef CONFIG_XEN
-+ map = alloc_bootmem_low_pages(PAGE_SIZE);
-+ op.cmd = DOM0_PHYSICAL_MEMORY_MAP;
-+ op.u.physical_memory_map.memory_map = map;
-+ op.u.physical_memory_map.max_map_entries =
-+ PAGE_SIZE / sizeof(struct dom0_memory_map_entry);
-+ BUG_ON(HYPERVISOR_dom0_op(&op));
-+
-+ last = 0x100000000ULL;
-+ gapstart = 0x10000000;
-+ gapsize = 0x400000;
-+
-+ for (i = op.u.physical_memory_map.nr_map_entries - 1; i >= 0; i--) {
-+ struct resource *res;
-+
-+ if ((last > map[i].end) && ((last - map[i].end) > gapsize)) {
-+ gapsize = last - map[i].end;
-+ gapstart = map[i].end;
-+ }
-+ if (map[i].start < last)
-+ last = map[i].start;
-+
-+ if (map[i].end > 0x100000000ULL)
-+ continue;
-+ res = alloc_bootmem_low(sizeof(struct resource));
-+ res->name = map[i].is_ram ? "System RAM" : "reserved";
-+ res->start = map[i].start;
-+ res->end = map[i].end - 1;
-+ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-+ request_resource(&iomem_resource, res);
-+ }
-+
-+ free_bootmem(__pa(map), PAGE_SIZE);
-+
-+ /*
-+ * Start allocating dynamic PCI memory a bit into the gap,
-+ * aligned up to the nearest megabyte.
-+ *
-+ * Question: should we try to pad it up a bit (do something
-+ * like " + (gapsize >> 3)" in there too?). We now have the
-+ * technology.
-+ */
-+ pci_mem_start = (gapstart + 0xfffff) & ~0xfffff;
-+
-+ printk("Allocating PCI resources starting at %08lx (gap: %08lx:%08lx)\n",
-+ pci_mem_start, gapstart, gapsize);
-+#else
-+ for (i = 0; i < e820.nr_map; i++) {
-+ struct resource *res;
-+ if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
-+ continue;
-+ res = alloc_bootmem_low(sizeof(struct resource));
-+ switch (e820.map[i].type) {
-+ case E820_RAM: res->name = "System RAM"; break;
-+ case E820_ACPI: res->name = "ACPI Tables"; break;
-+ case E820_NVS: res->name = "ACPI Non-volatile Storage"; break;
-+ default: res->name = "reserved";
-+ }
-+ res->start = e820.map[i].addr;
-+ res->end = res->start + e820.map[i].size - 1;
-+ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-+ request_resource(&iomem_resource, res);
-+ if (e820.map[i].type == E820_RAM) {
-+ /*
-+ * We don't know which RAM region contains kernel data,
-+ * so we try it repeatedly and let the resource manager
-+ * test it.
-+ */
-+ request_resource(res, code_resource);
-+ request_resource(res, data_resource);
-+#ifdef CONFIG_KEXEC
-+ request_resource(res, &crashk_res);
-+#endif
-+ }
-+ }
-+#endif
-+#ifdef CONFIG_KEXEC
-+ if (crashk_res.start != crashk_res.end)
-+ reserve_bootmem(crashk_res.start,
-+ crashk_res.end - crashk_res.start + 1);
-+#endif
-+}
-+
-+/*
-+ * Request address space for all standard resources
-+ */
-+static void __init register_memory(void)
-+{
-+#ifndef CONFIG_XEN
-+ unsigned long gapstart, gapsize, round;
-+ unsigned long long last;
-+#endif
-+ int i;
-+
-+ /* Nothing to do if not running in dom0. */
-+ if (!(xen_start_info->flags & SIF_INITDOMAIN))
-+ return;
-+
-+ if (efi_enabled)
-+ efi_initialize_iomem_resources(&code_resource, &data_resource);
-+ else
-+ legacy_init_iomem_resources(&code_resource, &data_resource);
-+
-+ /* EFI systems may still have VGA */
-+ request_resource(&iomem_resource, &video_ram_resource);
-+
-+ /* request I/O space for devices used on all i[345]86 PCs */
-+ for (i = 0; i < STANDARD_IO_RESOURCES; i++)
-+ request_resource(&ioport_resource, &standard_io_resources[i]);
-+
-+#ifndef CONFIG_XEN
-+ /*
-+ * Search for the biggest gap in the low 32 bits of the e820
-+ * memory space.
-+ */
-+ last = 0x100000000ull;
-+ gapstart = 0x10000000;
-+ gapsize = 0x400000;
-+ i = e820.nr_map;
-+ while (--i >= 0) {
-+ unsigned long long start = e820.map[i].addr;
-+ unsigned long long end = start + e820.map[i].size;
-+
-+ /*
-+ * Since "last" is at most 4GB, we know we'll
-+ * fit in 32 bits if this condition is true
-+ */
-+ if (last > end) {
-+ unsigned long gap = last - end;
-+
-+ if (gap > gapsize) {
-+ gapsize = gap;
-+ gapstart = end;
-+ }
-+ }
-+ if (start < last)
-+ last = start;
-+ }
-+
-+ /*
-+ * See how much we want to round up: start off with
-+ * rounding to the next 1MB area.
-+ */
-+ round = 0x100000;
-+ while ((gapsize >> 4) > round)
-+ round += round;
-+ /* Fun with two's complement */
-+ pci_mem_start = (gapstart + round) & -round;
-+
-+ printk("Allocating PCI resources starting at %08lx (gap: %08lx:%08lx)\n",
-+ pci_mem_start, gapstart, gapsize);
-+#endif
-+}
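
The "fun with two's complement" above works because -round equals ~(round - 1) when round is a power of two, so (gapstart + round) & -round lands on the smallest multiple of round strictly above gapstart. A standalone demonstration with a hypothetical gap address:

    #include <stdio.h>

    /* Valid only for power-of-two round: -round == ~(round - 1). */
    static unsigned long align_past(unsigned long x, unsigned long round)
    {
        return (x + round) & -round;
    }

    int main(void)
    {
        unsigned long gapstart = 0xdff00000UL;  /* hypothetical gap start */
        unsigned long round = 0x100000UL;       /* round to 1MB */

        /* Prints 0xe0000000: the next 1MB boundary past gapstart. */
        printf("pci_mem_start = %#lx\n", align_past(gapstart, round));
        return 0;
    }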
-+
-+/* Use inline assembly to define this because the nops are defined
-+ as inline assembly strings in the include files and we cannot
-+ get them easily into strings. */
-+asm("\t.data\nintelnops: "
-+ GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
-+ GENERIC_NOP7 GENERIC_NOP8);
-+asm("\t.data\nk8nops: "
-+ K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
-+ K8_NOP7 K8_NOP8);
-+asm("\t.data\nk7nops: "
-+ K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
-+ K7_NOP7 K7_NOP8);
-+
-+extern unsigned char intelnops[], k8nops[], k7nops[];
-+static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
-+ NULL,
-+ intelnops,
-+ intelnops + 1,
-+ intelnops + 1 + 2,
-+ intelnops + 1 + 2 + 3,
-+ intelnops + 1 + 2 + 3 + 4,
-+ intelnops + 1 + 2 + 3 + 4 + 5,
-+ intelnops + 1 + 2 + 3 + 4 + 5 + 6,
-+ intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
-+};
-+static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
-+ NULL,
-+ k8nops,
-+ k8nops + 1,
-+ k8nops + 1 + 2,
-+ k8nops + 1 + 2 + 3,
-+ k8nops + 1 + 2 + 3 + 4,
-+ k8nops + 1 + 2 + 3 + 4 + 5,
-+ k8nops + 1 + 2 + 3 + 4 + 5 + 6,
-+ k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
-+};
-+static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
-+ NULL,
-+ k7nops,
-+ k7nops + 1,
-+ k7nops + 1 + 2,
-+ k7nops + 1 + 2 + 3,
-+ k7nops + 1 + 2 + 3 + 4,
-+ k7nops + 1 + 2 + 3 + 4 + 5,
-+ k7nops + 1 + 2 + 3 + 4 + 5 + 6,
-+ k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
-+};
-+static struct nop {
-+ int cpuid;
-+ unsigned char **noptable;
-+} noptypes[] = {
-+ { X86_FEATURE_K8, k8_nops },
-+ { X86_FEATURE_K7, k7_nops },
-+ { -1, NULL }
-+};
-+
-+/* Replace instructions with better alternatives for this CPU type.
-+
-+ This runs before SMP is initialized to avoid SMP problems with
-+ self-modifying code. This implies that asymmetric systems where
-+ APs have fewer capabilities than the boot processor are not handled.
-+ Tough. Make sure you disable such features by hand. */
-+void apply_alternatives(void *start, void *end)
-+{
-+ struct alt_instr *a;
-+ int diff, i, k;
-+ unsigned char **noptable = intel_nops;
-+ for (i = 0; noptypes[i].cpuid >= 0; i++) {
-+ if (boot_cpu_has(noptypes[i].cpuid)) {
-+ noptable = noptypes[i].noptable;
-+ break;
-+ }
-+ }
-+ for (a = start; (void *)a < end; a++) {
-+ if (!boot_cpu_has(a->cpuid))
-+ continue;
-+ BUG_ON(a->replacementlen > a->instrlen);
-+ memcpy(a->instr, a->replacement, a->replacementlen);
-+ diff = a->instrlen - a->replacementlen;
-+ /* Pad the rest with nops */
-+ for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
-+ k = diff;
-+ if (k > ASM_NOP_MAX)
-+ k = ASM_NOP_MAX;
-+ memcpy(a->instr + i, noptable[k], k);
-+ }
-+ }
-+}
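
When a replacement instruction is shorter than the original, the loop above pads the leftover bytes with the largest NOPs available, at most ASM_NOP_MAX bytes per step. A host-side sketch of the same chunking, substituting plain 0x90 bytes for the per-CPU NOP tables (illustrative only):

    #include <stdio.h>
    #include <string.h>

    #define NOP_MAX 8

    static void pad_with_nops(unsigned char *p, int diff)
    {
        int k;

        while (diff > 0) {
            k = diff > NOP_MAX ? NOP_MAX : diff;  /* largest chunk first */
            memset(p, 0x90, k);                   /* stand-in for noptable[k] */
            p += k;
            diff -= k;
        }
    }

    int main(void)
    {
        unsigned char insn[19];

        pad_with_nops(insn, sizeof(insn));        /* 8 + 8 + 3 byte chunks */
        printf("padded %zu bytes\n", sizeof(insn));
        return 0;
    }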
-+
-+void __init alternative_instructions(void)
-+{
-+ extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
-+ apply_alternatives(__alt_instructions, __alt_instructions_end);
-+}
-+
-+static char * __init machine_specific_memory_setup(void);
-+
-+/*
-+ * Determine if we were loaded by an EFI loader. If so, then we have also been
-+ * passed the efi memmap, systab, etc., so we should use these data structures
-+ * for initialization. Note, the efi init code path is determined by the
-+ * global efi_enabled. This allows the same kernel image to be used on existing
-+ * systems (with a traditional BIOS) as well as on EFI systems.
-+ */
-+void __init setup_arch(char **cmdline_p)
-+{
-+ int i, j, k, fpp;
-+ physdev_op_t op;
-+ unsigned long max_low_pfn;
-+
-+ /* Force a quick death if the kernel panics. */
-+ extern int panic_timeout;
-+ if (panic_timeout == 0)
-+ panic_timeout = 1;
-+
-+ /* Register a call for panic conditions. */
-+ notifier_chain_register(&panic_notifier_list, &xen_panic_block);
-+
-+ HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
-+ HYPERVISOR_vm_assist(VMASST_CMD_enable,
-+ VMASST_TYPE_writable_pagetables);
-+
-+ memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
-+ early_cpu_init();
-+
-+ /*
-+ * FIXME: This isn't an official loader_type right
-+ * now but does currently work with elilo.
-+ * If we were configured as an EFI kernel, check to make
-+ * sure that we were loaded correctly from elilo and that
-+ * the system table is valid. If not, then initialize normally.
-+ */
-+#ifdef CONFIG_EFI
-+ if ((LOADER_TYPE == 0x50) && EFI_SYSTAB)
-+ efi_enabled = 1;
-+#endif
-+
-+ /* This must be initialized to UNNAMED_MAJOR for ipconfig to work
-+ properly. Setting ROOT_DEV to default to /dev/ram0 breaks initrd.
-+ */
-+ ROOT_DEV = MKDEV(UNNAMED_MAJOR,0);
-+
-+ if (xen_start_info->flags & SIF_INITDOMAIN) {
-+ /* This is drawn from a dump from vgacon:startup in
-+ * standard Linux. */
-+ screen_info.orig_video_mode = 3;
-+ screen_info.orig_video_isVGA = 1;
-+ screen_info.orig_video_lines = 25;
-+ screen_info.orig_video_cols = 80;
-+ screen_info.orig_video_ega_bx = 3;
-+ screen_info.orig_video_points = 16;
-+ } else
-+ screen_info.orig_video_isVGA = 0;
-+
-+#if defined(CONFIG_BLK_DEV_RAM) && defined(RAMDISK_FLAGS)
-+ rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
-+ rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
-+ rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
-+#endif
-+
-+ setup_xen_features();
-+
-+ ARCH_SETUP
-+ if (efi_enabled)
-+ efi_init();
-+ else {
-+ printk(KERN_INFO "BIOS-provided physical RAM map:\n");
-+ print_memory_map(machine_specific_memory_setup());
-+ }
-+
-+ copy_edd();
-+
-+ if (!MOUNT_ROOT_RDONLY)
-+ root_mountflags &= ~MS_RDONLY;
-+ init_mm.start_code = (unsigned long) _text;
-+ init_mm.end_code = (unsigned long) _etext;
-+ init_mm.end_data = (unsigned long) _edata;
-+ init_mm.brk = (PFN_UP(__pa(xen_start_info->pt_base)) +
-+ xen_start_info->nr_pt_frames) << PAGE_SHIFT;
-+
-+ /* XEN: This is nonsense: kernel may not even be contiguous in RAM. */
-+ /*code_resource.start = virt_to_phys(_text);*/
-+ /*code_resource.end = virt_to_phys(_etext)-1;*/
-+ /*data_resource.start = virt_to_phys(_etext);*/
-+ /*data_resource.end = virt_to_phys(_edata)-1;*/
-+
-+ parse_cmdline_early(cmdline_p);
-+
-+ max_low_pfn = setup_memory();
-+
-+ /*
-+ * NOTE: before this point _nobody_ is allowed to allocate
-+ * any memory using the bootmem allocator. Although the
-+ * allocator is now initialised, only the first 8MB of the kernel
-+ * virtual address space has been mapped. All allocations before
-+ * paging_init() has completed must use the alloc_bootmem_low_pages()
-+ * variant (which allocates DMA'able memory) and care must be taken
-+ * not to exceed the 8MB limit.
-+ */
-+
-+#ifdef CONFIG_SMP
-+ smp_alloc_memory(); /* AP processor realmode stacks in low memory*/
-+#endif
-+ paging_init();
-+ remapped_pgdat_init();
-+ sparse_init();
-+ zone_sizes_init();
-+
-+#ifdef CONFIG_X86_FIND_SMP_CONFIG
-+ /*
-+ * Find and reserve possible boot-time SMP configuration:
-+ */
-+ find_smp_config();
-+#endif
-+
-+ /* Make sure we have a correctly sized P->M table. */
-+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+ phys_to_machine_mapping = alloc_bootmem_low_pages(
-+ max_pfn * sizeof(unsigned long));
-+ memset(phys_to_machine_mapping, ~0,
-+ max_pfn * sizeof(unsigned long));
-+ memcpy(phys_to_machine_mapping,
-+ (unsigned long *)xen_start_info->mfn_list,
-+ xen_start_info->nr_pages * sizeof(unsigned long));
-+ free_bootmem(
-+ __pa(xen_start_info->mfn_list),
-+ PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
-+ sizeof(unsigned long))));
-+
-+ /*
-+ * Initialise the list of the frames that specify the list of
-+ * frames that make up the p2m table. Used by save/restore
-+ */
-+ pfn_to_mfn_frame_list_list = alloc_bootmem_low_pages(PAGE_SIZE);
-+ HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
-+ virt_to_mfn(pfn_to_mfn_frame_list_list);
-+
-+ fpp = PAGE_SIZE/sizeof(unsigned long);
-+ for (i=0, j=0, k=-1; i< max_pfn; i+=fpp, j++) {
-+ if ((j % fpp) == 0) {
-+ k++;
-+ BUG_ON(k>=16);
-+ pfn_to_mfn_frame_list[k] =
-+ alloc_bootmem_low_pages(PAGE_SIZE);
-+ pfn_to_mfn_frame_list_list[k] =
-+ virt_to_mfn(pfn_to_mfn_frame_list[k]);
-+ j=0;
-+ }
-+ pfn_to_mfn_frame_list[k][j] =
-+ virt_to_mfn(&phys_to_machine_mapping[i]);
-+ }
-+ HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
-+ }
-+
-+ /*
-+ * NOTE: at this point the bootmem allocator is fully available.
-+ */
-+
-+#ifdef CONFIG_EARLY_PRINTK
-+ {
-+ char *s = strstr(*cmdline_p, "earlyprintk=");
-+ if (s) {
-+ extern void setup_early_printk(char *);
-+
-+ setup_early_printk(strchr(s, '=') + 1);
-+ printk("early console enabled\n");
-+ }
-+ }
-+#endif
-+
-+ if (xen_start_info->flags & SIF_INITDOMAIN)
-+ dmi_scan_machine();
-+
-+#ifdef CONFIG_X86_GENERICARCH
-+ generic_apic_probe(*cmdline_p);
-+#endif
-+ if (efi_enabled)
-+ efi_map_memmap();
-+
-+ op.cmd = PHYSDEVOP_SET_IOPL;
-+ op.u.set_iopl.iopl = 1;
-+ HYPERVISOR_physdev_op(&op);
-+
-+#ifdef CONFIG_ACPI
-+ if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
-+ printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
-+ acpi_disabled = 1;
-+ acpi_ht = 0;
-+ }
-+
-+ /*
-+ * Parse the ACPI tables for possible boot-time SMP configuration.
-+ */
-+ acpi_boot_table_init();
-+ acpi_boot_init();
-+
-+#if defined(CONFIG_SMP) && defined(CONFIG_X86_PC)
-+ if (def_to_bigsmp)
-+ printk(KERN_WARNING "More than 8 CPUs detected and "
-+ "CONFIG_X86_PC cannot handle it.\nUse "
-+ "CONFIG_X86_GENERICARCH or CONFIG_X86_BIGSMP.\n");
-+#endif
-+#endif
-+#ifdef CONFIG_X86_LOCAL_APIC
-+ if (smp_found_config)
-+ get_smp_config();
-+#endif
-+
-+ /* XXX Disable irqdebug until we have a way to avoid interrupt
-+ * conflicts. */
-+ noirqdebug_setup("");
-+
-+ register_memory();
-+
-+ if (xen_start_info->flags & SIF_INITDOMAIN) {
-+ if (!(xen_start_info->flags & SIF_PRIVILEGED))
-+ panic("Xen granted us console access "
-+ "but not privileged status");
-+
-+#ifdef CONFIG_VT
-+#if defined(CONFIG_VGA_CONSOLE)
-+ if (!efi_enabled ||
-+ (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
-+ conswitchp = &vga_con;
-+#elif defined(CONFIG_DUMMY_CONSOLE)
-+ conswitchp = &dummy_con;
-+#endif
-+#endif
-+ } else {
-+ extern int console_use_vt;
-+ console_use_vt = 0;
-+ }
-+}
-+
-+static int
-+xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
-+{
-+ HYPERVISOR_sched_op(SCHEDOP_shutdown, SHUTDOWN_crash);
-+ /* we're never actually going to get here... */
-+ return NOTIFY_DONE;
-+}
-+
-+#include "setup_arch_post.h"
-+/*
-+ * Local Variables:
-+ * mode:c
-+ * c-file-style:"k&r"
-+ * c-basic-offset:8
-+ * End:
-+ */
-diff --git a/arch/i386/kernel/smp-xen.c b/arch/i386/kernel/smp-xen.c
-new file mode 100644
-index 0000000..6fc6f03
---- /dev/null
-+++ b/arch/i386/kernel/smp-xen.c
-@@ -0,0 +1,617 @@
-+/*
-+ * Intel SMP support routines.
-+ *
-+ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
-+ * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
-+ *
-+ * This code is released under the GNU General Public License version 2 or
-+ * later.
-+ */
-+
-+#include <linux/init.h>
-+
-+#include <linux/mm.h>
-+#include <linux/delay.h>
-+#include <linux/spinlock.h>
-+#include <linux/smp_lock.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/cache.h>
-+#include <linux/interrupt.h>
-+#include <linux/cpu.h>
-+#include <linux/module.h>
-+
-+#include <asm/mtrr.h>
-+#include <asm/tlbflush.h>
-+#if 0
-+#include <mach_apic.h>
-+#endif
-+#include <xen/evtchn.h>
-+
-+/*
-+ * Some notes on x86 processor bugs affecting SMP operation:
-+ *
-+ * Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
-+ * The Linux implications for SMP are handled as follows:
-+ *
-+ * Pentium III / [Xeon]
-+ * None of the E1AP-E3AP errata are visible to the user.
-+ *
-+ * E1AP. see PII A1AP
-+ * E2AP. see PII A2AP
-+ * E3AP. see PII A3AP
-+ *
-+ * Pentium II / [Xeon]
-+ * None of the A1AP-A3AP errata are visible to the user.
-+ *
-+ * A1AP. see PPro 1AP
-+ * A2AP. see PPro 2AP
-+ * A3AP. see PPro 7AP
-+ *
-+ * Pentium Pro
-+ * None of the 1AP-9AP errata are visible to the normal user,
-+ * except occasional delivery of 'spurious interrupt' as trap #15.
-+ * This is very rare and a non-problem.
-+ *
-+ * 1AP. Linux maps APIC as non-cacheable
-+ * 2AP. worked around in hardware
-+ * 3AP. fixed in C0 and above steppings microcode update.
-+ * Linux does not use excessive STARTUP_IPIs.
-+ * 4AP. worked around in hardware
-+ * 5AP. symmetric IO mode (normal Linux operation) not affected.
-+ * 'noapic' mode has vector 0xf filled out properly.
-+ * 6AP. 'noapic' mode might be affected - fixed in later steppings
-+ * 7AP. We do not assume writes to the LVT deasserting IRQs
-+ * 8AP. We do not enable low power mode (deep sleep) during MP bootup
-+ * 9AP. We do not use mixed mode
-+ *
-+ * Pentium
-+ * There is a marginal case where REP MOVS on 100MHz SMP
-+ * machines with B stepping processors can fail. XXX should provide
-+ * an L1cache=Writethrough or L1cache=off option.
-+ *
-+ * B stepping CPUs may hang. There are hardware workarounds
-+ * for this. We warn about it in case your board doesn't have the
-+ * workarounds. Basically that's so I can tell anyone with a B stepping
-+ * CPU and SMP problems "tough".
-+ *
-+ * Specific items [From Pentium Processor Specification Update]
-+ *
-+ * 1AP. Linux doesn't use remote read
-+ * 2AP. Linux doesn't trust APIC errors
-+ * 3AP. We work around this
-+ * 4AP. Linux never generated 3 interrupts of the same priority
-+ * to cause a lost local interrupt.
-+ * 5AP. Remote read is never used
-+ * 6AP. not affected - worked around in hardware
-+ * 7AP. not affected - worked around in hardware
-+ * 8AP. worked around in hardware - we get explicit CS errors if not
-+ * 9AP. only 'noapic' mode affected. Might generate spurious
-+ * interrupts, we log only the first one and count the
-+ * rest silently.
-+ * 10AP. not affected - worked around in hardware
-+ * 11AP. Linux reads the APIC between writes to avoid this, as per
-+ * the documentation. Make sure you preserve this as it affects
-+ * the C stepping chips too.
-+ * 12AP. not affected - worked around in hardware
-+ * 13AP. not affected - worked around in hardware
-+ * 14AP. we always deassert INIT during bootup
-+ * 15AP. not affected - worked around in hardware
-+ * 16AP. not affected - worked around in hardware
-+ * 17AP. not affected - worked around in hardware
-+ * 18AP. not affected - worked around in hardware
-+ * 19AP. not affected - worked around in BIOS
-+ *
-+ * If this sounds worrying, believe me, these bugs are either ___RARE___,
-+ * or are signal timing bugs worked around in hardware and there's
-+ * about nothing of note with C stepping upwards.
-+ */
-+
-+DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, };
-+
-+/*
-+ * the following functions deal with sending IPIs between CPUs.
-+ *
-+ * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
-+ */
-+
-+static inline int __prepare_ICR (unsigned int shortcut, int vector)
-+{
-+ return APIC_DM_FIXED | shortcut | vector | APIC_DEST_LOGICAL;
-+}
-+
-+static inline int __prepare_ICR2 (unsigned int mask)
-+{
-+ return SET_APIC_DEST_FIELD(mask);
-+}
-+
-+DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
-+
-+static inline void __send_IPI_one(unsigned int cpu, int vector)
-+{
-+ int irq = per_cpu(ipi_to_irq, cpu)[vector];
-+ BUG_ON(irq < 0);
-+ notify_remote_via_irq(irq);
-+}
-+
-+void __send_IPI_shortcut(unsigned int shortcut, int vector)
-+{
-+ int cpu;
-+
-+ switch (shortcut) {
-+ case APIC_DEST_SELF:
-+ __send_IPI_one(smp_processor_id(), vector);
-+ break;
-+ case APIC_DEST_ALLBUT:
-+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
-+ if (cpu == smp_processor_id())
-+ continue;
-+ if (cpu_isset(cpu, cpu_online_map)) {
-+ __send_IPI_one(cpu, vector);
-+ }
-+ }
-+ break;
-+ default:
-+ printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut,
-+ vector);
-+ break;
-+ }
-+}
-+
-+void fastcall send_IPI_self(int vector)
-+{
-+ __send_IPI_shortcut(APIC_DEST_SELF, vector);
-+}
-+
-+/*
-+ * This is only used on smaller machines.
-+ */
-+void send_IPI_mask_bitmask(cpumask_t mask, int vector)
-+{
-+ unsigned long flags;
-+ unsigned int cpu;
-+
-+ local_irq_save(flags);
-+ WARN_ON(cpus_addr(mask)[0] & ~cpus_addr(cpu_online_map)[0]);
-+
-+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
-+ if (cpu_isset(cpu, mask)) {
-+ __send_IPI_one(cpu, vector);
-+ }
-+ }
-+
-+ local_irq_restore(flags);
-+}
-+
-+void send_IPI_mask_sequence(cpumask_t mask, int vector)
-+{
-+ send_IPI_mask_bitmask(mask, vector);
-+}
-+
-+#include <mach_ipi.h> /* must come after the send_IPI functions above for inlining */
-+
-+#if 0 /* XEN */
-+/*
-+ * Smarter SMP flushing macros.
-+ * c/o Linus Torvalds.
-+ *
-+ * These mean you can really definitely utterly forget about
-+ * writing to user space from interrupts. (It's not allowed anyway.)
-+ *
-+ * Optimizations Manfred Spraul <manfred@colorfullife.com>
-+ */
-+
-+static cpumask_t flush_cpumask;
-+static struct mm_struct * flush_mm;
-+static unsigned long flush_va;
-+static DEFINE_SPINLOCK(tlbstate_lock);
-+#define FLUSH_ALL 0xffffffff
-+
-+/*
-+ * We cannot call mmdrop() because we are in interrupt context,
-+ * instead update mm->cpu_vm_mask.
-+ *
-+ * We need to reload %cr3 since the page tables may be going
-+ * away from under us..
-+ */
-+static inline void leave_mm (unsigned long cpu)
-+{
-+ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
-+ BUG();
-+ cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
-+ load_cr3(swapper_pg_dir);
-+}
-+
-+/*
-+ *
-+ * The flush IPI assumes that a thread switch happens in this order:
-+ * [cpu0: the cpu that switches]
-+ * 1) switch_mm() either 1a) or 1b)
-+ * 1a) thread switch to a different mm
-+ * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
-+ * Stop ipi delivery for the old mm. This is not synchronized with
-+ * the other cpus, but smp_invalidate_interrupt ignores flush ipis
-+ * for the wrong mm, and in the worst case we perform a superfluous
-+ * tlb flush.
-+ * 1a2) set cpu_tlbstate to TLBSTATE_OK
-+ * Now the smp_invalidate_interrupt won't call leave_mm if cpu0
-+ * was in lazy tlb mode.
-+ * 1a3) update cpu_tlbstate[].active_mm
-+ * Now cpu0 accepts tlb flushes for the new mm.
-+ * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
-+ * Now the other cpus will send tlb flush ipis.
-+ * 1a5) change cr3.
-+ * 1b) thread switch without mm change
-+ * cpu_tlbstate[].active_mm is correct, cpu0 already handles
-+ * flush ipis.
-+ * 1b1) set cpu_tlbstate to TLBSTATE_OK
-+ * 1b2) test_and_set the cpu bit in cpu_vm_mask.
-+ * Atomically set the bit [other cpus will start sending flush ipis],
-+ * and test the bit.
-+ * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
-+ * 2) switch %%esp, ie current
-+ *
-+ * The interrupt must handle 2 special cases:
-+ * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
-+ * - the cpu performs speculative tlb reads, i.e. even if the cpu only
-+ * runs in kernel space, the cpu could load tlb entries for user space
-+ * pages.
-+ *
-+ * The good news is that cpu_tlbstate is local to each cpu, no
-+ * write/read ordering problems.
-+ */
-+
-+/*
-+ * TLB flush IPI:
-+ *
-+ * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
-+ * 2) Leave the mm if we are in the lazy tlb mode.
-+ */
-+
-+irqreturn_t smp_invalidate_interrupt(int irq, void *dev_id,
-+ struct pt_regs *regs)
-+{
-+ unsigned long cpu;
-+
-+ cpu = get_cpu();
-+
-+ if (!cpu_isset(cpu, flush_cpumask))
-+ goto out;
-+ /*
-+ * This was a BUG() but until someone can quote me the
-+ * line from the intel manual that guarantees an IPI to
-+ * multiple CPUs is retried _only_ on the erroring CPUs
-+ * its staying as a return
-+ *
-+ * BUG();
-+ */
-+
-+ if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
-+ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
-+ if (flush_va == FLUSH_ALL)
-+ local_flush_tlb();
-+ else
-+ __flush_tlb_one(flush_va);
-+ } else
-+ leave_mm(cpu);
-+ }
-+ smp_mb__before_clear_bit();
-+ cpu_clear(cpu, flush_cpumask);
-+ smp_mb__after_clear_bit();
-+out:
-+ put_cpu_no_resched();
-+
-+ return IRQ_HANDLED;
-+}
-+
-+static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
-+ unsigned long va)
-+{
-+ /*
-+ * A couple of (to be removed) sanity checks:
-+ *
-+ * - current CPU must not be in mask
-+ * - mask must exist :)
-+ */
-+ BUG_ON(cpus_empty(cpumask));
-+ BUG_ON(cpu_isset(smp_processor_id(), cpumask));
-+ BUG_ON(!mm);
-+
-+ /* If a CPU which we ran on has gone down, OK. */
-+ cpus_and(cpumask, cpumask, cpu_online_map);
-+ if (cpus_empty(cpumask))
-+ return;
-+
-+ /*
-+ * I'm not happy about this global shared spinlock in the
-+ * MM hot path, but we'll see how contended it is.
-+ * Temporarily this turns IRQs off, so that lockups are
-+ * detected by the NMI watchdog.
-+ */
-+ spin_lock(&tlbstate_lock);
-+
-+ flush_mm = mm;
-+ flush_va = va;
-+#if NR_CPUS <= BITS_PER_LONG
-+ atomic_set_mask(cpumask, &flush_cpumask);
-+#else
-+ {
-+ int k;
-+ unsigned long *flush_mask = (unsigned long *)&flush_cpumask;
-+ unsigned long *cpu_mask = (unsigned long *)&cpumask;
-+ for (k = 0; k < BITS_TO_LONGS(NR_CPUS); ++k)
-+ atomic_set_mask(cpu_mask[k], &flush_mask[k]);
-+ }
-+#endif
-+ /*
-+ * We have to send the IPI only to
-+ * CPUs affected.
-+ */
-+ send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);
-+
-+ while (!cpus_empty(flush_cpumask))
-+ /* nothing. lockup detection does not belong here */
-+ mb();
-+
-+ flush_mm = NULL;
-+ flush_va = 0;
-+ spin_unlock(&tlbstate_lock);
-+}
-+
-+void flush_tlb_current_task(void)
-+{
-+ struct mm_struct *mm = current->mm;
-+ cpumask_t cpu_mask;
-+
-+ preempt_disable();
-+ cpu_mask = mm->cpu_vm_mask;
-+ cpu_clear(smp_processor_id(), cpu_mask);
-+
-+ local_flush_tlb();
-+ if (!cpus_empty(cpu_mask))
-+ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
-+ preempt_enable();
-+}
-+
-+void flush_tlb_mm (struct mm_struct * mm)
-+{
-+ cpumask_t cpu_mask;
-+
-+ preempt_disable();
-+ cpu_mask = mm->cpu_vm_mask;
-+ cpu_clear(smp_processor_id(), cpu_mask);
-+
-+ if (current->active_mm == mm) {
-+ if (current->mm)
-+ local_flush_tlb();
-+ else
-+ leave_mm(smp_processor_id());
-+ }
-+ if (!cpus_empty(cpu_mask))
-+ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
-+
-+ preempt_enable();
-+}
-+
-+void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
-+{
-+ struct mm_struct *mm = vma->vm_mm;
-+ cpumask_t cpu_mask;
-+
-+ preempt_disable();
-+ cpu_mask = mm->cpu_vm_mask;
-+ cpu_clear(smp_processor_id(), cpu_mask);
-+
-+ if (current->active_mm == mm) {
-+ if(current->mm)
-+ __flush_tlb_one(va);
-+ else
-+ leave_mm(smp_processor_id());
-+ }
-+
-+ if (!cpus_empty(cpu_mask))
-+ flush_tlb_others(cpu_mask, mm, va);
-+
-+ preempt_enable();
-+}
-+EXPORT_SYMBOL(flush_tlb_page);
-+
-+static void do_flush_tlb_all(void* info)
-+{
-+ unsigned long cpu = smp_processor_id();
-+
-+ __flush_tlb_all();
-+ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
-+ leave_mm(cpu);
-+}
-+
-+void flush_tlb_all(void)
-+{
-+ on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
-+}
-+
-+#else
-+
-+irqreturn_t smp_invalidate_interrupt(int irq, void *dev_id,
-+ struct pt_regs *regs)
-+{ return 0; }
-+void flush_tlb_current_task(void)
-+{ xen_tlb_flush_mask(&current->mm->cpu_vm_mask); }
-+void flush_tlb_mm(struct mm_struct * mm)
-+{ xen_tlb_flush_mask(&mm->cpu_vm_mask); }
-+void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
-+{ xen_invlpg_mask(&vma->vm_mm->cpu_vm_mask, va); }
-+void flush_tlb_all(void)
-+{ xen_tlb_flush_all(); }
-+
-+#endif /* XEN */
-+
-+/*
-+ * this function sends a 'reschedule' IPI to another CPU.
-+ * it goes straight through and wastes no time serializing
-+ * anything. Worst case is that we lose a reschedule ...
-+ */
-+void smp_send_reschedule(int cpu)
-+{
-+ WARN_ON(cpu_is_offline(cpu));
-+ send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
-+}
-+
-+/*
-+ * Structure and data for smp_call_function(). This is designed to minimise
-+ * static memory requirements. It also looks cleaner.
-+ */
-+static DEFINE_SPINLOCK(call_lock);
-+
-+struct call_data_struct {
-+ void (*func) (void *info);
-+ void *info;
-+ atomic_t started;
-+ atomic_t finished;
-+ int wait;
-+};
-+
-+void lock_ipi_call_lock(void)
-+{
-+ spin_lock_irq(&call_lock);
-+}
-+
-+void unlock_ipi_call_lock(void)
-+{
-+ spin_unlock_irq(&call_lock);
-+}
-+
-+static struct call_data_struct * call_data;
-+
-+/*
-+ * this function sends a 'generic call function' IPI to all other CPUs
-+ * in the system.
-+ */
-+
-+int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
-+ int wait)
-+/*
-+ * [SUMMARY] Run a function on all other CPUs.
-+ * <func> The function to run. This must be fast and non-blocking.
-+ * <info> An arbitrary pointer to pass to the function.
-+ * <nonatomic> currently unused.
-+ * <wait> If true, wait (atomically) until function has completed on other CPUs.
-+ * [RETURNS] 0 on success, else a negative status code. Does not return until
-+ * remote CPUs are nearly ready to execute <<func>>, or have already executed it.
-+ *
-+ * You must not call this function with disabled interrupts or from a
-+ * hardware interrupt handler or from a bottom half handler.
-+ */
-+{
-+ struct call_data_struct data;
-+ int cpus;
-+
-+ /* Holding any lock stops cpus from going down. */
-+ spin_lock(&call_lock);
-+ cpus = num_online_cpus() - 1;
-+ if (!cpus) {
-+ spin_unlock(&call_lock);
-+ return 0;
-+ }
-+
-+ /* Can deadlock when called with interrupts disabled */
-+ WARN_ON(irqs_disabled());
-+
-+ data.func = func;
-+ data.info = info;
-+ atomic_set(&data.started, 0);
-+ data.wait = wait;
-+ if (wait)
-+ atomic_set(&data.finished, 0);
-+
-+ call_data = &data;
-+ mb();
-+
-+ /* Send a message to all other CPUs and wait for them to respond */
-+ send_IPI_allbutself(CALL_FUNCTION_VECTOR);
-+
-+ /* Wait for response */
-+ while (atomic_read(&data.started) != cpus)
-+ barrier();
-+
-+ if (wait)
-+ while (atomic_read(&data.finished) != cpus)
-+ barrier();
-+ spin_unlock(&call_lock);
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(smp_call_function);
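-+
-+/*
-+ * Illustrative usage (a sketch only; do_wrmsr() and struct msr_args are
-+ * hypothetical and not part of this file): run a fast, non-blocking
-+ * callback on every CPU and wait for completion.
-+ *
-+ *	struct msr_args { u32 msr, low, high; };
-+ *
-+ *	static void do_wrmsr(void *info)
-+ *	{
-+ *		struct msr_args *a = info;
-+ *		wrmsr(a->msr, a->low, a->high);
-+ *	}
-+ *
-+ *	do_wrmsr(&args);                           (this CPU)
-+ *	smp_call_function(do_wrmsr, &args, 1, 1);  (all other CPUs, wait)
-+ */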
-+
-+static void stop_this_cpu (void * dummy)
-+{
-+ /*
-+ * Remove this CPU:
-+ */
-+ cpu_clear(smp_processor_id(), cpu_online_map);
-+ local_irq_disable();
-+#if 0
-+ disable_local_APIC();
-+#endif
-+ if (cpu_data[smp_processor_id()].hlt_works_ok)
-+ for(;;) halt();
-+ for (;;);
-+}
-+
-+/*
-+ * this function calls the 'stop' function on all other CPUs in the system.
-+ */
-+
-+void smp_send_stop(void)
-+{
-+ smp_call_function(stop_this_cpu, NULL, 1, 0);
-+
-+ local_irq_disable();
-+#if 0
-+ disable_local_APIC();
-+#endif
-+ local_irq_enable();
-+}
-+
-+/*
-+ * Reschedule callback. Nothing to do,
-+ * all the work is done automatically when
-+ * we return from the interrupt.
-+ */
-+irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id,
-+ struct pt_regs *regs)
-+{
-+
-+ return IRQ_HANDLED;
-+}
-+
-+#include <linux/kallsyms.h>
-+irqreturn_t smp_call_function_interrupt(int irq, void *dev_id,
-+ struct pt_regs *regs)
-+{
-+ void (*func) (void *info) = call_data->func;
-+ void *info = call_data->info;
-+ int wait = call_data->wait;
-+
-+ /*
-+ * Notify initiating CPU that I've grabbed the data and am
-+ * about to execute the function
-+ */
-+ mb();
-+ atomic_inc(&call_data->started);
-+ /*
-+ * At this point the info structure may be out of scope unless wait==1
-+ */
-+ irq_enter();
-+ (*func)(info);
-+ irq_exit();
-+
-+ if (wait) {
-+ mb();
-+ atomic_inc(&call_data->finished);
-+ }
-+
-+ return IRQ_HANDLED;
-+}
-+
-diff --git a/arch/i386/kernel/smpalts.c b/arch/i386/kernel/smpalts.c
-new file mode 100644
-index 0000000..5a32e54
---- /dev/null
-+++ b/arch/i386/kernel/smpalts.c
-@@ -0,0 +1,85 @@
-+#include <linux/kernel.h>
-+#include <asm/system.h>
-+#include <asm/smp_alt.h>
-+#include <asm/processor.h>
-+#include <asm/string.h>
-+
-+struct smp_replacement_record {
-+ unsigned char targ_size;
-+ unsigned char smp1_size;
-+ unsigned char smp2_size;
-+ unsigned char up_size;
-+ unsigned char feature;
-+ unsigned char data[0];
-+};
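-+
-+/*
-+ * data[] holds the three alternative instruction sequences back to back:
-+ * the first SMP variant (smp1_size bytes), a second SMP variant used when
-+ * the CPU has 'feature' (the next smp2_size bytes), and the UP variant
-+ * (up_size bytes).  prepare_for_smp()/unprepare_for_smp() below copy the
-+ * chosen variant over targ_start and pad the rest of the targ_size window
-+ * with 0x90 (NOP).
-+ */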
-+
-+struct smp_alternative_record {
-+ void *targ_start;
-+ struct smp_replacement_record *repl;
-+};
-+
-+extern struct smp_alternative_record __start_smp_alternatives_table,
-+ __stop_smp_alternatives_table;
-+extern unsigned long __init_begin, __init_end;
-+
-+void prepare_for_smp(void)
-+{
-+ struct smp_alternative_record *r;
-+ printk(KERN_INFO "Enabling SMP...\n");
-+ for (r = &__start_smp_alternatives_table;
-+ r != &__stop_smp_alternatives_table;
-+ r++) {
-+ BUG_ON(r->repl->targ_size < r->repl->smp1_size);
-+ BUG_ON(r->repl->targ_size < r->repl->smp2_size);
-+ BUG_ON(r->repl->targ_size < r->repl->up_size);
-+ if (system_state == SYSTEM_RUNNING &&
-+ r->targ_start >= (void *)&__init_begin &&
-+ r->targ_start < (void *)&__init_end)
-+ continue;
-+ if (r->repl->feature != (unsigned char)-1 &&
-+ boot_cpu_has(r->repl->feature)) {
-+ memcpy(r->targ_start,
-+ r->repl->data + r->repl->smp1_size,
-+ r->repl->smp2_size);
-+ memset(r->targ_start + r->repl->smp2_size,
-+ 0x90,
-+ r->repl->targ_size - r->repl->smp2_size);
-+ } else {
-+ memcpy(r->targ_start,
-+ r->repl->data,
-+ r->repl->smp1_size);
-+ memset(r->targ_start + r->repl->smp1_size,
-+ 0x90,
-+ r->repl->targ_size - r->repl->smp1_size);
-+ }
-+ }
-+ /* Paranoia */
-+ asm volatile ("jmp 1f\n1:");
-+ mb();
-+}
-+
-+void unprepare_for_smp(void)
-+{
-+ struct smp_alternative_record *r;
-+ printk(KERN_INFO "Disabling SMP...\n");
-+ for (r = &__start_smp_alternatives_table;
-+ r != &__stop_smp_alternatives_table;
-+ r++) {
-+ BUG_ON(r->repl->targ_size < r->repl->smp1_size);
-+ BUG_ON(r->repl->targ_size < r->repl->smp2_size);
-+ BUG_ON(r->repl->targ_size < r->repl->up_size);
-+ if (system_state == SYSTEM_RUNNING &&
-+ r->targ_start >= (void *)&__init_begin &&
-+ r->targ_start < (void *)&__init_end)
-+ continue;
-+ memcpy(r->targ_start,
-+ r->repl->data + r->repl->smp1_size + r->repl->smp2_size,
-+ r->repl->up_size);
-+ memset(r->targ_start + r->repl->up_size,
-+ 0x90,
-+ r->repl->targ_size - r->repl->up_size);
-+ }
-+ /* Paranoia */
-+ asm volatile ("jmp 1f\n1:");
-+ mb();
-+}
-diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
-index eba7f53..378708c 100644
---- a/arch/i386/kernel/smpboot.c
-+++ b/arch/i386/kernel/smpboot.c
-@@ -1208,6 +1208,11 @@ static void __init smp_boot_cpus(unsigne
- if (max_cpus <= cpucount+1)
- continue;
-
-+#ifdef CONFIG_SMP_ALTERNATIVES
-+ if (kicked == 1)
-+ prepare_for_smp();
-+#endif
-+
- if (((cpu = alloc_cpu_id()) <= 0) || do_boot_cpu(apicid, cpu))
- printk("CPU #%d not responding - cannot use it.\n",
- apicid);
-@@ -1386,6 +1391,11 @@ int __devinit __cpu_up(unsigned int cpu)
- return -EIO;
- }
-
-+#ifdef CONFIG_SMP_ALTERNATIVES
-+ if (num_online_cpus() == 1)
-+ prepare_for_smp();
-+#endif
-+
- local_irq_enable();
- per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
- /* Unleash the CPU! */
-diff --git a/arch/i386/kernel/swiotlb.c b/arch/i386/kernel/swiotlb.c
-new file mode 100644
-index 0000000..e493e34
---- /dev/null
-+++ b/arch/i386/kernel/swiotlb.c
-@@ -0,0 +1,674 @@
-+/*
-+ * Dynamic DMA mapping support.
-+ *
-+ * This implementation is a fallback for platforms that do not support
-+ * I/O TLBs (aka DMA address translation hardware).
-+ * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
-+ * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
-+ * Copyright (C) 2000, 2003 Hewlett-Packard Co
-+ * David Mosberger-Tang <davidm@hpl.hp.com>
-+ * Copyright (C) 2005 Keir Fraser <keir@xensource.com>
-+ */
-+
-+#include <linux/cache.h>
-+#include <linux/mm.h>
-+#include <linux/module.h>
-+#include <linux/pci.h>
-+#include <linux/spinlock.h>
-+#include <linux/string.h>
-+#include <linux/types.h>
-+#include <linux/ctype.h>
-+#include <linux/init.h>
-+#include <linux/bootmem.h>
-+#include <linux/highmem.h>
-+#include <asm/io.h>
-+#include <asm/pci.h>
-+#include <asm/dma.h>
-+#include <asm/uaccess.h>
-+#include <xen/interface/memory.h>
-+
-+int swiotlb;
-+EXPORT_SYMBOL(swiotlb);
-+
-+#define OFFSET(val,align) ((unsigned long)((val) & ( (align) - 1)))
-+
-+#define SG_ENT_PHYS_ADDRESS(sg) (page_to_phys((sg)->page) + (sg)->offset)
-+
-+/*
-+ * Maximum allowable number of contiguous slabs to map,
-+ * must be a power of 2. What is the appropriate value ?
-+ * The complexity of {map,unmap}_single is linearly dependent on this value.
-+ */
-+#define IO_TLB_SEGSIZE 128
-+
-+/*
-+ * log of the size of each IO TLB slab. The number of slabs is command line
-+ * controllable.
-+ */
-+#define IO_TLB_SHIFT 11
-+
-+static int swiotlb_force;
-+static char *iotlb_virt_start;
-+static unsigned long iotlb_nslabs;
-+
-+/*
-+ * Used to do a quick range check in swiotlb_unmap_single and
-+ * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
-+ * API.
-+ */
-+static dma_addr_t iotlb_bus_start, iotlb_bus_end, iotlb_bus_mask;
-+
-+/* Does the given dma address reside within the swiotlb aperture? */
-+#define in_swiotlb_aperture(a) (!(((a) ^ iotlb_bus_start) & iotlb_bus_mask))
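-+
-+/*
-+ * This works because 'bytes' is a power of two (see the rounding below)
-+ * and the aperture's bus address is assumed to be aligned to its size:
-+ * XOR-ing an in-aperture address with iotlb_bus_start leaves only offset
-+ * bits, all of which iotlb_bus_mask clears.
-+ */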
-+
-+/*
-+ * When the IOMMU overflows we return a fallback buffer. This sets the size.
-+ */
-+static unsigned long io_tlb_overflow = 32*1024;
-+
-+void *io_tlb_overflow_buffer;
-+
-+/*
-+ * This is a free list describing the number of free entries available from
-+ * each index
-+ */
-+static unsigned int *io_tlb_list;
-+static unsigned int io_tlb_index;
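-+
-+/*
-+ * Encoding: io_tlb_list[i] is the number of contiguous free slots starting
-+ * at slot i, counted up to the next IO_TLB_SEGSIZE boundary; 0 means the
-+ * slot is in use.  A fully free segment therefore reads 128, 127, ..., 1,
-+ * and map_single() need only test a single entry to know whether 'nslots'
-+ * consecutive slots are free at that index.
-+ */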
-+
-+/*
-+ * We need to save away the original address corresponding to a mapped entry
-+ * for the sync operations.
-+ */
-+static struct phys_addr {
-+ struct page *page;
-+ unsigned int offset;
-+} *io_tlb_orig_addr;
-+
-+/*
-+ * Protect the above data structures in the map and unmap calls
-+ */
-+static DEFINE_SPINLOCK(io_tlb_lock);
-+
-+static int __init
-+setup_io_tlb_npages(char *str)
-+{
-+ /* Unlike ia64, the size is the aperture in megabytes, not 'slabs'! */
-+ if (isdigit(*str)) {
-+ iotlb_nslabs = simple_strtoul(str, &str, 0) <<
-+ (20 - IO_TLB_SHIFT);
-+ iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
-+ /* Round up to power of two (xen_create_contiguous_region). */
-+ while (iotlb_nslabs & (iotlb_nslabs-1))
-+ iotlb_nslabs += iotlb_nslabs & ~(iotlb_nslabs-1);
-+ }
-+ if (*str == ',')
-+ ++str;
-+ /*
-+ * NB. 'force' enables the swiotlb, but doesn't force its use for
-+ * every DMA like it does on native Linux. 'off' forcibly disables
-+ * use of the swiotlb.
-+ */
-+ if (!strcmp(str, "force"))
-+ swiotlb_force = 1;
-+ else if (!strcmp(str, "off"))
-+ swiotlb_force = -1;
-+ return 1;
-+}
-+__setup("swiotlb=", setup_io_tlb_npages);
-+/* make io_tlb_overflow tunable too? */
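-+
-+/*
-+ * Example boot parameters (the leading number is the aperture size in
-+ * megabytes, converted to 2KB slabs via IO_TLB_SHIFT):
-+ *
-+ *	swiotlb=64		64MB aperture, enabled automatically in dom0
-+ *	swiotlb=64,force	64MB aperture, always enabled
-+ *	swiotlb=off		never use the software IO TLB
-+ */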
-+
-+/*
-+ * Statically reserve bounce buffer space and initialize bounce buffer data
-+ * structures for the software IO TLB used to implement the PCI DMA API.
-+ */
-+void
-+swiotlb_init_with_default_size (size_t default_size)
-+{
-+ unsigned long i, bytes;
-+ int rc;
-+
-+ if (!iotlb_nslabs) {
-+ iotlb_nslabs = (default_size >> IO_TLB_SHIFT);
-+ iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
-+ /* Round up to power of two (xen_create_contiguous_region). */
-+ while (iotlb_nslabs & (iotlb_nslabs-1))
-+ iotlb_nslabs += iotlb_nslabs & ~(iotlb_nslabs-1);
-+ }
-+
-+ bytes = iotlb_nslabs * (1UL << IO_TLB_SHIFT);
-+
-+ /*
-+ * Get IO TLB memory from the low pages
-+ */
-+ iotlb_virt_start = alloc_bootmem_low_pages(bytes);
-+ if (!iotlb_virt_start)
-+ panic("Cannot allocate SWIOTLB buffer!\n"
-+ "Use dom0_mem Xen boot parameter to reserve\n"
-+ "some DMA memory (e.g., dom0_mem=-128M).\n");
-+
-+ /* Hardcode 31 address bits for now: aacraid limitation. */
-+ rc = xen_create_contiguous_region(
-+ (unsigned long)iotlb_virt_start, get_order(bytes), 31);
-+ BUG_ON(rc);
-+
-+ /*
-+ * Allocate and initialize the free list array. This array is used
-+ * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE.
-+ */
-+ io_tlb_list = alloc_bootmem(iotlb_nslabs * sizeof(int));
-+ for (i = 0; i < iotlb_nslabs; i++)
-+ io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
-+ io_tlb_index = 0;
-+ io_tlb_orig_addr = alloc_bootmem(
-+ iotlb_nslabs * sizeof(*io_tlb_orig_addr));
-+
-+ /*
-+ * Get the overflow emergency buffer
-+ */
-+ io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
-+
-+ iotlb_bus_start = virt_to_bus(iotlb_virt_start);
-+ iotlb_bus_end = iotlb_bus_start + bytes;
-+ iotlb_bus_mask = ~(dma_addr_t)(bytes - 1);
-+
-+ printk(KERN_INFO "Software IO TLB enabled: \n"
-+ " Aperture: %lu megabytes\n"
-+ " Bus range: 0x%016lx - 0x%016lx\n"
-+ " Kernel range: 0x%016lx - 0x%016lx\n",
-+ bytes >> 20,
-+ (unsigned long)iotlb_bus_start,
-+ (unsigned long)iotlb_bus_end,
-+ (unsigned long)iotlb_virt_start,
-+ (unsigned long)iotlb_virt_start + bytes);
-+}
-+
-+void
-+swiotlb_init(void)
-+{
-+ long ram_end;
-+ size_t defsz = 64 * (1 << 20); /* 64MB default size */
-+
-+ if (swiotlb_force == 1) {
-+ swiotlb = 1;
-+ } else if ((swiotlb_force != -1) &&
-+ (xen_start_info->flags & SIF_INITDOMAIN)) {
-+ /* Domain 0 always has a swiotlb. */
-+ ram_end = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
-+ if (ram_end <= 0x7ffff)
-+ defsz = 2 * (1 << 20); /* 2MB on systems with <2GB RAM. */
-+ swiotlb = 1;
-+ }
-+
-+ if (swiotlb)
-+ swiotlb_init_with_default_size(defsz);
-+ else
-+ printk(KERN_INFO "Software IO TLB disabled\n");
-+}
-+
-+/*
-+ * We use __copy_to_user to transfer to the host buffer because the buffer
-+ * may be mapped read-only (e.g., in the blkback driver) but lower-level
-+ * drivers map the buffer for DMA_BIDIRECTIONAL access. This causes an
-+ * unnecessary copy from the aperture to the host buffer, and a page fault.
-+ */
-+static void
-+__sync_single(struct phys_addr buffer, char *dma_addr, size_t size, int dir)
-+{
-+ if (PageHighMem(buffer.page)) {
-+ size_t len, bytes;
-+ char *dev, *host, *kmp;
-+ len = size;
-+ while (len != 0) {
-+ if (((bytes = len) + buffer.offset) > PAGE_SIZE)
-+ bytes = PAGE_SIZE - buffer.offset;
-+ kmp = kmap_atomic(buffer.page, KM_SWIOTLB);
-+ dev = dma_addr + size - len;
-+ host = kmp + buffer.offset;
-+ if (dir == DMA_FROM_DEVICE) {
-+ if (__copy_to_user(host, dev, bytes))
-+ /* inaccessible */;
-+ } else
-+ memcpy(dev, host, bytes);
-+ kunmap_atomic(kmp, KM_SWIOTLB);
-+ len -= bytes;
-+ buffer.page++;
-+ buffer.offset = 0;
-+ }
-+ } else {
-+ char *host = (char *)phys_to_virt(
-+ page_to_pseudophys(buffer.page)) + buffer.offset;
-+ if (dir == DMA_FROM_DEVICE) {
-+ if (__copy_to_user(host, dma_addr, size))
-+ /* inaccessible */;
-+ } else if (dir == DMA_TO_DEVICE)
-+ memcpy(dma_addr, host, size);
-+ }
-+}
-+
-+/*
-+ * Allocates bounce buffer and returns its kernel virtual address.
-+ */
-+static void *
-+map_single(struct device *hwdev, struct phys_addr buffer, size_t size, int dir)
-+{
-+ unsigned long flags;
-+ char *dma_addr;
-+ unsigned int nslots, stride, index, wrap;
-+ int i;
-+
-+ /*
-+ * For mappings greater than a page, we limit the stride (and
-+ * hence alignment) to a page size.
-+ */
-+ nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-+ if (size > PAGE_SIZE)
-+ stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
-+ else
-+ stride = 1;
-+
-+ BUG_ON(!nslots);
-+
-+ /*
-+ * Find a suitable number of IO TLB entries that will fit this
-+ * request and allocate a buffer from that IO TLB pool.
-+ */
-+ spin_lock_irqsave(&io_tlb_lock, flags);
-+ {
-+ wrap = index = ALIGN(io_tlb_index, stride);
-+
-+ if (index >= iotlb_nslabs)
-+ wrap = index = 0;
-+
-+ do {
-+ /*
-+ * If we find a slot that indicates we have 'nslots'
-+ * number of contiguous buffers, we allocate the
-+ * buffers from that slot and mark the entries as '0'
-+ * indicating unavailable.
-+ */
-+ if (io_tlb_list[index] >= nslots) {
-+ int count = 0;
-+
-+ for (i = index; i < (int)(index + nslots); i++)
-+ io_tlb_list[i] = 0;
-+ for (i = index - 1;
-+ (OFFSET(i, IO_TLB_SEGSIZE) !=
-+ IO_TLB_SEGSIZE -1) && io_tlb_list[i];
-+ i--)
-+ io_tlb_list[i] = ++count;
-+ dma_addr = iotlb_virt_start +
-+ (index << IO_TLB_SHIFT);
-+
-+ /*
-+ * Update the indices to avoid searching in
-+ * the next round.
-+ */
-+ io_tlb_index =
-+ ((index + nslots) < iotlb_nslabs
-+ ? (index + nslots) : 0);
-+
-+ goto found;
-+ }
-+ index += stride;
-+ if (index >= iotlb_nslabs)
-+ index = 0;
-+ } while (index != wrap);
-+
-+ spin_unlock_irqrestore(&io_tlb_lock, flags);
-+ return NULL;
-+ }
-+ found:
-+ spin_unlock_irqrestore(&io_tlb_lock, flags);
-+
-+ /*
-+ * Save away the mapping from the original address to the DMA address.
-+ * This is needed when we sync the memory. Then we sync the buffer if
-+ * needed.
-+ */
-+ io_tlb_orig_addr[index] = buffer;
-+ if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL))
-+ __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
-+
-+ return dma_addr;
-+}
-+
-+/*
-+ * dma_addr is the kernel virtual address of the bounce buffer to unmap.
-+ */
-+static void
-+unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
-+{
-+ unsigned long flags;
-+ int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-+ int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
-+ struct phys_addr buffer = io_tlb_orig_addr[index];
-+
-+ /*
-+ * First, sync the memory before unmapping the entry
-+ */
-+ if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
-+ __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
-+
-+ /*
-+ * Return the buffer to the free list by setting the corresponding
-+ * entries to indicate the number of contiguous entries available.
-+ * While returning the entries to the free list, we merge the entries
-+ * with slots below and above the pool being returned.
-+ */
-+ spin_lock_irqsave(&io_tlb_lock, flags);
-+ {
-+ count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
-+ io_tlb_list[index + nslots] : 0);
-+ /*
-+ * Step 1: return the slots to the free list, merging them
-+ * with the succeeding slots
-+ */
-+ for (i = index + nslots - 1; i >= index; i--)
-+ io_tlb_list[i] = ++count;
-+ /*
-+ * Step 2: merge the returned slots with the preceding slots,
-+ * if available (non-zero)
-+ */
-+ for (i = index - 1;
-+ (OFFSET(i, IO_TLB_SEGSIZE) !=
-+ IO_TLB_SEGSIZE -1) && io_tlb_list[i];
-+ i--)
-+ io_tlb_list[i] = ++count;
-+ }
-+ spin_unlock_irqrestore(&io_tlb_lock, flags);
-+}
-+
-+static void
-+sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
-+{
-+ int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
-+ struct phys_addr buffer = io_tlb_orig_addr[index];
-+ BUG_ON((dir != DMA_FROM_DEVICE) && (dir != DMA_TO_DEVICE));
-+ __sync_single(buffer, dma_addr, size, dir);
-+}
-+
-+static void
-+swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
-+{
-+ /*
-+ * Ran out of IOMMU space for this operation. This is very bad.
-+ * Unfortunately drivers cannot handle this failure properly
-+ * unless they check for pci_dma_mapping_error() (most don't).
-+ * When the mapping is small enough, return a static buffer to limit
-+ * the damage; panic when the transfer is too big.
-+ */
-+ printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %lu bytes at "
-+ "device %s\n", (unsigned long)size, dev ? dev->bus_id : "?");
-+
-+ if (size > io_tlb_overflow && do_panic) {
-+ if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
-+ panic("PCI-DMA: Memory would be corrupted\n");
-+ if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
-+ panic("PCI-DMA: Random memory would be DMAed\n");
-+ }
-+}
-+
-+/*
-+ * Map a single buffer of the indicated size for DMA in streaming mode. The
-+ * PCI address to use is returned.
-+ *
-+ * Once the device is given the dma address, the device owns this memory until
-+ * either swiotlb_unmap_single or swiotlb_sync_single_for_cpu is performed.
-+ */
-+dma_addr_t
-+swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
-+{
-+ dma_addr_t dev_addr = virt_to_bus(ptr);
-+ void *map;
-+ struct phys_addr buffer;
-+
-+ BUG_ON(dir == DMA_NONE);
-+
-+ /*
-+ * If the pointer passed in happens to be in the device's DMA window,
-+ * we can safely return the device addr and not worry about bounce
-+ * buffering it.
-+ */
-+ if (!range_straddles_page_boundary(ptr, size) &&
-+ !address_needs_mapping(hwdev, dev_addr))
-+ return dev_addr;
-+
-+ /*
-+ * Oh well, have to allocate and map a bounce buffer.
-+ */
-+ buffer.page = virt_to_page(ptr);
-+ buffer.offset = (unsigned long)ptr & ~PAGE_MASK;
-+ map = map_single(hwdev, buffer, size, dir);
-+ if (!map) {
-+ swiotlb_full(hwdev, size, dir, 1);
-+ map = io_tlb_overflow_buffer;
-+ }
-+
-+ dev_addr = virt_to_bus(map);
-+ return dev_addr;
-+}
-+
-+/*
-+ * Unmap a single streaming mode DMA translation. The dma_addr and size must
-+ * match what was provided for in a previous swiotlb_map_single call. All
-+ * other usages are undefined.
-+ *
-+ * After this call, reads by the cpu to the buffer are guaranteed to see
-+ * whatever the device wrote there.
-+ */
-+void
-+swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
-+ int dir)
-+{
-+ BUG_ON(dir == DMA_NONE);
-+ if (in_swiotlb_aperture(dev_addr))
-+ unmap_single(hwdev, bus_to_virt(dev_addr), size, dir);
-+}
-+
-+/*
-+ * Make physical memory consistent for a single streaming mode DMA translation
-+ * after a transfer.
-+ *
-+ * If you perform a swiotlb_map_single() but wish to interrogate the buffer
-+ * using the cpu, yet do not wish to tear down the PCI dma mapping, you must
-+ * call this function before doing so. At the next point you give the PCI dma
-+ * address back to the card, you must first perform a
-+ * swiotlb_sync_single_for_device, and then the device again owns the buffer.
-+ */
-+void
-+swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
-+ size_t size, int dir)
-+{
-+ BUG_ON(dir == DMA_NONE);
-+ if (in_swiotlb_aperture(dev_addr))
-+ sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
-+}
-+
-+void
-+swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
-+ size_t size, int dir)
-+{
-+ BUG_ON(dir == DMA_NONE);
-+ if (in_swiotlb_aperture(dev_addr))
-+ sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
-+}
-+
-+/*
-+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
-+ * This is the scatter-gather version of the above swiotlb_map_single
-+ * interface. Here the scatter gather list elements are each tagged with the
-+ * appropriate dma address and length. They are obtained via
-+ * sg_dma_{address,length}(SG).
-+ *
-+ * NOTE: An implementation may be able to use a smaller number of
-+ * DMA address/length pairs than there are SG table elements.
-+ * (for example, via virtual mapping capabilities).
-+ * The routine returns the number of addr/length pairs actually
-+ * used, at most nents.
-+ *
-+ * Device ownership issues as mentioned above for swiotlb_map_single are the
-+ * same here.
-+ */
-+int
-+swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
-+ int dir)
-+{
-+ struct phys_addr buffer;
-+ dma_addr_t dev_addr;
-+ char *map;
-+ int i;
-+
-+ BUG_ON(dir == DMA_NONE);
-+
-+ for (i = 0; i < nelems; i++, sg++) {
-+ dev_addr = SG_ENT_PHYS_ADDRESS(sg);
-+ if (address_needs_mapping(hwdev, dev_addr)) {
-+ buffer.page = sg->page;
-+ buffer.offset = sg->offset;
-+ map = map_single(hwdev, buffer, sg->length, dir);
-+ if (!map) {
-+ /* Don't panic here, we expect map_sg users
-+ to do proper error handling. */
-+ swiotlb_full(hwdev, sg->length, dir, 0);
-+ swiotlb_unmap_sg(hwdev, sg - i, i, dir);
-+ sg[0].dma_length = 0;
-+ return 0;
-+ }
-+ sg->dma_address = (dma_addr_t)virt_to_bus(map);
-+ } else
-+ sg->dma_address = dev_addr;
-+ sg->dma_length = sg->length;
-+ }
-+ return nelems;
-+}
-+
-+/*
-+ * Unmap a set of streaming mode DMA translations. Again, cpu read rules
-+ * concerning calls here are the same as for swiotlb_unmap_single() above.
-+ */
-+void
-+swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
-+ int dir)
-+{
-+ int i;
-+
-+ BUG_ON(dir == DMA_NONE);
-+
-+ for (i = 0; i < nelems; i++, sg++)
-+ if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-+ unmap_single(hwdev,
-+ (void *)bus_to_virt(sg->dma_address),
-+ sg->dma_length, dir);
-+}
-+
-+/*
-+ * Make physical memory consistent for a set of streaming mode DMA translations
-+ * after a transfer.
-+ *
-+ * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
-+ * and usage.
-+ */
-+void
-+swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
-+ int nelems, int dir)
-+{
-+ int i;
-+
-+ BUG_ON(dir == DMA_NONE);
-+
-+ for (i = 0; i < nelems; i++, sg++)
-+ if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-+ sync_single(hwdev,
-+ (void *)bus_to_virt(sg->dma_address),
-+ sg->dma_length, dir);
-+}
-+
-+void
-+swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
-+ int nelems, int dir)
-+{
-+ int i;
-+
-+ BUG_ON(dir == DMA_NONE);
-+
-+ for (i = 0; i < nelems; i++, sg++)
-+ if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-+ sync_single(hwdev,
-+ (void *)bus_to_virt(sg->dma_address),
-+ sg->dma_length, dir);
-+}
-+
-+dma_addr_t
-+swiotlb_map_page(struct device *hwdev, struct page *page,
-+ unsigned long offset, size_t size,
-+ enum dma_data_direction direction)
-+{
-+ struct phys_addr buffer;
-+ dma_addr_t dev_addr;
-+ char *map;
-+
-+ dev_addr = page_to_phys(page) + offset;
-+ if (address_needs_mapping(hwdev, dev_addr)) {
-+ buffer.page = page;
-+ buffer.offset = offset;
-+ map = map_single(hwdev, buffer, size, direction);
-+ if (!map) {
-+ swiotlb_full(hwdev, size, direction, 1);
-+ map = io_tlb_overflow_buffer;
-+ }
-+ dev_addr = (dma_addr_t)virt_to_bus(map);
-+ }
-+
-+ return dev_addr;
-+}
-+
-+void
-+swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address,
-+ size_t size, enum dma_data_direction direction)
-+{
-+ BUG_ON(direction == DMA_NONE);
-+ if (in_swiotlb_aperture(dma_address))
-+ unmap_single(hwdev, bus_to_virt(dma_address), size, direction);
-+}
-+
-+int
-+swiotlb_dma_mapping_error(dma_addr_t dma_addr)
-+{
-+ return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
-+}
-+
-+/*
-+ * Return whether the given PCI device DMA address mask can be supported
-+ * properly. For example, if your device can only drive the low 24-bits
-+ * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
-+ * this function.
-+ */
-+int
-+swiotlb_dma_supported (struct device *hwdev, u64 mask)
-+{
-+ return (mask >= (iotlb_bus_end - 1));
-+}
-+
-+EXPORT_SYMBOL(swiotlb_init);
-+EXPORT_SYMBOL(swiotlb_map_single);
-+EXPORT_SYMBOL(swiotlb_unmap_single);
-+EXPORT_SYMBOL(swiotlb_map_sg);
-+EXPORT_SYMBOL(swiotlb_unmap_sg);
-+EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
-+EXPORT_SYMBOL(swiotlb_sync_single_for_device);
-+EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
-+EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
-+EXPORT_SYMBOL(swiotlb_map_page);
-+EXPORT_SYMBOL(swiotlb_unmap_page);
-+EXPORT_SYMBOL(swiotlb_dma_mapping_error);
-+EXPORT_SYMBOL(swiotlb_dma_supported);
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/arch/i386/kernel/sysenter.c b/arch/i386/kernel/sysenter.c
-index 0bada18..99193bb 100644
---- a/arch/i386/kernel/sysenter.c
-+++ b/arch/i386/kernel/sysenter.c
-@@ -13,6 +13,7 @@
- #include <linux/gfp.h>
- #include <linux/string.h>
- #include <linux/elf.h>
-+#include <linux/mm.h>
-
- #include <asm/cpufeature.h>
- #include <asm/msr.h>
-@@ -23,6 +24,7 @@ extern asmlinkage void sysenter_entry(vo
-
- void enable_sep_cpu(void)
- {
-+#ifdef CONFIG_X86_SYSENTER
- int cpu = get_cpu();
- struct tss_struct *tss = &per_cpu(init_tss, cpu);
-
-@@ -37,6 +39,7 @@ void enable_sep_cpu(void)
- wrmsr(MSR_IA32_SYSENTER_ESP, tss->esp1, 0);
- wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) sysenter_entry, 0);
- put_cpu();
-+#endif
- }
-
- /*
-@@ -45,23 +48,90 @@ void enable_sep_cpu(void)
- */
- extern const char vsyscall_int80_start, vsyscall_int80_end;
- extern const char vsyscall_sysenter_start, vsyscall_sysenter_end;
-+static void *syscall_page;
-
- int __init sysenter_setup(void)
- {
-- void *page = (void *)get_zeroed_page(GFP_ATOMIC);
-+ syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
-
-- __set_fixmap(FIX_VSYSCALL, __pa(page), PAGE_READONLY_EXEC);
--
-- if (!boot_cpu_has(X86_FEATURE_SEP)) {
-- memcpy(page,
-- &vsyscall_int80_start,
-- &vsyscall_int80_end - &vsyscall_int80_start);
-+#ifdef CONFIG_X86_SYSENTER
-+ if (boot_cpu_has(X86_FEATURE_SEP)) {
-+ memcpy(syscall_page,
-+ &vsyscall_sysenter_start,
-+ &vsyscall_sysenter_end - &vsyscall_sysenter_start);
- return 0;
- }
-+#endif
-
-- memcpy(page,
-- &vsyscall_sysenter_start,
-- &vsyscall_sysenter_end - &vsyscall_sysenter_start);
-+ memcpy(syscall_page,
-+ &vsyscall_int80_start,
-+ &vsyscall_int80_end - &vsyscall_int80_start);
-
- return 0;
- }
-+
-+static struct page*
-+syscall_nopage(struct vm_area_struct *vma, unsigned long adr, int *type)
-+{
-+ struct page *p = virt_to_page(adr - vma->vm_start + syscall_page);
-+ get_page(p);
-+ return p;
-+}
-+
-+/* Prevent VMA merging */
-+static void syscall_vma_close(struct vm_area_struct *vma)
-+{
-+}
-+
-+static struct vm_operations_struct syscall_vm_ops = {
-+ .close = syscall_vma_close,
-+ .nopage = syscall_nopage,
-+};
-+
-+/* Setup a VMA at program startup for the vsyscall page */
-+int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
-+{
-+ struct vm_area_struct *vma;
-+ struct mm_struct *mm = current->mm;
-+ int ret;
-+
-+ vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
-+ if (!vma)
-+ return -ENOMEM;
-+
-+ memset(vma, 0, sizeof(struct vm_area_struct));
-+ /* Could randomize here */
-+ vma->vm_start = VSYSCALL_BASE;
-+ vma->vm_end = VSYSCALL_BASE + PAGE_SIZE;
-+ /* MAYWRITE to allow gdb to COW and set breakpoints */
-+ vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
-+ vma->vm_flags |= mm->def_flags;
-+ vma->vm_page_prot = protection_map[vma->vm_flags & 7];
-+ vma->vm_ops = &syscall_vm_ops;
-+ vma->vm_mm = mm;
-+
-+ down_write(&mm->mmap_sem);
-+ if ((ret = insert_vm_struct(mm, vma))) {
-+ up_write(&mm->mmap_sem);
-+ kmem_cache_free(vm_area_cachep, vma);
-+ return ret;
-+ }
-+ mm->total_vm++;
-+ up_write(&mm->mmap_sem);
-+ return 0;
-+}
-+
-+struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
-+{
-+ return NULL;
-+}
-+
-+int in_gate_area(struct task_struct *task, unsigned long addr)
-+{
-+ return 0;
-+}
-+
-+int in_gate_area_no_task(unsigned long addr)
-+{
-+ return 0;
-+}
-diff --git a/arch/i386/kernel/time-xen.c b/arch/i386/kernel/time-xen.c
-new file mode 100644
-index 0000000..39cf69d
---- /dev/null
-+++ b/arch/i386/kernel/time-xen.c
-@@ -0,0 +1,1137 @@
-+/*
-+ * linux/arch/i386/kernel/time.c
-+ *
-+ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
-+ *
-+ * This file contains the PC-specific time handling details:
-+ * reading the RTC at bootup, etc..
-+ * 1994-07-02 Alan Modra
-+ * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
-+ * 1995-03-26 Markus Kuhn
-+ * fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
-+ * precision CMOS clock update
-+ * 1996-05-03 Ingo Molnar
-+ * fixed time warps in do_[slow|fast]_gettimeoffset()
-+ * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
-+ * "A Kernel Model for Precision Timekeeping" by Dave Mills
-+ * 1998-09-05 (Various)
-+ * More robust do_fast_gettimeoffset() algorithm implemented
-+ * (works with APM, Cyrix 6x86MX and Centaur C6),
-+ * monotonic gettimeofday() with fast_get_timeoffset(),
-+ * drift-proof precision TSC calibration on boot
-+ * (C. Scott Ananian <cananian@alumni.princeton.edu>, Andrew D.
-+ * Balsa <andrebalsa@altern.org>, Philip Gladstone <philip@raptor.com>;
-+ * ported from 2.0.35 Jumbo-9 by Michael Krause <m.krause@tu-harburg.de>).
-+ * 1998-12-16 Andrea Arcangeli
-+ * Fixed Jumbo-9 code in 2.1.131: do_gettimeofday was missing 1 jiffy
-+ * because was not accounting lost_ticks.
-+ * 1998-12-24 Copyright (C) 1998 Andrea Arcangeli
-+ * Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
-+ * serialize accesses to xtime/lost_ticks).
-+ */
-+
-+#include <linux/errno.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/param.h>
-+#include <linux/string.h>
-+#include <linux/mm.h>
-+#include <linux/interrupt.h>
-+#include <linux/time.h>
-+#include <linux/delay.h>
-+#include <linux/init.h>
-+#include <linux/smp.h>
-+#include <linux/module.h>
-+#include <linux/sysdev.h>
-+#include <linux/bcd.h>
-+#include <linux/efi.h>
-+#include <linux/mca.h>
-+#include <linux/sysctl.h>
-+#include <linux/percpu.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/posix-timers.h>
-+
-+#include <asm/io.h>
-+#include <asm/smp.h>
-+#include <asm/irq.h>
-+#include <asm/msr.h>
-+#include <asm/delay.h>
-+#include <asm/mpspec.h>
-+#include <asm/uaccess.h>
-+#include <asm/processor.h>
-+#ifdef __i386__
-+#include <asm/timer.h>
-+#include "mach_time.h"
-+#endif
-+#ifdef __x86_64__
-+#include <asm/sections.h>
-+#include <linux/mc146818rtc.h>
-+#endif
-+
-+#include <linux/timex.h>
-+#include <linux/config.h>
-+
-+#include <asm/hpet.h>
-+
-+#include <asm/arch_hooks.h>
-+
-+#include <xen/evtchn.h>
-+#include <xen/interface/vcpu.h>
-+
-+#if defined (__i386__)
-+#include <asm/i8259.h>
-+#endif
-+
-+int pit_latch_buggy; /* extern */
-+
-+#if defined(__x86_64__)
-+unsigned long vxtime_hz = PIT_TICK_RATE;
-+struct vxtime_data __vxtime __section_vxtime; /* for vsyscalls */
-+volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
-+unsigned long __wall_jiffies __section_wall_jiffies = INITIAL_JIFFIES;
-+struct timespec __xtime __section_xtime;
-+struct timezone __sys_tz __section_sys_tz;
-+#endif
-+
-+unsigned int cpu_khz; /* Detected as we calibrate the TSC */
-+EXPORT_SYMBOL(cpu_khz);
-+
-+extern unsigned long wall_jiffies;
-+
-+DEFINE_SPINLOCK(rtc_lock);
-+EXPORT_SYMBOL(rtc_lock);
-+
-+#if defined (__i386__)
-+#include <asm/i8253.h>
-+#endif
-+
-+DEFINE_SPINLOCK(i8253_lock);
-+EXPORT_SYMBOL(i8253_lock);
-+
-+/* These are periodically updated in shared_info, and then copied here. */
-+struct shadow_time_info {
-+ u64 tsc_timestamp; /* TSC at last update of time vals. */
-+ u64 system_timestamp; /* Time, in nanosecs, since boot. */
-+ u32 tsc_to_nsec_mul;
-+ u32 tsc_to_usec_mul;
-+ int tsc_shift;
-+ u32 version;
-+};
-+static DEFINE_PER_CPU(struct shadow_time_info, shadow_time);
-+static struct timespec shadow_tv;
-+static u32 shadow_tv_version;
-+
-+/* Keep track of last time we did processing/updating of jiffies and xtime. */
-+static u64 processed_system_time; /* System time (ns) at last processing. */
-+static DEFINE_PER_CPU(u64, processed_system_time);
-+
-+/* How much CPU time was spent blocked and how much was 'stolen'? */
-+static DEFINE_PER_CPU(u64, processed_stolen_time);
-+static DEFINE_PER_CPU(u64, processed_blocked_time);
-+
-+/* Current runstate of each CPU (updated automatically by the hypervisor). */
-+static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
-+
-+/* Must be signed, as it's compared with s64 quantities which can be negative. */
-+#define NS_PER_TICK (1000000000LL/HZ)
-+
-+static inline void __normalize_time(time_t *sec, s64 *nsec)
-+{
-+ while (*nsec >= NSEC_PER_SEC) {
-+ (*nsec) -= NSEC_PER_SEC;
-+ (*sec)++;
-+ }
-+ while (*nsec < 0) {
-+ (*nsec) += NSEC_PER_SEC;
-+ (*sec)--;
-+ }
-+}
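-+
-+/*
-+ * Example: with *sec = 5 and *nsec = 2500000000, __normalize_time() loops
-+ * twice and leaves *sec = 7, *nsec = 500000000; a negative *nsec borrows
-+ * from *sec in the same way.
-+ */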
-+
-+/* Does this guest OS track Xen time, or set its wall clock independently? */
-+static int independent_wallclock = 0;
-+static int __init __independent_wallclock(char *str)
-+{
-+ independent_wallclock = 1;
-+ return 1;
-+}
-+__setup("independent_wallclock", __independent_wallclock);
-+
-+#ifdef __i386__
-+static void delay_tsc(unsigned long loops)
-+{
-+ unsigned long bclock, now;
-+
-+ rdtscl(bclock);
-+ do {
-+ rep_nop();
-+ rdtscl(now);
-+ } while ((now - bclock) < loops);
-+}
-+
-+static struct timer_opts timer_tsc = {
-+ .name = "tsc",
-+ .delay = delay_tsc,
-+};
-+struct timer_opts *cur_timer __read_mostly = &timer_tsc;
-+#endif
-+
-+/*
-+ * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
-+ * yielding a 64-bit result.
-+ */
-+static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
-+{
-+ u64 product;
-+#ifdef __i386__
-+ u32 tmp1, tmp2;
-+#endif
-+
-+ if (shift < 0)
-+ delta >>= -shift;
-+ else
-+ delta <<= shift;
-+
-+#ifdef __i386__
-+ __asm__ (
-+ "mul %5 ; "
-+ "mov %4,%%eax ; "
-+ "mov %%edx,%4 ; "
-+ "mul %5 ; "
-+ "xor %5,%5 ; "
-+ "add %4,%%eax ; "
-+ "adc %5,%%edx ; "
-+ : "=A" (product), "=r" (tmp1), "=r" (tmp2)
-+ : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
-+#else
-+ __asm__ (
-+ "mul %%rdx ; shrd $32,%%rdx,%%rax"
-+ : "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
-+#endif
-+
-+ return product;
-+}
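-+
-+/*
-+ * I.e. scale_delta() computes (delta << shift) * mul_frac / 2^32 without
-+ * overflowing the intermediate product.  For example, with shift = 0 and
-+ * mul_frac = 0x80000000 (a fixed-point fraction of 0.5), a delta of 1000
-+ * TSC cycles scales to 500.
-+ */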
-+
-+#if defined (__i386__)
-+int read_current_timer(unsigned long *timer_val)
-+{
-+ rdtscl(*timer_val);
-+ return 0;
-+}
-+#endif
-+
-+void init_cpu_khz(void)
-+{
-+ u64 __cpu_khz = 1000000ULL << 32;
-+ struct vcpu_time_info *info;
-+ info = &HYPERVISOR_shared_info->vcpu_info[0].time;
-+ do_div(__cpu_khz, info->tsc_to_system_mul);
-+ if (info->tsc_shift < 0)
-+ cpu_khz = __cpu_khz << -info->tsc_shift;
-+ else
-+ cpu_khz = __cpu_khz >> info->tsc_shift;
-+}
-+
-+static u64 get_nsec_offset(struct shadow_time_info *shadow)
-+{
-+ u64 now, delta;
-+ rdtscll(now);
-+ delta = now - shadow->tsc_timestamp;
-+ return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
-+}
-+
-+static unsigned long get_usec_offset(struct shadow_time_info *shadow)
-+{
-+ u64 now, delta;
-+ rdtscll(now);
-+ delta = now - shadow->tsc_timestamp;
-+ return scale_delta(delta, shadow->tsc_to_usec_mul, shadow->tsc_shift);
-+}
-+
-+static void __update_wallclock(time_t sec, long nsec)
-+{
-+ long wtm_nsec, xtime_nsec;
-+ time_t wtm_sec, xtime_sec;
-+ u64 tmp, wc_nsec;
-+
-+ /* Adjust wall-clock time base based on wall_jiffies ticks. */
-+ wc_nsec = processed_system_time;
-+ wc_nsec += sec * (u64)NSEC_PER_SEC;
-+ wc_nsec += nsec;
-+ wc_nsec -= (jiffies - wall_jiffies) * (u64)NS_PER_TICK;
-+
-+ /* Split wallclock base into seconds and nanoseconds. */
-+ tmp = wc_nsec;
-+ xtime_nsec = do_div(tmp, 1000000000);
-+ xtime_sec = (time_t)tmp;
-+
-+ wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - xtime_sec);
-+ wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - xtime_nsec);
-+
-+ set_normalized_timespec(&xtime, xtime_sec, xtime_nsec);
-+ set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-+
-+ ntp_clear();
-+}
-+
-+static void update_wallclock(void)
-+{
-+ shared_info_t *s = HYPERVISOR_shared_info;
-+
-+ do {
-+ shadow_tv_version = s->wc_version;
-+ rmb();
-+ shadow_tv.tv_sec = s->wc_sec;
-+ shadow_tv.tv_nsec = s->wc_nsec;
-+ rmb();
-+ } while ((s->wc_version & 1) | (shadow_tv_version ^ s->wc_version));
-+
-+ if (!independent_wallclock)
-+ __update_wallclock(shadow_tv.tv_sec, shadow_tv.tv_nsec);
-+}
-+
-+/*
-+ * Reads a consistent set of time-base values from Xen, into a shadow data
-+ * area.
-+ */
-+static void get_time_values_from_xen(void)
-+{
-+ shared_info_t *s = HYPERVISOR_shared_info;
-+ struct vcpu_time_info *src;
-+ struct shadow_time_info *dst;
-+
-+ src = &s->vcpu_info[smp_processor_id()].time;
-+ dst = &per_cpu(shadow_time, smp_processor_id());
-+
-+ do {
-+ dst->version = src->version;
-+ rmb();
-+ dst->tsc_timestamp = src->tsc_timestamp;
-+ dst->system_timestamp = src->system_time;
-+ dst->tsc_to_nsec_mul = src->tsc_to_system_mul;
-+ dst->tsc_shift = src->tsc_shift;
-+ rmb();
-+ } while ((src->version & 1) | (dst->version ^ src->version));
-+
-+ dst->tsc_to_usec_mul = dst->tsc_to_nsec_mul / 1000;
-+}
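-+
-+/*
-+ * The version field implements a lock-free snapshot protocol: Xen bumps
-+ * src->version before and after every update, so an odd value
-+ * (src->version & 1) means an update is in flight, and a changed value
-+ * (dst->version ^ src->version) means one raced with our copy; either way
-+ * the loop retries until the snapshot is consistent.
-+ */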
-+
-+static inline int time_values_up_to_date(int cpu)
-+{
-+ struct vcpu_time_info *src;
-+ struct shadow_time_info *dst;
-+
-+ src = &HYPERVISOR_shared_info->vcpu_info[cpu].time;
-+ dst = &per_cpu(shadow_time, cpu);
-+
-+ return (dst->version == src->version);
-+}
-+
-+#ifdef __i386__
-+/*
-+ * This is a special lock that is owned by the CPU and holds the index
-+ * register we are working with. It is required for NMI access to the
-+ * CMOS/RTC registers. See include/asm-i386/mc146818rtc.h for details.
-+ */
-+volatile unsigned long cmos_lock = 0;
-+EXPORT_SYMBOL(cmos_lock);
-+
-+/* Routines for accessing the CMOS RAM/RTC. */
-+unsigned char rtc_cmos_read(unsigned char addr)
-+{
-+ unsigned char val;
-+ lock_cmos_prefix(addr);
-+ outb_p(addr, RTC_PORT(0));
-+ val = inb_p(RTC_PORT(1));
-+ lock_cmos_suffix(addr);
-+ return val;
-+}
-+EXPORT_SYMBOL(rtc_cmos_read);
-+
-+void rtc_cmos_write(unsigned char val, unsigned char addr)
-+{
-+ lock_cmos_prefix(addr);
-+ outb_p(addr, RTC_PORT(0));
-+ outb_p(val, RTC_PORT(1));
-+ lock_cmos_suffix(addr);
-+}
-+EXPORT_SYMBOL(rtc_cmos_write);
-+#endif
-+
-+/*
-+ * This version of gettimeofday has microsecond resolution
-+ * and better than microsecond precision on fast x86 machines with TSC.
-+ */
-+void do_gettimeofday(struct timeval *tv)
-+{
-+ unsigned long seq;
-+ unsigned long usec, sec;
-+ unsigned long max_ntp_tick;
-+ s64 nsec;
-+ unsigned int cpu;
-+ struct shadow_time_info *shadow;
-+ u32 local_time_version;
-+
-+ cpu = get_cpu();
-+ shadow = &per_cpu(shadow_time, cpu);
-+
-+ do {
-+ unsigned long lost;
-+
-+ local_time_version = shadow->version;
-+ seq = read_seqbegin(&xtime_lock);
-+
-+ usec = get_usec_offset(shadow);
-+ lost = jiffies - wall_jiffies;
-+
-+ /*
-+ * If time_adjust is negative then NTP is slowing the clock
-+ * so make sure not to go into the next possible interval.
-+ * Better to lose some accuracy than have time go backwards.
-+ */
-+ if (unlikely(time_adjust < 0)) {
-+ max_ntp_tick = (USEC_PER_SEC / HZ) - tickadj;
-+ usec = min(usec, max_ntp_tick);
-+
-+ if (lost)
-+ usec += lost * max_ntp_tick;
-+ }
-+ else if (unlikely(lost))
-+ usec += lost * (USEC_PER_SEC / HZ);
-+
-+ sec = xtime.tv_sec;
-+ usec += (xtime.tv_nsec / NSEC_PER_USEC);
-+
-+ nsec = shadow->system_timestamp - processed_system_time;
-+ __normalize_time(&sec, &nsec);
-+ usec += (long)nsec / NSEC_PER_USEC;
-+
-+ if (unlikely(!time_values_up_to_date(cpu))) {
-+ /*
-+ * We may have blocked for a long time,
-+ * rendering our calculations invalid
-+ * (e.g. the time delta may have
-+ * overflowed). Detect that and recalculate
-+ * with fresh values.
-+ */
-+ get_time_values_from_xen();
-+ continue;
-+ }
-+ } while (read_seqretry(&xtime_lock, seq) ||
-+ (local_time_version != shadow->version));
-+
-+ put_cpu();
-+
-+ while (usec >= USEC_PER_SEC) {
-+ usec -= USEC_PER_SEC;
-+ sec++;
-+ }
-+
-+ tv->tv_sec = sec;
-+ tv->tv_usec = usec;
-+}
-+
-+EXPORT_SYMBOL(do_gettimeofday);
-+
-+int do_settimeofday(struct timespec *tv)
-+{
-+ time_t sec;
-+ s64 nsec;
-+ unsigned int cpu;
-+ struct shadow_time_info *shadow;
-+ dom0_op_t op;
-+
-+ if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
-+ return -EINVAL;
-+
-+ cpu = get_cpu();
-+ shadow = &per_cpu(shadow_time, cpu);
-+
-+ write_seqlock_irq(&xtime_lock);
-+
-+ /*
-+ * Ensure we don't get blocked for so long that our time delta
-+ * overflows. If that were to happen then our shadow time values would
-+ * be stale, so we can retry with fresh ones.
-+ */
-+ for (;;) {
-+ nsec = tv->tv_nsec - get_nsec_offset(shadow);
-+ if (time_values_up_to_date(cpu))
-+ break;
-+ get_time_values_from_xen();
-+ }
-+ sec = tv->tv_sec;
-+ __normalize_time(&sec, &nsec);
-+
-+ if ((xen_start_info->flags & SIF_INITDOMAIN) &&
-+ !independent_wallclock) {
-+ op.cmd = DOM0_SETTIME;
-+ op.u.settime.secs = sec;
-+ op.u.settime.nsecs = nsec;
-+ op.u.settime.system_time = shadow->system_timestamp;
-+ HYPERVISOR_dom0_op(&op);
-+ update_wallclock();
-+ } else if (independent_wallclock) {
-+ nsec -= shadow->system_timestamp;
-+ __normalize_time(&sec, &nsec);
-+ __update_wallclock(sec, nsec);
-+ }
-+
-+ write_sequnlock_irq(&xtime_lock);
-+
-+ put_cpu();
-+
-+ clock_was_set();
-+ return 0;
-+}
-+
-+EXPORT_SYMBOL(do_settimeofday);
-+
-+static void sync_xen_wallclock(unsigned long dummy);
-+static DEFINE_TIMER(sync_xen_wallclock_timer, sync_xen_wallclock, 0, 0);
-+static void sync_xen_wallclock(unsigned long dummy)
-+{
-+ time_t sec;
-+ s64 nsec;
-+ dom0_op_t op;
-+
-+ if (!ntp_synced() || independent_wallclock ||
-+ !(xen_start_info->flags & SIF_INITDOMAIN))
-+ return;
-+
-+ write_seqlock_irq(&xtime_lock);
-+
-+ sec = xtime.tv_sec;
-+ nsec = xtime.tv_nsec + ((jiffies - wall_jiffies) * (u64)NS_PER_TICK);
-+ __normalize_time(&sec, &nsec);
-+
-+ op.cmd = DOM0_SETTIME;
-+ op.u.settime.secs = sec;
-+ op.u.settime.nsecs = nsec;
-+ op.u.settime.system_time = processed_system_time;
-+ HYPERVISOR_dom0_op(&op);
-+
-+ update_wallclock();
-+
-+ write_sequnlock_irq(&xtime_lock);
-+
-+ /* Once per minute. */
-+ mod_timer(&sync_xen_wallclock_timer, jiffies + 60*HZ);
-+}
-+
-+#ifdef __i386__
-+static int set_rtc_mmss(unsigned long nowtime)
-+{
-+ int retval;
-+
-+ WARN_ON(irqs_disabled());
-+
-+ if (independent_wallclock || !(xen_start_info->flags & SIF_INITDOMAIN))
-+ return 0;
-+
-+ /* gets recalled with irq locally disabled */
-+ spin_lock_irq(&rtc_lock);
-+ if (efi_enabled)
-+ retval = efi_set_rtc_mmss(nowtime);
-+ else
-+ retval = mach_set_rtc_mmss(nowtime);
-+ spin_unlock_irq(&rtc_lock);
-+
-+ return retval;
-+}
-+#endif
-+
-+/* monotonic_clock(): returns # of nanoseconds passed since time_init()
-+ * Note: This function is required to return accurate
-+ * time even in the absence of multiple timer ticks.
-+ */
-+unsigned long long monotonic_clock(void)
-+{
-+ int cpu = get_cpu();
-+ struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
-+ u64 time;
-+ u32 local_time_version;
-+
-+ do {
-+ local_time_version = shadow->version;
-+ smp_rmb();
-+ time = shadow->system_timestamp + get_nsec_offset(shadow);
-+ if (!time_values_up_to_date(cpu))
-+ get_time_values_from_xen();
-+ smp_rmb();
-+ } while (local_time_version != shadow->version);
-+
-+ put_cpu();
-+
-+ return time;
-+}
-+EXPORT_SYMBOL(monotonic_clock);
-+
-+unsigned long long sched_clock(void)
-+{
-+ return monotonic_clock();
-+}
-+
-+#if defined(__x86_64__)
-+unsigned long profile_pc(struct pt_regs *regs)
-+{
-+ unsigned long pc = instruction_pointer(regs);
-+
-+ /* Assume the lock function has either no stack frame or only a single word.
-+ This checks if the address on the stack looks like a kernel text address.
-+ There is a small window for false hits, but in that case the tick
-+ is just accounted to the spinlock function.
-+ Better would be to write these functions in assembler again
-+ and check exactly. */
-+ if (in_lock_functions(pc)) {
-+ char *v = *(char **)regs->rsp;
-+ if ((v >= _stext && v <= _etext) ||
-+ (v >= _sinittext && v <= _einittext) ||
-+ (v >= (char *)MODULES_VADDR && v <= (char *)MODULES_END))
-+ return (unsigned long)v;
-+ return ((unsigned long *)regs->rsp)[1];
-+ }
-+ return pc;
-+}
-+EXPORT_SYMBOL(profile_pc);
-+#elif defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
-+unsigned long profile_pc(struct pt_regs *regs)
-+{
-+ unsigned long pc = instruction_pointer(regs);
-+
-+#ifdef __x86_64__
-+ /* Assume the lock function has either no stack frame or only a single word.
-+ This checks if the address on the stack looks like a kernel text address.
-+ There is a small window for false hits, but in that case the tick
-+ is just accounted to the spinlock function.
-+ Better would be to write these functions in assembler again
-+ and check exactly. */
-+ if (in_lock_functions(pc)) {
-+ char *v = *(char **)regs->rsp;
-+ if ((v >= _stext && v <= _etext) ||
-+ (v >= _sinittext && v <= _einittext) ||
-+ (v >= (char *)MODULES_VADDR && v <= (char *)MODULES_END))
-+ return (unsigned long)v;
-+ return ((unsigned long *)regs->rsp)[1];
-+ }
-+#else
-+ if (in_lock_functions(pc))
-+ return *(unsigned long *)(regs->ebp + 4);
-+#endif
-+
-+ return pc;
-+}
-+EXPORT_SYMBOL(profile_pc);
-+#endif
-+
-+irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
-+{
-+ s64 delta, delta_cpu, stolen, blocked;
-+ u64 sched_time;
-+ int i, cpu = smp_processor_id();
-+ struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
-+ struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
-+
-+ write_seqlock(&xtime_lock);
-+
-+ do {
-+ get_time_values_from_xen();
-+
-+ /* Obtain a consistent snapshot of elapsed wallclock cycles. */
-+ delta = delta_cpu =
-+ shadow->system_timestamp + get_nsec_offset(shadow);
-+ delta -= processed_system_time;
-+ delta_cpu -= per_cpu(processed_system_time, cpu);
-+
-+ /*
-+ * Obtain a consistent snapshot of stolen/blocked cycles. We
-+ * can use state_entry_time to detect if we get preempted here.
-+ */
-+ do {
-+ sched_time = runstate->state_entry_time;
-+ barrier();
-+ stolen = runstate->time[RUNSTATE_runnable] +
-+ runstate->time[RUNSTATE_offline] -
-+ per_cpu(processed_stolen_time, cpu);
-+ blocked = runstate->time[RUNSTATE_blocked] -
-+ per_cpu(processed_blocked_time, cpu);
-+ barrier();
-+ } while (sched_time != runstate->state_entry_time);
-+ } while (!time_values_up_to_date(cpu));
-+
-+ if ((unlikely(delta < -1000000LL) || unlikely(delta_cpu < 0))
-+ && printk_ratelimit()) {
-+ printk("Timer ISR/%d: Time went backwards: "
-+ "delta=%lld cpu_delta=%lld shadow=%lld "
-+ "off=%lld processed=%lld cpu_processed=%lld\n",
-+ cpu, delta, delta_cpu, shadow->system_timestamp,
-+ (s64)get_nsec_offset(shadow),
-+ processed_system_time,
-+ per_cpu(processed_system_time, cpu));
-+ for (i = 0; i < num_online_cpus(); i++)
-+ printk(" %d: %lld\n", i,
-+ per_cpu(processed_system_time, i));
-+ }
-+
-+ /* System-wide jiffy work. */
-+ while (delta >= NS_PER_TICK) {
-+ delta -= NS_PER_TICK;
-+ processed_system_time += NS_PER_TICK;
-+ do_timer(regs);
-+ }
-+
-+ if (shadow_tv_version != HYPERVISOR_shared_info->wc_version) {
-+ update_wallclock();
-+ clock_was_set();
-+ }
-+
-+ write_sequnlock(&xtime_lock);
-+
-+ /*
-+ * Account stolen ticks.
-+ * HACK: Passing NULL to account_steal_time()
-+ * ensures that the ticks are accounted as stolen.
-+ */
-+ if (stolen > 0) {
-+ delta_cpu -= stolen;
-+ do_div(stolen, NS_PER_TICK);
-+ per_cpu(processed_stolen_time, cpu) += stolen * NS_PER_TICK;
-+ per_cpu(processed_system_time, cpu) += stolen * NS_PER_TICK;
-+ account_steal_time(NULL, (cputime_t)stolen);
-+ }
-+
-+ /*
-+ * Account blocked ticks.
-+ * HACK: Passing idle_task to account_steal_time()
-+ * ensures that the ticks are accounted as idle/wait.
-+ */
-+ if (blocked > 0) {
-+ delta_cpu -= blocked;
-+ do_div(blocked, NS_PER_TICK);
-+ per_cpu(processed_blocked_time, cpu) += blocked * NS_PER_TICK;
-+ per_cpu(processed_system_time, cpu) += blocked * NS_PER_TICK;
-+ account_steal_time(idle_task(cpu), (cputime_t)blocked);
-+ }
-+
-+ /* Account user/system ticks. */
-+ if (delta_cpu > 0) {
-+ do_div(delta_cpu, NS_PER_TICK);
-+ per_cpu(processed_system_time, cpu) += delta_cpu * NS_PER_TICK;
-+ if (user_mode(regs))
-+ account_user_time(current, (cputime_t)delta_cpu);
-+ else
-+ account_system_time(current, HARDIRQ_OFFSET,
-+ (cputime_t)delta_cpu);
-+ }
-+
-+ /* Local timer processing (see update_process_times()). */
-+ run_local_timers();
-+ if (rcu_pending(cpu))
-+ rcu_check_callbacks(cpu, user_mode(regs));
-+ scheduler_tick();
-+ run_posix_cpu_timers(current);
-+
-+ return IRQ_HANDLED;
-+}
-+
-+static void init_missing_ticks_accounting(int cpu)
-+{
-+ struct vcpu_register_runstate_memory_area area;
-+ struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
-+
-+ memset(runstate, 0, sizeof(*runstate));
-+
-+ area.addr.v = runstate;
-+ HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu, &area);
-+
-+ per_cpu(processed_blocked_time, cpu) =
-+ runstate->time[RUNSTATE_blocked];
-+ per_cpu(processed_stolen_time, cpu) =
-+ runstate->time[RUNSTATE_runnable] +
-+ runstate->time[RUNSTATE_offline];
-+}
-+
-+/* not static: needed by APM */
-+unsigned long get_cmos_time(void)
-+{
-+#ifdef __i386__
-+ unsigned long retval;
-+
-+ spin_lock(&rtc_lock);
-+
-+ if (efi_enabled)
-+ retval = efi_get_time();
-+ else
-+ retval = mach_get_cmos_time();
-+
-+ spin_unlock(&rtc_lock);
-+
-+ return retval;
-+#else
-+ unsigned int timeout = 1000000, year, mon, day, hour, min, sec;
-+ unsigned char uip = 0, this = 0;
-+ unsigned long flags;
-+
-+/*
-+ * The Linux interpretation of the CMOS clock register contents: When the
-+ * Update-In-Progress (UIP) flag goes from 1 to 0, the RTC registers show the
-+ * second which has precisely just started. Waiting for this can take up to 1
-+ * second; we time out after approximately 2.4 seconds on a machine with a
-+ * standard 8.3 MHz ISA bus.
-+ */
-+
-+ spin_lock_irqsave(&rtc_lock, flags);
-+
-+ while (timeout && (!uip || this)) {
-+ uip |= this;
-+ this = CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP;
-+ timeout--;
-+ }
-+
-+/*
-+ * Here we are safe to assume the registers won't change for a whole second, so
-+ * we just go ahead and read them.
-+ */
-+
-+ sec = CMOS_READ(RTC_SECONDS);
-+ min = CMOS_READ(RTC_MINUTES);
-+ hour = CMOS_READ(RTC_HOURS);
-+ day = CMOS_READ(RTC_DAY_OF_MONTH);
-+ mon = CMOS_READ(RTC_MONTH);
-+ year = CMOS_READ(RTC_YEAR);
-+
-+ spin_unlock_irqrestore(&rtc_lock, flags);
-+
-+/*
-+ * We know that x86-64 always uses BCD format; no need to check the config
-+ * register.
-+ */
-+
-+ BCD_TO_BIN(sec);
-+ BCD_TO_BIN(min);
-+ BCD_TO_BIN(hour);
-+ BCD_TO_BIN(day);
-+ BCD_TO_BIN(mon);
-+ BCD_TO_BIN(year);
-+
-+/*
-+ * x86-64 systems have only existed since 2002.
-+ * This will work up to Dec 31, 2100.
-+ */
-+ year += 2000;
-+
-+ return mktime(year, mon, day, hour, min, sec);
-+#endif
-+}
-+
-+#ifdef __i386__
-+static void sync_cmos_clock(unsigned long dummy);
-+
-+static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);
-+
-+static void sync_cmos_clock(unsigned long dummy)
-+{
-+ struct timeval now, next;
-+ int fail = 1;
-+
-+ /*
-+ * If we have an externally synchronized Linux clock, then update
-+ * CMOS clock accordingly every ~11 minutes. set_rtc_mmss() has to be
-+ * called as close as possible to 500 ms before the new second starts.
-+ * This code is run on a timer. If the clock is set, that timer
-+ * may not expire at the correct time. Thus, we adjust...
-+ */
-+ if (!ntp_synced())
-+ /*
-+ * Not synced, exit, do not restart a timer (if one is
-+ * running, let it run out).
-+ */
-+ return;
-+
-+ do_gettimeofday(&now);
-+ if (now.tv_usec >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
-+ now.tv_usec <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2)
-+ fail = set_rtc_mmss(now.tv_sec);
-+
-+ next.tv_usec = USEC_AFTER - now.tv_usec;
-+ if (next.tv_usec <= 0)
-+ next.tv_usec += USEC_PER_SEC;
-+
-+ if (!fail)
-+ next.tv_sec = 659;
-+ else
-+ next.tv_sec = 0;
-+
-+ if (next.tv_usec >= USEC_PER_SEC) {
-+ next.tv_sec++;
-+ next.tv_usec -= USEC_PER_SEC;
-+ }
-+ mod_timer(&sync_cmos_timer, jiffies + timeval_to_jiffies(&next));
-+}
-+
-+void notify_arch_cmos_timer(void)
-+{
-+ mod_timer(&sync_cmos_timer, jiffies + 1);
-+ mod_timer(&sync_xen_wallclock_timer, jiffies + 1);
-+}
-+#endif
-+
-+static long clock_cmos_diff, sleep_start;
-+
-+static int timer_suspend(struct sys_device *dev, pm_message_t state)
-+{
-+ /*
-+ * Record the offset between system time and the CMOS clock so that
-+ * timer_resume() can restore the correct time
-+ */
-+ clock_cmos_diff = -get_cmos_time();
-+ clock_cmos_diff += get_seconds();
-+ sleep_start = get_cmos_time();
-+ return 0;
-+}
-+
-+static int timer_resume(struct sys_device *dev)
-+{
-+ unsigned long flags;
-+ unsigned long sec;
-+ unsigned long sleep_length;
-+
-+#ifdef CONFIG_HPET_TIMER
-+ if (is_hpet_enabled())
-+ hpet_reenable();
-+#endif
-+ sec = get_cmos_time() + clock_cmos_diff;
-+ sleep_length = (get_cmos_time() - sleep_start) * HZ;
-+ write_seqlock_irqsave(&xtime_lock, flags);
-+ xtime.tv_sec = sec;
-+ xtime.tv_nsec = 0;
-+ write_sequnlock_irqrestore(&xtime_lock, flags);
-+ jiffies += sleep_length;
-+ wall_jiffies += sleep_length;
-+ touch_softlockup_watchdog();
-+ return 0;
-+}
-+
-+static struct sysdev_class timer_sysclass = {
-+ .resume = timer_resume,
-+ .suspend = timer_suspend,
-+ set_kset_name("timer"),
-+};
-+
-+
-+/* XXX this driverfs stuff should probably go elsewhere later -john */
-+static struct sys_device device_timer = {
-+ .id = 0,
-+ .cls = &timer_sysclass,
-+};
-+
-+static int time_init_device(void)
-+{
-+ int error = sysdev_class_register(&timer_sysclass);
-+ if (!error)
-+ error = sysdev_register(&device_timer);
-+ return error;
-+}
-+
-+device_initcall(time_init_device);
-+
-+#ifdef CONFIG_HPET_TIMER
-+extern void (*late_time_init)(void);
-+/* Duplicate of time_init() below, with hpet_enable part added */
-+static void __init hpet_time_init(void)
-+{
-+ xtime.tv_sec = get_cmos_time();
-+ xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
-+ set_normalized_timespec(&wall_to_monotonic,
-+ -xtime.tv_sec, -xtime.tv_nsec);
-+
-+ if ((hpet_enable() >= 0) && hpet_use_timer) {
-+ printk("Using HPET for base-timer\n");
-+ }
-+
-+ cur_timer = select_timer();
-+ printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
-+
-+ time_init_hook();
-+}
-+#endif
-+
-+/* Dynamically-mapped IRQ. */
-+DEFINE_PER_CPU(int, timer_irq);
-+
-+extern void (*late_time_init)(void);
-+static void setup_cpu0_timer_irq(void)
-+{
-+ per_cpu(timer_irq, 0) =
-+ bind_virq_to_irqhandler(
-+ VIRQ_TIMER,
-+ 0,
-+ timer_interrupt,
-+ SA_INTERRUPT,
-+ "timer0",
-+ NULL);
-+ BUG_ON(per_cpu(timer_irq, 0) < 0);
-+}
-+
-+void __init time_init(void)
-+{
-+#ifdef CONFIG_HPET_TIMER
-+ if (is_hpet_capable()) {
-+ /*
-+ * HPET initialization needs to do memory-mapped io. So, let
-+ * us do a late initialization after mem_init().
-+ */
-+ late_time_init = hpet_time_init;
-+ return;
-+ }
-+#endif
-+ get_time_values_from_xen();
-+
-+ processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
-+ per_cpu(processed_system_time, 0) = processed_system_time;
-+ init_missing_ticks_accounting(0);
-+
-+ update_wallclock();
-+
-+ init_cpu_khz();
-+ printk(KERN_INFO "Xen reported: %u.%03u MHz processor.\n",
-+ cpu_khz / 1000, cpu_khz % 1000);
-+
-+#if defined(__x86_64__)
-+ vxtime.mode = VXTIME_TSC;
-+ vxtime.quot = (1000000L << 32) / vxtime_hz;
-+ vxtime.tsc_quot = (1000L << 32) / cpu_khz;
-+ sync_core();
-+ rdtscll(vxtime.last_tsc);
-+#endif
-+
-+ /* Cannot request_irq() until kmem is initialised. */
-+ late_time_init = setup_cpu0_timer_irq;
-+}
-+
-+/* Convert jiffies to system time. */
-+static inline u64 jiffies_to_st(unsigned long j)
-+{
-+ unsigned long seq;
-+ long delta;
-+ u64 st;
-+
-+ do {
-+ seq = read_seqbegin(&xtime_lock);
-+ delta = j - jiffies;
-+ /* NB. The next check can trigger in some wrap-around cases,
-+ * but that's ok: we'll just end up with a shorter timeout. */
-+ if (delta < 1)
-+ delta = 1;
-+ st = processed_system_time + (delta * (u64)NS_PER_TICK);
-+ } while (read_seqretry(&xtime_lock, seq));
-+
-+ return st;
-+}
-+
-+/*
-+ * stop_hz_timer / start_hz_timer - enter/exit 'tickless mode' on an idle cpu
-+ * These functions are based on implementations from arch/s390/kernel/time.c
-+ */
-+void stop_hz_timer(void)
-+{
-+ unsigned int cpu = smp_processor_id();
-+ unsigned long j;
-+
-+ /* We must do this /before/ checking rcu_pending(). */
-+ cpu_set(cpu, nohz_cpu_mask);
-+ smp_mb();
-+
-+ /* Leave ourselves in 'tick mode' if rcu or softirq pending. */
-+ if (rcu_pending(cpu) || local_softirq_pending()) {
-+ cpu_clear(cpu, nohz_cpu_mask);
-+ j = jiffies + 1;
-+ } else {
-+ j = next_timer_interrupt();
-+ }
-+
-+ BUG_ON(HYPERVISOR_set_timer_op(jiffies_to_st(j)) != 0);
-+}
-+
-+void start_hz_timer(void)
-+{
-+ cpu_clear(smp_processor_id(), nohz_cpu_mask);
-+}
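For context, a minimal sketch of how an idle loop is expected to bracket its blocking hypercall with this pair. This is illustrative only: the real call sites live elsewhere in this patch, and HYPERVISOR_block() is assumed from the same Xen tree.

    /* Illustrative idle-loop caller; not part of this file. */
    static void xen_idle_sketch(void)
    {
            local_irq_disable();
            if (need_resched()) {
                    local_irq_enable();
            } else {
                    /* Program a one-shot timer for the next pending event. */
                    stop_hz_timer();
                    /* Atomically re-enables event delivery and blocks until
                     * an event (e.g. VIRQ_TIMER) arrives. */
                    HYPERVISOR_block();
                    start_hz_timer();
            }
    }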
-+
-+/* No locking required. We are only CPU running, and interrupts are off. */
-+void time_resume(void)
-+{
-+ init_cpu_khz();
-+
-+ get_time_values_from_xen();
-+
-+ processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
-+ per_cpu(processed_system_time, 0) = processed_system_time;
-+ init_missing_ticks_accounting(0);
-+
-+ update_wallclock();
-+}
-+
-+#ifdef CONFIG_SMP
-+static char timer_name[NR_CPUS][15];
-+
-+void local_setup_timer(unsigned int cpu)
-+{
-+ int seq;
-+
-+ BUG_ON(cpu == 0);
-+
-+ do {
-+ seq = read_seqbegin(&xtime_lock);
-+ /* Use cpu0 timestamp: cpu's shadow is not initialised yet. */
-+ per_cpu(processed_system_time, cpu) =
-+ per_cpu(shadow_time, 0).system_timestamp;
-+ init_missing_ticks_accounting(cpu);
-+ } while (read_seqretry(&xtime_lock, seq));
-+
-+ sprintf(timer_name[cpu], "timer%d", cpu);
-+ per_cpu(timer_irq, cpu) =
-+ bind_virq_to_irqhandler(
-+ VIRQ_TIMER,
-+ cpu,
-+ timer_interrupt,
-+ SA_INTERRUPT,
-+ timer_name[cpu],
-+ NULL);
-+ BUG_ON(per_cpu(timer_irq, cpu) < 0);
-+}
-+
-+void local_teardown_timer(unsigned int cpu)
-+{
-+ BUG_ON(cpu == 0);
-+ unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
-+}
-+#endif
-+
-+/*
-+ * /proc/sys/xen: This really belongs in another file. It can stay here for
-+ * now however.
-+ */
-+static ctl_table xen_subtable[] = {
-+ {1, "independent_wallclock", &independent_wallclock,
-+ sizeof(independent_wallclock), 0644, NULL, proc_dointvec},
-+ {0}
-+};
-+static ctl_table xen_table[] = {
-+ {123, "xen", NULL, 0, 0555, xen_subtable},
-+ {0}
-+};
-+static int __init xen_sysctl_init(void)
-+{
-+ (void)register_sysctl_table(xen_table, 0);
-+ return 0;
-+}
-+__initcall(xen_sysctl_init);
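A hedged userspace sketch of exercising the knob registered above; the /proc/sys/xen/independent_wallclock path exists only on kernels carrying this patch, and writing it requires root.

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/proc/sys/xen/independent_wallclock", O_WRONLY);
            if (fd < 0) {
                    perror("open");  /* kernel lacks this patch, or not root */
                    return 1;
            }
            /* "1": this domain keeps its own wallclock instead of
             * tracking updates pushed by domain 0. */
            if (write(fd, "1\n", 2) != 2)
                    perror("write");
            close(fd);
            return 0;
    }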
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/arch/i386/kernel/traps-xen.c b/arch/i386/kernel/traps-xen.c
-new file mode 100644
-index 0000000..1c4a427
---- /dev/null
-+++ b/arch/i386/kernel/traps-xen.c
-@@ -0,0 +1,1094 @@
-+/*
-+ * linux/arch/i386/traps.c
-+ *
-+ * Copyright (C) 1991, 1992 Linus Torvalds
-+ *
-+ * Pentium III FXSR, SSE support
-+ * Gareth Hughes <gareth@valinux.com>, May 2000
-+ */
-+
-+/*
-+ * 'Traps.c' handles hardware traps and faults after we have saved some
-+ * state in 'asm.s'.
-+ */
-+#include <linux/config.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/string.h>
-+#include <linux/errno.h>
-+#include <linux/timer.h>
-+#include <linux/mm.h>
-+#include <linux/init.h>
-+#include <linux/delay.h>
-+#include <linux/spinlock.h>
-+#include <linux/interrupt.h>
-+#include <linux/highmem.h>
-+#include <linux/kallsyms.h>
-+#include <linux/ptrace.h>
-+#include <linux/utsname.h>
-+#include <linux/kprobes.h>
-+#include <linux/kexec.h>
-+
-+#ifdef CONFIG_EISA
-+#include <linux/ioport.h>
-+#include <linux/eisa.h>
-+#endif
-+
-+#ifdef CONFIG_MCA
-+#include <linux/mca.h>
-+#endif
-+
-+#include <asm/processor.h>
-+#include <asm/system.h>
-+#include <asm/uaccess.h>
-+#include <asm/io.h>
-+#include <asm/atomic.h>
-+#include <asm/debugreg.h>
-+#include <asm/desc.h>
-+#include <asm/i387.h>
-+#include <asm/nmi.h>
-+
-+#include <asm/smp.h>
-+#include <asm/arch_hooks.h>
-+#include <asm/kdebug.h>
-+
-+#include <linux/module.h>
-+
-+#include "mach_traps.h"
-+
-+asmlinkage int system_call(void);
-+
-+struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
-+ { 0, 0 }, { 0, 0 } };
-+
-+/* Do we ignore FPU interrupts ? */
-+char ignore_fpu_irq = 0;
-+
-+#ifndef CONFIG_X86_NO_IDT
-+/*
-+ * The IDT has to be page-aligned to simplify the Pentium
-+ * F0 0F bug workaround.. We have a special link segment
-+ * for this.
-+ */
-+struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
-+#endif
-+
-+asmlinkage void divide_error(void);
-+asmlinkage void debug(void);
-+asmlinkage void nmi(void);
-+asmlinkage void int3(void);
-+asmlinkage void overflow(void);
-+asmlinkage void bounds(void);
-+asmlinkage void invalid_op(void);
-+asmlinkage void device_not_available(void);
-+asmlinkage void coprocessor_segment_overrun(void);
-+asmlinkage void invalid_TSS(void);
-+asmlinkage void segment_not_present(void);
-+asmlinkage void stack_segment(void);
-+asmlinkage void general_protection(void);
-+asmlinkage void page_fault(void);
-+asmlinkage void coprocessor_error(void);
-+asmlinkage void simd_coprocessor_error(void);
-+asmlinkage void alignment_check(void);
-+#ifndef CONFIG_XEN
-+asmlinkage void spurious_interrupt_bug(void);
-+#else
-+asmlinkage void fixup_4gb_segment(void);
-+#endif
-+asmlinkage void machine_check(void);
-+
-+static int kstack_depth_to_print = 24;
-+struct notifier_block *i386die_chain;
-+static DEFINE_SPINLOCK(die_notifier_lock);
-+
-+int register_die_notifier(struct notifier_block *nb)
-+{
-+ int err = 0;
-+ unsigned long flags;
-+ spin_lock_irqsave(&die_notifier_lock, flags);
-+ err = notifier_chain_register(&i386die_chain, nb);
-+ spin_unlock_irqrestore(&die_notifier_lock, flags);
-+ return err;
-+}
-+EXPORT_SYMBOL(register_die_notifier);
-+
-+static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
-+{
-+ return p > (void *)tinfo &&
-+ p < (void *)tinfo + THREAD_SIZE - 3;
-+}
-+
-+static void print_addr_and_symbol(unsigned long addr, char *log_lvl)
-+{
-+ printk(log_lvl);
-+ printk(" [<%08lx>] ", addr);
-+ print_symbol("%s", addr);
-+ printk("\n");
-+}
-+
-+static inline unsigned long print_context_stack(struct thread_info *tinfo,
-+ unsigned long *stack, unsigned long ebp,
-+ char *log_lvl)
-+{
-+ unsigned long addr;
-+
-+#ifdef CONFIG_FRAME_POINTER
-+ while (valid_stack_ptr(tinfo, (void *)ebp)) {
-+ addr = *(unsigned long *)(ebp + 4);
-+ print_addr_and_symbol(addr, log_lvl);
-+ ebp = *(unsigned long *)ebp;
-+ }
-+#else
-+ while (valid_stack_ptr(tinfo, stack)) {
-+ addr = *stack++;
-+ if (__kernel_text_address(addr))
-+ print_addr_and_symbol(addr, log_lvl);
-+ }
-+#endif
-+ return ebp;
-+}
-+
-+static void show_trace_log_lvl(struct task_struct *task,
-+ unsigned long *stack, char *log_lvl)
-+{
-+ unsigned long ebp;
-+
-+ if (!task)
-+ task = current;
-+
-+ if (task == current) {
-+ /* Grab ebp right from our regs */
-+ asm ("movl %%ebp, %0" : "=r" (ebp) : );
-+ } else {
-+ /* ebp is the last reg pushed by switch_to */
-+ ebp = *(unsigned long *) task->thread.esp;
-+ }
-+
-+ while (1) {
-+ struct thread_info *context;
-+ context = (struct thread_info *)
-+ ((unsigned long)stack & (~(THREAD_SIZE - 1)));
-+ ebp = print_context_stack(context, stack, ebp, log_lvl);
-+ stack = (unsigned long*)context->previous_esp;
-+ if (!stack)
-+ break;
-+ printk(log_lvl);
-+ printk(" =======================\n");
-+ }
-+}
-+
-+void show_trace(struct task_struct *task, unsigned long * stack)
-+{
-+ show_trace_log_lvl(task, stack, "");
-+}
-+
-+static void show_stack_log_lvl(struct task_struct *task, unsigned long *esp,
-+ char *log_lvl)
-+{
-+ unsigned long *stack;
-+ int i;
-+
-+ if (esp == NULL) {
-+ if (task)
-+ esp = (unsigned long*)task->thread.esp;
-+ else
-+ esp = (unsigned long *)&esp;
-+ }
-+
-+ stack = esp;
-+ printk(log_lvl);
-+ for(i = 0; i < kstack_depth_to_print; i++) {
-+ if (kstack_end(stack))
-+ break;
-+ if (i && ((i % 8) == 0)) {
-+ printk("\n");
-+ printk(log_lvl);
-+ printk(" ");
-+ }
-+ printk("%08lx ", *stack++);
-+ }
-+ printk("\n");
-+ printk(log_lvl);
-+ printk("Call Trace:\n");
-+ show_trace_log_lvl(task, esp, log_lvl);
-+}
-+
-+void show_stack(struct task_struct *task, unsigned long *esp)
-+{
-+ show_stack_log_lvl(task, esp, "");
-+}
-+
-+/*
-+ * The architecture-independent dump_stack generator
-+ */
-+void dump_stack(void)
-+{
-+ unsigned long stack;
-+
-+ show_trace(current, &stack);
-+}
-+
-+EXPORT_SYMBOL(dump_stack);
-+
-+void show_registers(struct pt_regs *regs)
-+{
-+ int i;
-+ int in_kernel = 1;
-+ unsigned long esp;
-+ unsigned short ss;
-+
-+ esp = (unsigned long) (&regs->esp);
-+ savesegment(ss, ss);
-+ if (user_mode(regs)) {
-+ in_kernel = 0;
-+ esp = regs->esp;
-+ ss = regs->xss & 0xffff;
-+ }
-+ print_modules();
-+ printk(KERN_EMERG "CPU: %d\nEIP: %04x:[<%08lx>] %s VLI\n"
-+ "EFLAGS: %08lx (%s %.*s) \n",
-+ smp_processor_id(), 0xffff & regs->xcs, regs->eip,
-+ print_tainted(), regs->eflags, system_utsname.release,
-+ (int)strcspn(system_utsname.version, " "),
-+ system_utsname.version);
-+ print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
-+ printk(KERN_EMERG "eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
-+ regs->eax, regs->ebx, regs->ecx, regs->edx);
-+ printk(KERN_EMERG "esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
-+ regs->esi, regs->edi, regs->ebp, esp);
-+ printk(KERN_EMERG "ds: %04x es: %04x ss: %04x\n",
-+ regs->xds & 0xffff, regs->xes & 0xffff, ss);
-+ printk(KERN_EMERG "Process %s (pid: %d, threadinfo=%p task=%p)",
-+ current->comm, current->pid, current_thread_info(), current);
-+ /*
-+ * When in-kernel, we also print out the stack and code at the
-+ * time of the fault..
-+ */
-+ if (in_kernel) {
-+ u8 __user *eip;
-+
-+ printk("\n" KERN_EMERG "Stack: ");
-+ show_stack_log_lvl(NULL, (unsigned long *)esp, KERN_EMERG);
-+
-+ printk(KERN_EMERG "Code: ");
-+
-+ eip = (u8 __user *)regs->eip - 43;
-+ for (i = 0; i < 64; i++, eip++) {
-+ unsigned char c;
-+
-+ if (eip < (u8 __user *)PAGE_OFFSET || __get_user(c, eip)) {
-+ printk(" Bad EIP value.");
-+ break;
-+ }
-+ if (eip == (u8 __user *)regs->eip)
-+ printk("<%02x> ", c);
-+ else
-+ printk("%02x ", c);
-+ }
-+ }
-+ printk("\n");
-+}
-+
-+static void handle_BUG(struct pt_regs *regs)
-+{
-+ unsigned short ud2;
-+ unsigned short line;
-+ char *file;
-+ char c;
-+ unsigned long eip;
-+
-+ eip = regs->eip;
-+
-+ if (eip < PAGE_OFFSET)
-+ goto no_bug;
-+ if (__get_user(ud2, (unsigned short __user *)eip))
-+ goto no_bug;
-+ if (ud2 != 0x0b0f)
-+ goto no_bug;
-+ if (__get_user(line, (unsigned short __user *)(eip + 2)))
-+ goto bug;
-+ if (__get_user(file, (char * __user *)(eip + 4)) ||
-+ (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
-+ file = "<bad filename>";
-+
-+ printk(KERN_EMERG "------------[ cut here ]------------\n");
-+ printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line);
-+
-+no_bug:
-+ return;
-+
-+ /* Here we know it was a BUG but file-n-line is unavailable */
-+bug:
-+ printk(KERN_EMERG "Kernel BUG\n");
-+}
-+
-+/* This is gone through when something in the kernel
-+ * has done something bad and is about to be terminated.
-+*/
-+void die(const char * str, struct pt_regs * regs, long err)
-+{
-+ static struct {
-+ spinlock_t lock;
-+ u32 lock_owner;
-+ int lock_owner_depth;
-+ } die = {
-+ .lock = SPIN_LOCK_UNLOCKED,
-+ .lock_owner = -1,
-+ .lock_owner_depth = 0
-+ };
-+ static int die_counter;
-+ unsigned long flags;
-+
-+ if (die.lock_owner != raw_smp_processor_id()) {
-+ console_verbose();
-+ spin_lock_irqsave(&die.lock, flags);
-+ die.lock_owner = smp_processor_id();
-+ die.lock_owner_depth = 0;
-+ bust_spinlocks(1);
-+ }
-+ else
-+ local_save_flags(flags);
-+
-+ if (++die.lock_owner_depth < 3) {
-+ int nl = 0;
-+ handle_BUG(regs);
-+ printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
-+#ifdef CONFIG_PREEMPT
-+ printk(KERN_EMERG "PREEMPT ");
-+ nl = 1;
-+#endif
-+#ifdef CONFIG_SMP
-+ if (!nl)
-+ printk(KERN_EMERG);
-+ printk("SMP ");
-+ nl = 1;
-+#endif
-+#ifdef CONFIG_DEBUG_PAGEALLOC
-+ if (!nl)
-+ printk(KERN_EMERG);
-+ printk("DEBUG_PAGEALLOC");
-+ nl = 1;
-+#endif
-+ if (nl)
-+ printk("\n");
-+ notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
-+ show_registers(regs);
-+ } else
-+ printk(KERN_EMERG "Recursive die() failure, output suppressed\n");
-+
-+ bust_spinlocks(0);
-+ die.lock_owner = -1;
-+ spin_unlock_irqrestore(&die.lock, flags);
-+
-+ if (kexec_should_crash(current))
-+ crash_kexec(regs);
-+
-+ if (in_interrupt())
-+ panic("Fatal exception in interrupt");
-+
-+ if (panic_on_oops) {
-+ printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
-+ ssleep(5);
-+ panic("Fatal exception");
-+ }
-+ do_exit(SIGSEGV);
-+}
-+
-+static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
-+{
-+ if (!user_mode_vm(regs))
-+ die(str, regs, err);
-+}
-+
-+static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
-+ struct pt_regs * regs, long error_code,
-+ siginfo_t *info)
-+{
-+ struct task_struct *tsk = current;
-+ tsk->thread.error_code = error_code;
-+ tsk->thread.trap_no = trapnr;
-+
-+ if (regs->eflags & VM_MASK) {
-+ if (vm86)
-+ goto vm86_trap;
-+ goto trap_signal;
-+ }
-+
-+ if (!user_mode(regs))
-+ goto kernel_trap;
-+
-+ trap_signal: {
-+ if (info)
-+ force_sig_info(signr, info, tsk);
-+ else
-+ force_sig(signr, tsk);
-+ return;
-+ }
-+
-+ kernel_trap: {
-+ if (!fixup_exception(regs))
-+ die(str, regs, error_code);
-+ return;
-+ }
-+
-+ vm86_trap: {
-+ int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
-+ if (ret) goto trap_signal;
-+ return;
-+ }
-+}
-+
-+#define DO_ERROR(trapnr, signr, str, name) \
-+fastcall void do_##name(struct pt_regs * regs, long error_code) \
-+{ \
-+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
-+ == NOTIFY_STOP) \
-+ return; \
-+ do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
-+}
-+
-+#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
-+fastcall void do_##name(struct pt_regs * regs, long error_code) \
-+{ \
-+ siginfo_t info; \
-+ info.si_signo = signr; \
-+ info.si_errno = 0; \
-+ info.si_code = sicode; \
-+ info.si_addr = (void __user *)siaddr; \
-+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
-+ == NOTIFY_STOP) \
-+ return; \
-+ do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
-+}
-+
-+#define DO_VM86_ERROR(trapnr, signr, str, name) \
-+fastcall void do_##name(struct pt_regs * regs, long error_code) \
-+{ \
-+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
-+ == NOTIFY_STOP) \
-+ return; \
-+ do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
-+}
-+
-+#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
-+fastcall void do_##name(struct pt_regs * regs, long error_code) \
-+{ \
-+ siginfo_t info; \
-+ info.si_signo = signr; \
-+ info.si_errno = 0; \
-+ info.si_code = sicode; \
-+ info.si_addr = (void __user *)siaddr; \
-+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
-+ == NOTIFY_STOP) \
-+ return; \
-+ do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
-+}
-+
-+DO_VM86_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->eip)
-+#ifndef CONFIG_KPROBES
-+DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
-+#endif
-+DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
-+DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
-+DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip)
-+DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
-+DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
-+DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
-+DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
-+DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
-+DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
-+
-+fastcall void __kprobes do_general_protection(struct pt_regs * regs,
-+ long error_code)
-+{
-+ current->thread.error_code = error_code;
-+ current->thread.trap_no = 13;
-+
-+ if (regs->eflags & VM_MASK)
-+ goto gp_in_vm86;
-+
-+ if (!user_mode(regs))
-+ goto gp_in_kernel;
-+
-+ current->thread.error_code = error_code;
-+ current->thread.trap_no = 13;
-+ force_sig(SIGSEGV, current);
-+ return;
-+
-+gp_in_vm86:
-+ local_irq_enable();
-+ handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
-+ return;
-+
-+gp_in_kernel:
-+ if (!fixup_exception(regs)) {
-+ if (notify_die(DIE_GPF, "general protection fault", regs,
-+ error_code, 13, SIGSEGV) == NOTIFY_STOP)
-+ return;
-+ die("general protection fault", regs, error_code);
-+ }
-+}
-+
-+static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
-+{
-+ printk(KERN_EMERG "Uhhuh. NMI received. Dazed and confused, but trying "
-+ "to continue\n");
-+ printk(KERN_EMERG "You probably have a hardware problem with your RAM "
-+ "chips\n");
-+
-+ /* Clear and disable the memory parity error line. */
-+ clear_mem_error(reason);
-+}
-+
-+static void io_check_error(unsigned char reason, struct pt_regs * regs)
-+{
-+ printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
-+ show_registers(regs);
-+
-+ /* Re-enable the IOCK line, wait for a few seconds */
-+ clear_io_check_error(reason);
-+}
-+
-+static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
-+{
-+#ifdef CONFIG_MCA
-+ /* Might actually be able to figure out what the guilty party
-+ * is. */
-+ if( MCA_bus ) {
-+ mca_handle_nmi();
-+ return;
-+ }
-+#endif
-+ printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
-+ reason, smp_processor_id());
-+ printk("Dazed and confused, but trying to continue\n");
-+ printk("Do you have a strange power saving mode enabled?\n");
-+}
-+
-+static DEFINE_SPINLOCK(nmi_print_lock);
-+
-+void die_nmi (struct pt_regs *regs, const char *msg)
-+{
-+ if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 0, SIGINT) ==
-+ NOTIFY_STOP)
-+ return;
-+
-+ spin_lock(&nmi_print_lock);
-+ /*
-+	 * We are in trouble anyway, let's at least try
-+ * to get a message out.
-+ */
-+ bust_spinlocks(1);
-+ printk(KERN_EMERG "%s", msg);
-+ printk(" on CPU%d, eip %08lx, registers:\n",
-+ smp_processor_id(), regs->eip);
-+ show_registers(regs);
-+ printk(KERN_EMERG "console shuts up ...\n");
-+ console_silent();
-+ spin_unlock(&nmi_print_lock);
-+ bust_spinlocks(0);
-+
-+	/* If we are in the kernel we are probably nested up pretty badly
-+	 * and might as well get out now while we still can.
-+ */
-+ if (!user_mode(regs)) {
-+ current->thread.trap_no = 2;
-+ crash_kexec(regs);
-+ }
-+
-+ do_exit(SIGSEGV);
-+}
-+
-+static void default_do_nmi(struct pt_regs * regs)
-+{
-+ unsigned char reason = 0;
-+
-+ /* Only the BSP gets external NMIs from the system. */
-+ if (!smp_processor_id())
-+ reason = get_nmi_reason();
-+
-+ if (!(reason & 0xc0)) {
-+ if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 0, SIGINT)
-+ == NOTIFY_STOP)
-+ return;
-+#ifdef CONFIG_X86_LOCAL_APIC
-+ /*
-+ * Ok, so this is none of the documented NMI sources,
-+ * so it must be the NMI watchdog.
-+ */
-+ if (nmi_watchdog) {
-+ nmi_watchdog_tick(regs);
-+ return;
-+ }
-+#endif
-+ unknown_nmi_error(reason, regs);
-+ return;
-+ }
-+ if (notify_die(DIE_NMI, "nmi", regs, reason, 0, SIGINT) == NOTIFY_STOP)
-+ return;
-+ if (reason & 0x80)
-+ mem_parity_error(reason, regs);
-+ if (reason & 0x40)
-+ io_check_error(reason, regs);
-+ /*
-+ * Reassert NMI in case it became active meanwhile
-+ * as it's edge-triggered.
-+ */
-+ reassert_nmi();
-+}
-+
-+static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
-+{
-+ return 0;
-+}
-+
-+static nmi_callback_t nmi_callback = dummy_nmi_callback;
-+
-+fastcall void do_nmi(struct pt_regs * regs, long error_code)
-+{
-+ int cpu;
-+
-+ nmi_enter();
-+
-+ cpu = smp_processor_id();
-+
-+ ++nmi_count(cpu);
-+
-+ if (!rcu_dereference(nmi_callback)(regs, cpu))
-+ default_do_nmi(regs);
-+
-+ nmi_exit();
-+}
-+
-+void set_nmi_callback(nmi_callback_t callback)
-+{
-+ rcu_assign_pointer(nmi_callback, callback);
-+}
-+EXPORT_SYMBOL_GPL(set_nmi_callback);
-+
-+void unset_nmi_callback(void)
-+{
-+ nmi_callback = dummy_nmi_callback;
-+}
-+EXPORT_SYMBOL_GPL(unset_nmi_callback);
-+
-+#ifdef CONFIG_KPROBES
-+fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
-+{
-+ if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
-+ == NOTIFY_STOP)
-+ return;
-+ /* This is an interrupt gate, because kprobes wants interrupts
-+ disabled. Normal trap handlers don't. */
-+ restore_interrupts(regs);
-+ do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
-+}
-+#endif
-+
-+/*
-+ * Our handling of the processor debug registers is non-trivial.
-+ * We do not clear them on entry and exit from the kernel. Therefore
-+ * it is possible to get a watchpoint trap here from inside the kernel.
-+ * However, the code in ./ptrace.c has ensured that the user can
-+ * only set watchpoints on userspace addresses. Therefore the in-kernel
-+ * watchpoint trap can only occur in code which is reading/writing
-+ * from user space. Such code must not hold kernel locks (since it
-+ * can equally take a page fault), therefore it is safe to call
-+ * force_sig_info even though that claims and releases locks.
-+ *
-+ * Code in ./signal.c ensures that the debug control register
-+ * is restored before we deliver any signal, and therefore that
-+ * user code runs with the correct debug control register even though
-+ * we clear it here.
-+ *
-+ * Being careful here means that we don't have to be as careful in a
-+ * lot of more complicated places (task switching can be a bit lazy
-+ * about restoring all the debug state, and ptrace doesn't have to
-+ * find every occurrence of the TF bit that could be saved away even
-+ * by user code)
-+ */
-+fastcall void __kprobes do_debug(struct pt_regs * regs, long error_code)
-+{
-+ unsigned int condition;
-+ struct task_struct *tsk = current;
-+
-+ get_debugreg(condition, 6);
-+
-+ if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
-+ SIGTRAP) == NOTIFY_STOP)
-+ return;
-+ /* It's safe to allow irq's after DR6 has been saved */
-+ if (regs->eflags & X86_EFLAGS_IF)
-+ local_irq_enable();
-+
-+ /* Mask out spurious debug traps due to lazy DR7 setting */
-+ if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
-+ if (!tsk->thread.debugreg[7])
-+ goto clear_dr7;
-+ }
-+
-+ if (regs->eflags & VM_MASK)
-+ goto debug_vm86;
-+
-+ /* Save debug status register where ptrace can see it */
-+ tsk->thread.debugreg[6] = condition;
-+
-+ /*
-+ * Single-stepping through TF: make sure we ignore any events in
-+ * kernel space (but re-enable TF when returning to user mode).
-+ */
-+ if (condition & DR_STEP) {
-+ /*
-+ * We already checked v86 mode above, so we can
-+ * check for kernel mode by just checking the CPL
-+ * of CS.
-+ */
-+ if (!user_mode(regs))
-+ goto clear_TF_reenable;
-+ }
-+
-+ /* Ok, finally something we can handle */
-+ send_sigtrap(tsk, regs, error_code);
-+
-+ /* Disable additional traps. They'll be re-enabled when
-+ * the signal is delivered.
-+ */
-+clear_dr7:
-+ set_debugreg(0, 7);
-+ return;
-+
-+debug_vm86:
-+ handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
-+ return;
-+
-+clear_TF_reenable:
-+ set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
-+ regs->eflags &= ~TF_MASK;
-+ return;
-+}
-+
-+/*
-+ * Note that we play around with the 'TS' bit in an attempt to get
-+ * the correct behaviour even in the presence of the asynchronous
-+ * IRQ13 behaviour
-+ */
-+void math_error(void __user *eip)
-+{
-+ struct task_struct * task;
-+ siginfo_t info;
-+ unsigned short cwd, swd;
-+
-+ /*
-+ * Save the info for the exception handler and clear the error.
-+ */
-+ task = current;
-+ save_init_fpu(task);
-+ task->thread.trap_no = 16;
-+ task->thread.error_code = 0;
-+ info.si_signo = SIGFPE;
-+ info.si_errno = 0;
-+ info.si_code = __SI_FAULT;
-+ info.si_addr = eip;
-+ /*
-+ * (~cwd & swd) will mask out exceptions that are not set to unmasked
-+ * status. 0x3f is the exception bits in these regs, 0x200 is the
-+ * C1 reg you need in case of a stack fault, 0x040 is the stack
-+ * fault bit. We should only be taking one exception at a time,
-+ * so if this combination doesn't produce any single exception,
-+	 * then we have a bad program that isn't synchronizing its FPU usage
-+ * and it will suffer the consequences since we won't be able to
-+ * fully reproduce the context of the exception
-+ */
-+ cwd = get_fpu_cwd(task);
-+ swd = get_fpu_swd(task);
-+ switch (swd & ~cwd & 0x3f) {
-+ case 0x000: /* No unmasked exception */
-+ return;
-+ default: /* Multiple exceptions */
-+ break;
-+ case 0x001: /* Invalid Op */
-+ /*
-+ * swd & 0x240 == 0x040: Stack Underflow
-+ * swd & 0x240 == 0x240: Stack Overflow
-+ * User must clear the SF bit (0x40) if set
-+ */
-+ info.si_code = FPE_FLTINV;
-+ break;
-+ case 0x002: /* Denormalize */
-+ case 0x010: /* Underflow */
-+ info.si_code = FPE_FLTUND;
-+ break;
-+ case 0x004: /* Zero Divide */
-+ info.si_code = FPE_FLTDIV;
-+ break;
-+ case 0x008: /* Overflow */
-+ info.si_code = FPE_FLTOVF;
-+ break;
-+ case 0x020: /* Precision */
-+ info.si_code = FPE_FLTRES;
-+ break;
-+ }
-+ force_sig_info(SIGFPE, &info, task);
-+}
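A small worked example of the (~cwd & swd) decode used above, runnable in userspace with illustrative register values (not captured from real hardware):

    #include <stdio.h>

    int main(void)
    {
            /* cwd 0x0372 leaves ZM (bit 2) clear, so zero-divide is
             * unmasked; swd 0x0004 reports a pending zero-divide. */
            unsigned short cwd = 0x0372, swd = 0x0004;

            switch (swd & ~cwd & 0x3f) {
            case 0x004:
                    printf("FPE_FLTDIV\n");  /* the case math_error() takes */
                    break;
            case 0x000:
                    printf("no unmasked exception\n");
                    break;
            default:
                    printf("other/multiple\n");
                    break;
            }
            return 0;
    }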
-+
-+fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code)
-+{
-+ ignore_fpu_irq = 1;
-+ math_error((void __user *)regs->eip);
-+}
-+
-+static void simd_math_error(void __user *eip)
-+{
-+ struct task_struct * task;
-+ siginfo_t info;
-+ unsigned short mxcsr;
-+
-+ /*
-+ * Save the info for the exception handler and clear the error.
-+ */
-+ task = current;
-+ save_init_fpu(task);
-+ task->thread.trap_no = 19;
-+ task->thread.error_code = 0;
-+ info.si_signo = SIGFPE;
-+ info.si_errno = 0;
-+ info.si_code = __SI_FAULT;
-+ info.si_addr = eip;
-+ /*
-+ * The SIMD FPU exceptions are handled a little differently, as there
-+ * is only a single status/control register. Thus, to determine which
-+ * unmasked exception was caught we must mask the exception mask bits
-+ * at 0x1f80, and then use these to mask the exception bits at 0x3f.
-+ */
-+ mxcsr = get_fpu_mxcsr(task);
-+ switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
-+ case 0x000:
-+ default:
-+ break;
-+ case 0x001: /* Invalid Op */
-+ info.si_code = FPE_FLTINV;
-+ break;
-+ case 0x002: /* Denormalize */
-+ case 0x010: /* Underflow */
-+ info.si_code = FPE_FLTUND;
-+ break;
-+ case 0x004: /* Zero Divide */
-+ info.si_code = FPE_FLTDIV;
-+ break;
-+ case 0x008: /* Overflow */
-+ info.si_code = FPE_FLTOVF;
-+ break;
-+ case 0x020: /* Precision */
-+ info.si_code = FPE_FLTRES;
-+ break;
-+ }
-+ force_sig_info(SIGFPE, &info, task);
-+}
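The MXCSR variant packs masks and status into a single register; a one-line check with illustrative values:

    #include <stdio.h>

    int main(void)
    {
            /* Mask bits live at 0x1f80, status bits at 0x3f. 0x1d84
             * leaves ZM (bit 9) clear and has ZE (bit 2) pending. */
            unsigned short mxcsr = 0x1d84;

            /* Same expression as simd_math_error(); prints 0x4, i.e.
             * the zero-divide case (FPE_FLTDIV). */
            printf("%#x\n", ~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f));
            return 0;
    }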
-+
-+fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
-+ long error_code)
-+{
-+ if (cpu_has_xmm) {
-+ /* Handle SIMD FPU exceptions on PIII+ processors. */
-+ ignore_fpu_irq = 1;
-+ simd_math_error((void __user *)regs->eip);
-+ } else {
-+ /*
-+ * Handle strange cache flush from user space exception
-+ * in all other cases. This is undocumented behaviour.
-+ */
-+ if (regs->eflags & VM_MASK) {
-+ handle_vm86_fault((struct kernel_vm86_regs *)regs,
-+ error_code);
-+ return;
-+ }
-+ current->thread.trap_no = 19;
-+ current->thread.error_code = error_code;
-+ die_if_kernel("cache flush denied", regs, error_code);
-+ force_sig(SIGSEGV, current);
-+ }
-+}
-+
-+#ifndef CONFIG_XEN
-+fastcall void do_spurious_interrupt_bug(struct pt_regs * regs,
-+ long error_code)
-+{
-+#if 0
-+ /* No need to warn about this any longer. */
-+ printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
-+#endif
-+}
-+
-+fastcall void setup_x86_bogus_stack(unsigned char * stk)
-+{
-+ unsigned long *switch16_ptr, *switch32_ptr;
-+ struct pt_regs *regs;
-+ unsigned long stack_top, stack_bot;
-+ unsigned short iret_frame16_off;
-+ int cpu = smp_processor_id();
-+ /* reserve the space on 32bit stack for the magic switch16 pointer */
-+ memmove(stk, stk + 8, sizeof(struct pt_regs));
-+ switch16_ptr = (unsigned long *)(stk + sizeof(struct pt_regs));
-+ regs = (struct pt_regs *)stk;
-+ /* now the switch32 on 16bit stack */
-+ stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
-+ stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
-+ switch32_ptr = (unsigned long *)(stack_top - 8);
-+ iret_frame16_off = CPU_16BIT_STACK_SIZE - 8 - 20;
-+ /* copy iret frame on 16bit stack */
-+ memcpy((void *)(stack_bot + iret_frame16_off), &regs->eip, 20);
-+ /* fill in the switch pointers */
-+ switch16_ptr[0] = (regs->esp & 0xffff0000) | iret_frame16_off;
-+ switch16_ptr[1] = __ESPFIX_SS;
-+ switch32_ptr[0] = (unsigned long)stk + sizeof(struct pt_regs) +
-+ 8 - CPU_16BIT_STACK_SIZE;
-+ switch32_ptr[1] = __KERNEL_DS;
-+}
-+
-+fastcall unsigned char * fixup_x86_bogus_stack(unsigned short sp)
-+{
-+ unsigned long *switch32_ptr;
-+ unsigned char *stack16, *stack32;
-+ unsigned long stack_top, stack_bot;
-+ int len;
-+ int cpu = smp_processor_id();
-+ stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
-+ stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
-+ switch32_ptr = (unsigned long *)(stack_top - 8);
-+ /* copy the data from 16bit stack to 32bit stack */
-+ len = CPU_16BIT_STACK_SIZE - 8 - sp;
-+ stack16 = (unsigned char *)(stack_bot + sp);
-+ stack32 = (unsigned char *)
-+ (switch32_ptr[0] + CPU_16BIT_STACK_SIZE - 8 - len);
-+ memcpy(stack32, stack16, len);
-+ return stack32;
-+}
-+#endif
-+
-+/*
-+ * 'math_state_restore()' saves the current math information in the
-+ * old math state array, and gets the new ones from the current task
-+ *
-+ * Careful.. There are problems with IBM-designed IRQ13 behaviour.
-+ * Don't touch unless you *really* know how it works.
-+ *
-+ * Must be called with kernel preemption disabled (in this case,
-+ * local interrupts are disabled at the call-site in entry.S).
-+ */
-+asmlinkage void math_state_restore(struct pt_regs regs)
-+{
-+ struct thread_info *thread = current_thread_info();
-+ struct task_struct *tsk = thread->task;
-+
-+ /* NB. 'clts' is done for us by Xen during virtual trap. */
-+ if (!tsk_used_math(tsk))
-+ init_fpu(tsk);
-+ restore_fpu(tsk);
-+ thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */
-+}
-+
-+#ifndef CONFIG_MATH_EMULATION
-+
-+asmlinkage void math_emulate(long arg)
-+{
-+ printk(KERN_EMERG "math-emulation not enabled and no coprocessor found.\n");
-+ printk(KERN_EMERG "killing %s.\n",current->comm);
-+ force_sig(SIGFPE,current);
-+ schedule();
-+}
-+
-+#endif /* CONFIG_MATH_EMULATION */
-+
-+#ifdef CONFIG_X86_F00F_BUG
-+void __init trap_init_f00f_bug(void)
-+{
-+ __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
-+
-+ /*
-+ * Update the IDT descriptor and reload the IDT so that
-+ * it uses the read-only mapped virtual address.
-+ */
-+ idt_descr.address = fix_to_virt(FIX_F00F_IDT);
-+ load_idt(&idt_descr);
-+}
-+#endif
-+
-+
-+/*
-+ * NB. All these are "trap gates" (i.e. events_mask isn't set) except
-+ * for those that specify <dpl>|4 in the second field.
-+ */
-+static trap_info_t trap_table[] = {
-+ { 0, 0, __KERNEL_CS, (unsigned long)divide_error },
-+ { 1, 0|4, __KERNEL_CS, (unsigned long)debug },
-+ { 3, 3|4, __KERNEL_CS, (unsigned long)int3 },
-+ { 4, 3, __KERNEL_CS, (unsigned long)overflow },
-+ { 5, 0, __KERNEL_CS, (unsigned long)bounds },
-+ { 6, 0, __KERNEL_CS, (unsigned long)invalid_op },
-+ { 7, 0|4, __KERNEL_CS, (unsigned long)device_not_available },
-+ { 9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
-+ { 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS },
-+ { 11, 0, __KERNEL_CS, (unsigned long)segment_not_present },
-+ { 12, 0, __KERNEL_CS, (unsigned long)stack_segment },
-+ { 13, 0, __KERNEL_CS, (unsigned long)general_protection },
-+ { 14, 0|4, __KERNEL_CS, (unsigned long)page_fault },
-+ { 15, 0, __KERNEL_CS, (unsigned long)fixup_4gb_segment },
-+ { 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error },
-+ { 17, 0, __KERNEL_CS, (unsigned long)alignment_check },
-+#ifdef CONFIG_X86_MCE
-+ { 18, 0, __KERNEL_CS, (unsigned long)machine_check },
-+#endif
-+ { 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error },
-+ { SYSCALL_VECTOR, 3, __KERNEL_CS, (unsigned long)system_call },
-+ { 0, 0, 0, 0 }
-+};
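To spell out the flags encoding the comment above refers to, a pair of hypothetical helper macros (illustrative, not part of the Xen interface):

    /* Low two bits: most privileged ring allowed to raise the trap (DPL).
     * Bit 2: ask Xen to set the event mask on entry, i.e. behave like
     * an interrupt gate instead of a trap gate. */
    #define XEN_TRAP_DPL(flags)           ((flags) & 3)
    #define XEN_TRAP_MASKS_EVENTS(flags)  ((flags) & 4)

    /* Example: debug { 1, 0|4, ... } is ring-0 only and masks events;
     * int3 { 3, 3|4, ... } may be raised from ring 3 and masks events. */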
-+
-+void __init trap_init(void)
-+{
-+ HYPERVISOR_set_trap_table(trap_table);
-+
-+ if (cpu_has_fxsr) {
-+ /*
-+ * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
-+ * Generates a compile-time "error: zero width for bit-field" if
-+ * the alignment is wrong.
-+ */
-+ struct fxsrAlignAssert {
-+ int _:!(offsetof(struct task_struct,
-+ thread.i387.fxsave) & 15);
-+ };
-+
-+ printk(KERN_INFO "Enabling fast FPU save and restore... ");
-+ set_in_cr4(X86_CR4_OSFXSR);
-+ printk("done.\n");
-+ }
-+ if (cpu_has_xmm) {
-+ printk(KERN_INFO "Enabling unmasked SIMD FPU exception "
-+ "support... ");
-+ set_in_cr4(X86_CR4_OSXMMEXCPT);
-+ printk("done.\n");
-+ }
-+
-+ /*
-+ * Should be a barrier for any external CPU state.
-+ */
-+ cpu_init();
-+}
-+
-+void smp_trap_init(trap_info_t *trap_ctxt)
-+{
-+ trap_info_t *t = trap_table;
-+
-+ for (t = trap_table; t->address; t++) {
-+ trap_ctxt[t->vector].flags = t->flags;
-+ trap_ctxt[t->vector].cs = t->cs;
-+ trap_ctxt[t->vector].address = t->address;
-+ }
-+}
-+
-+static int __init kstack_setup(char *s)
-+{
-+ kstack_depth_to_print = simple_strtoul(s, NULL, 0);
-+ return 0;
-+}
-+__setup("kstack=", kstack_setup);
-diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
-index b814dbd..85d4645 100644
---- a/arch/i386/kernel/traps.c
-+++ b/arch/i386/kernel/traps.c
-@@ -567,18 +567,11 @@ static void mem_parity_error(unsigned ch
-
- static void io_check_error(unsigned char reason, struct pt_regs * regs)
- {
-- unsigned long i;
--
- printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
- show_registers(regs);
-
- /* Re-enable the IOCK line, wait for a few seconds */
-- reason = (reason & 0xf) | 8;
-- outb(reason, 0x61);
-- i = 2000;
-- while (--i) udelay(1000);
-- reason &= ~8;
-- outb(reason, 0x61);
-+ clear_io_check_error(reason);
- }
-
- static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
-diff --git a/arch/i386/kernel/vm86.c b/arch/i386/kernel/vm86.c
-index f51c894..da2d48e 100644
---- a/arch/i386/kernel/vm86.c
-+++ b/arch/i386/kernel/vm86.c
-@@ -97,7 +97,9 @@
- struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
- struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
- {
-+#ifndef CONFIG_X86_NO_TSS
- struct tss_struct *tss;
-+#endif
- struct pt_regs *ret;
- unsigned long tmp;
-
-@@ -122,7 +124,9 @@ struct pt_regs * fastcall save_v86_state
- do_exit(SIGSEGV);
- }
-
-+#ifndef CONFIG_X86_NO_TSS
- tss = &per_cpu(init_tss, get_cpu());
-+#endif
- current->thread.esp0 = current->thread.saved_esp0;
- current->thread.sysenter_cs = __KERNEL_CS;
- load_esp0(tss, &current->thread);
-@@ -251,7 +255,9 @@ out:
-
- static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
- {
-+#ifndef CONFIG_X86_NO_TSS
- struct tss_struct *tss;
-+#endif
- /*
- * make sure the vm86() system call doesn't try to do anything silly
- */
-@@ -295,7 +301,9 @@ static void do_sys_vm86(struct kernel_vm
- savesegment(fs, tsk->thread.saved_fs);
- savesegment(gs, tsk->thread.saved_gs);
-
-+#ifndef CONFIG_X86_NO_TSS
- tss = &per_cpu(init_tss, get_cpu());
-+#endif
- tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
- if (cpu_has_sep)
- tsk->thread.sysenter_cs = 0;
-diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S
-index 4710195..5db6e08 100644
---- a/arch/i386/kernel/vmlinux.lds.S
-+++ b/arch/i386/kernel/vmlinux.lds.S
-@@ -34,6 +34,13 @@ SECTIONS
- __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { *(__ex_table) }
- __stop___ex_table = .;
-
-+ . = ALIGN(16);
-+ __start_smp_alternatives_table = .;
-+ __smp_alternatives : AT(ADDR(__smp_alternatives) - LOAD_OFFSET) { *(__smp_alternatives) }
-+ __stop_smp_alternatives_table = .;
-+
-+ __smp_replacements : AT(ADDR(__smp_replacements) - LOAD_OFFSET) { *(__smp_replacements) }
-+
- RODATA
-
- /* writeable */
-diff --git a/arch/i386/kernel/vsyscall-note-xen.S b/arch/i386/kernel/vsyscall-note-xen.S
-new file mode 100644
-index 0000000..c2d6dbf
---- /dev/null
-+++ b/arch/i386/kernel/vsyscall-note-xen.S
-@@ -0,0 +1,32 @@
-+/*
-+ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
-+ * Here we can supply some information useful to userland.
-+ * First we get the vanilla i386 note that supplies the kernel version info.
-+ */
-+
-+#include "vsyscall-note.S"
-+
-+/*
-+ * Now we add a special note telling glibc's dynamic linker a fake hardware
-+ * flavor that it will use to choose the search path for libraries in the
-+ * same way it uses real hardware capabilities like "mmx".
-+ * We supply "nosegneg" as the fake capability, to indicate that we
-+ * do not like negative offsets in instructions using segment overrides,
-+ * since we implement those inefficiently. This makes it possible to
-+ * install libraries optimized to avoid those access patterns in someplace
-+ * like /lib/i686/tls/nosegneg. Note that an /etc/ld.so.conf.d/file
-+ * corresponding to the bits here is needed to make ldconfig work right.
-+ * It should contain:
-+ * hwcap 0 nosegneg
-+ * to match the mapping of bit to name that we give here.
-+ */
-+#define NOTE_KERNELCAP_BEGIN(ncaps, mask) \
-+ ASM_ELF_NOTE_BEGIN(".note.kernelcap", "a", "GNU", 2) \
-+ .long ncaps, mask
-+#define NOTE_KERNELCAP(bit, name) \
-+ .byte bit; .asciz name
-+#define NOTE_KERNELCAP_END ASM_ELF_NOTE_END
-+
-+NOTE_KERNELCAP_BEGIN(1, 1)
-+NOTE_KERNELCAP(1, "nosegneg") /* Change 1 back to 0 when glibc is fixed! */
-+NOTE_KERNELCAP_END
-diff --git a/arch/i386/kernel/vsyscall.S b/arch/i386/kernel/vsyscall.S
-index b403890..432aa46 100644
---- a/arch/i386/kernel/vsyscall.S
-+++ b/arch/i386/kernel/vsyscall.S
-@@ -7,9 +7,11 @@ vsyscall_int80_start:
- .incbin "arch/i386/kernel/vsyscall-int80.so"
- vsyscall_int80_end:
-
-+#ifdef CONFIG_X86_SYSENTER
- .globl vsyscall_sysenter_start, vsyscall_sysenter_end
- vsyscall_sysenter_start:
- .incbin "arch/i386/kernel/vsyscall-sysenter.so"
- vsyscall_sysenter_end:
-+#endif
-
- __FINIT
-diff --git a/arch/i386/mach-xen/Makefile b/arch/i386/mach-xen/Makefile
-new file mode 100644
-index 0000000..012fe34
---- /dev/null
-+++ b/arch/i386/mach-xen/Makefile
-@@ -0,0 +1,5 @@
-+#
-+# Makefile for the linux kernel.
-+#
-+
-+obj-y := setup.o
-diff --git a/arch/i386/mach-xen/setup.c b/arch/i386/mach-xen/setup.c
-new file mode 100644
-index 0000000..c78032b
---- /dev/null
-+++ b/arch/i386/mach-xen/setup.c
-@@ -0,0 +1,37 @@
-+/*
-+ * Machine specific setup for generic
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/smp.h>
-+#include <linux/init.h>
-+#include <linux/interrupt.h>
-+#include <asm/acpi.h>
-+#include <asm/arch_hooks.h>
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+#define DEFAULT_SEND_IPI (1)
-+#else
-+#define DEFAULT_SEND_IPI (0)
-+#endif
-+
-+int no_broadcast=DEFAULT_SEND_IPI;
-+
-+static __init int no_ipi_broadcast(char *str)
-+{
-+ get_option(&str, &no_broadcast);
-+ printk ("Using %s mode\n", no_broadcast ? "No IPI Broadcast" :
-+ "IPI Broadcast");
-+ return 1;
-+}
-+
-+__setup("no_ipi_broadcast", no_ipi_broadcast);
-+
-+static int __init print_ipi_mode(void)
-+{
-+ printk ("Using IPI %s mode\n", no_broadcast ? "No-Shortcut" :
-+ "Shortcut");
-+ return 0;
-+}
-+
-+late_initcall(print_ipi_mode);
-diff --git a/arch/i386/mm/Makefile b/arch/i386/mm/Makefile
-index 80908b5..2b33b20 100644
---- a/arch/i386/mm/Makefile
-+++ b/arch/i386/mm/Makefile
-@@ -8,3 +8,11 @@ obj-$(CONFIG_NUMA) += discontig.o
- obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
- obj-$(CONFIG_HIGHMEM) += highmem.o
- obj-$(CONFIG_BOOT_IOREMAP) += boot_ioremap.o
-+
-+ifdef CONFIG_XEN
-+include $(srctree)/scripts/Makefile.xen
-+
-+obj-y += hypervisor.o
-+
-+obj-y := $(call cherrypickxen, $(obj-y))
-+endif
-diff --git a/arch/i386/mm/fault-xen.c b/arch/i386/mm/fault-xen.c
-new file mode 100644
-index 0000000..308c0bb
---- /dev/null
-+++ b/arch/i386/mm/fault-xen.c
-@@ -0,0 +1,617 @@
-+/*
-+ * linux/arch/i386/mm/fault.c
-+ *
-+ * Copyright (C) 1995 Linus Torvalds
-+ */
-+
-+#include <linux/signal.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/string.h>
-+#include <linux/types.h>
-+#include <linux/ptrace.h>
-+#include <linux/mman.h>
-+#include <linux/mm.h>
-+#include <linux/smp.h>
-+#include <linux/smp_lock.h>
-+#include <linux/interrupt.h>
-+#include <linux/init.h>
-+#include <linux/tty.h>
-+#include <linux/vt_kern.h> /* For unblank_screen() */
-+#include <linux/highmem.h>
-+#include <linux/module.h>
-+#include <linux/kprobes.h>
-+
-+#include <asm/system.h>
-+#include <asm/uaccess.h>
-+#include <asm/desc.h>
-+#include <asm/kdebug.h>
-+
-+extern void die(const char *,struct pt_regs *,long);
-+
-+/*
-+ * Unlock any spinlocks which will prevent us from getting the
-+ * message out
-+ */
-+void bust_spinlocks(int yes)
-+{
-+ int loglevel_save = console_loglevel;
-+
-+ if (yes) {
-+ oops_in_progress = 1;
-+ return;
-+ }
-+#ifdef CONFIG_VT
-+ unblank_screen();
-+#endif
-+ oops_in_progress = 0;
-+ /*
-+ * OK, the message is on the console. Now we call printk()
-+ * without oops_in_progress set so that printk will give klogd
-+ * a poke. Hold onto your hats...
-+ */
-+ console_loglevel = 15; /* NMI oopser may have shut the console up */
-+ printk(" ");
-+ console_loglevel = loglevel_save;
-+}
-+
-+/*
-+ * Return EIP plus the CS segment base. The segment limit is also
-+ * adjusted, clamped to the kernel/user address space (whichever is
-+ * appropriate), and returned in *eip_limit.
-+ *
-+ * The segment is checked, because it might have been changed by another
-+ * task between the original faulting instruction and here.
-+ *
-+ * If CS is no longer a valid code segment, or if EIP is beyond the
-+ * limit, or if it is a kernel address when CS is not a kernel segment,
-+ * then the returned value will be greater than *eip_limit.
-+ *
-+ * This is slow, but is very rarely executed.
-+ */
-+static inline unsigned long get_segment_eip(struct pt_regs *regs,
-+ unsigned long *eip_limit)
-+{
-+ unsigned long eip = regs->eip;
-+ unsigned seg = regs->xcs & 0xffff;
-+ u32 seg_ar, seg_limit, base, *desc;
-+
-+ /* The standard kernel/user address space limit. */
-+ *eip_limit = (seg & 2) ? USER_DS.seg : KERNEL_DS.seg;
-+
-+ /* Unlikely, but must come before segment checks. */
-+ if (unlikely((regs->eflags & VM_MASK) != 0))
-+ return eip + (seg << 4);
-+
-+ /* By far the most common cases. */
-+ if (likely(seg == __USER_CS || seg == GET_KERNEL_CS()))
-+ return eip;
-+
-+ /* Check the segment exists, is within the current LDT/GDT size,
-+ that kernel/user (ring 0..3) has the appropriate privilege,
-+ that it's a code segment, and get the limit. */
-+ __asm__ ("larl %3,%0; lsll %3,%1"
-+ : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
-+ if ((~seg_ar & 0x9800) || eip > seg_limit) {
-+ *eip_limit = 0;
-+ return 1; /* So that returned eip > *eip_limit. */
-+ }
-+
-+ /* Get the GDT/LDT descriptor base.
-+ When you look for races in this code remember that
-+ LDT and other horrors are only used in user space. */
-+ if (seg & (1<<2)) {
-+ /* Must lock the LDT while reading it. */
-+ down(&current->mm->context.sem);
-+ desc = current->mm->context.ldt;
-+ desc = (void *)desc + (seg & ~7);
-+ } else {
-+ /* Must disable preemption while reading the GDT. */
-+ desc = (u32 *)get_cpu_gdt_table(get_cpu());
-+ desc = (void *)desc + (seg & ~7);
-+ }
-+
-+ /* Decode the code segment base from the descriptor */
-+ base = get_desc_base((unsigned long *)desc);
-+
-+ if (seg & (1<<2)) {
-+ up(&current->mm->context.sem);
-+ } else
-+ put_cpu();
-+
-+ /* Adjust EIP and segment limit, and clamp at the kernel limit.
-+ It's legitimate for segments to wrap at 0xffffffff. */
-+ seg_limit += base;
-+ if (seg_limit < *eip_limit && seg_limit >= base)
-+ *eip_limit = seg_limit;
-+ return eip + base;
-+}
-+
-+/*
-+ * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
-+ * Check that here and ignore it.
-+ */
-+static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
-+{
-+ unsigned long limit;
-+ unsigned long instr = get_segment_eip (regs, &limit);
-+ int scan_more = 1;
-+ int prefetch = 0;
-+ int i;
-+
-+ for (i = 0; scan_more && i < 15; i++) {
-+ unsigned char opcode;
-+ unsigned char instr_hi;
-+ unsigned char instr_lo;
-+
-+ if (instr > limit)
-+ break;
-+ if (__get_user(opcode, (unsigned char __user *) instr))
-+ break;
-+
-+ instr_hi = opcode & 0xf0;
-+ instr_lo = opcode & 0x0f;
-+ instr++;
-+
-+ switch (instr_hi) {
-+ case 0x20:
-+ case 0x30:
-+ /* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. */
-+ scan_more = ((instr_lo & 7) == 0x6);
-+ break;
-+
-+ case 0x60:
-+ /* 0x64 thru 0x67 are valid prefixes in all modes. */
-+ scan_more = (instr_lo & 0xC) == 0x4;
-+ break;
-+ case 0xF0:
-+ /* 0xF0, 0xF2, and 0xF3 are valid prefixes */
-+ scan_more = !instr_lo || (instr_lo>>1) == 1;
-+ break;
-+ case 0x00:
-+ /* Prefetch instruction is 0x0F0D or 0x0F18 */
-+ scan_more = 0;
-+ if (instr > limit)
-+ break;
-+ if (__get_user(opcode, (unsigned char __user *) instr))
-+ break;
-+ prefetch = (instr_lo == 0xF) &&
-+ (opcode == 0x0D || opcode == 0x18);
-+ break;
-+ default:
-+ scan_more = 0;
-+ break;
-+ }
-+ }
-+ return prefetch;
-+}
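A self-contained sketch of the same prefix-scanning idea on a plain byte buffer, useful for convincing yourself the decode above accepts a prefixed prefetch. This is a hypothetical harness; the kernel version reads the faulting context and segment limit instead.

    #include <stdio.h>

    /* Returns 1 if the bytes look like an (optionally prefixed) prefetch,
     * i.e. 0x0F 0x0D or 0x0F 0x18; illustrative only. */
    static int looks_like_prefetch(const unsigned char *b, int len)
    {
            int i, scan_more = 1;

            for (i = 0; scan_more && i < len - 1; i++) {
                    unsigned char hi = b[i] & 0xf0, lo = b[i] & 0x0f;

                    switch (hi) {
                    case 0x20: case 0x30:  /* segment-override prefixes */
                            scan_more = ((lo & 7) == 0x6);
                            break;
                    case 0x60:             /* 0x64..0x67 prefixes */
                            scan_more = (lo & 0xC) == 0x4;
                            break;
                    case 0xF0:             /* lock/rep prefixes */
                            scan_more = !lo || (lo >> 1) == 1;
                            break;
                    case 0x00:             /* possible two-byte opcode */
                            return lo == 0xF &&
                                   (b[i + 1] == 0x0D || b[i + 1] == 0x18);
                    default:
                            scan_more = 0;
                    }
            }
            return 0;
    }

    int main(void)
    {
            /* fs-override + prefetchnta: 0x64 0x0F 0x18 /r */
            unsigned char seq[] = { 0x64, 0x0F, 0x18, 0x06 };
            printf("%d\n", looks_like_prefetch(seq, sizeof(seq)));  /* 1 */
            return 0;
    }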
-+
-+static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
-+ unsigned long error_code)
-+{
-+ if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
-+ boot_cpu_data.x86 >= 6)) {
-+ /* Catch an obscure case of prefetch inside an NX page. */
-+ if (nx_enabled && (error_code & 16))
-+ return 0;
-+ return __is_prefetch(regs, addr);
-+ }
-+ return 0;
-+}
-+
-+static noinline void force_sig_info_fault(int si_signo, int si_code,
-+ unsigned long address, struct task_struct *tsk)
-+{
-+ siginfo_t info;
-+
-+ info.si_signo = si_signo;
-+ info.si_errno = 0;
-+ info.si_code = si_code;
-+ info.si_addr = (void __user *)address;
-+ force_sig_info(si_signo, &info, tsk);
-+}
-+
-+fastcall void do_invalid_op(struct pt_regs *, unsigned long);
-+
-+#ifdef CONFIG_X86_PAE
-+static void dump_fault_path(unsigned long address)
-+{
-+ unsigned long *p, page;
-+ unsigned long mfn;
-+
-+ page = read_cr3();
-+ p = (unsigned long *)__va(page);
-+ p += (address >> 30) * 2;
-+ printk(KERN_ALERT "%08lx -> *pde = %08lx:%08lx\n", page, p[1], p[0]);
-+ if (p[0] & 1) {
-+ mfn = (p[0] >> PAGE_SHIFT) | ((p[1] & 0x7) << 20);
-+ page = mfn_to_pfn(mfn) << PAGE_SHIFT;
-+ p = (unsigned long *)__va(page);
-+ address &= 0x3fffffff;
-+ p += (address >> 21) * 2;
-+ printk(KERN_ALERT "%08lx -> *pme = %08lx:%08lx\n",
-+ page, p[1], p[0]);
-+#ifndef CONFIG_HIGHPTE
-+ if (p[0] & 1) {
-+ mfn = (p[0] >> PAGE_SHIFT) | ((p[1] & 0x7) << 20);
-+ page = mfn_to_pfn(mfn) << PAGE_SHIFT;
-+ p = (unsigned long *) __va(page);
-+ address &= 0x001fffff;
-+ p += (address >> 12) * 2;
-+ printk(KERN_ALERT "%08lx -> *pte = %08lx:%08lx\n",
-+ page, p[1], p[0]);
-+ }
-+#endif
-+ }
-+}
-+#else
-+static void dump_fault_path(unsigned long address)
-+{
-+ unsigned long page;
-+
-+ page = read_cr3();
-+ page = ((unsigned long *) __va(page))[address >> 22];
-+ printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page,
-+ machine_to_phys(page));
-+ /*
-+	 * We must not directly access the pte in the highpte
-+	 * case: the page table might be allocated in highmem.
-+	 * And let's rather not kmap-atomic the pte, just in case
-+	 * it's already allocated.
-+ */
-+#ifndef CONFIG_HIGHPTE
-+ if (page & 1) {
-+ page &= PAGE_MASK;
-+ address &= 0x003ff000;
-+ page = machine_to_phys(page);
-+ page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
-+ printk(KERN_ALERT "*pte = ma %08lx pa %08lx\n", page,
-+ machine_to_phys(page));
-+ }
-+#endif
-+}
-+#endif
-+
-+
-+/*
-+ * This routine handles page faults. It determines the address,
-+ * and the problem, and then passes it off to one of the appropriate
-+ * routines.
-+ *
-+ * error_code:
-+ * bit 0 == 0 means no page found, 1 means protection fault
-+ * bit 1 == 0 means read, 1 means write
-+ * bit 2 == 0 means kernel, 1 means user-mode
-+ */
-+fastcall void __kprobes do_page_fault(struct pt_regs *regs,
-+ unsigned long error_code)
-+{
-+ struct task_struct *tsk;
-+ struct mm_struct *mm;
-+ struct vm_area_struct * vma;
-+ unsigned long address;
-+ int write, si_code;
-+
-+ /* get the address */
-+ address = read_cr2();
-+
-+ /* Set the "privileged fault" bit to something sane. */
-+ error_code &= ~4;
-+ error_code |= (regs->xcs & 2) << 1;
-+ if (regs->eflags & X86_EFLAGS_VM)
-+ error_code |= 4;
-+
-+ if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
-+ SIGSEGV) == NOTIFY_STOP)
-+ return;
-+ /* It's safe to allow irq's after cr2 has been saved */
-+ if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
-+ local_irq_enable();
-+
-+ tsk = current;
-+
-+ si_code = SEGV_MAPERR;
-+
-+ /*
-+ * We fault-in kernel-space virtual memory on-demand. The
-+ * 'reference' page table is init_mm.pgd.
-+ *
-+ * NOTE! We MUST NOT take any locks for this case. We may
-+ * be in an interrupt or a critical region, and should
-+ * only copy the information from the master page table,
-+ * nothing more.
-+ *
-+ * This verifies that the fault happens in kernel space
-+ * (error_code & 4) == 0, and that the fault was not a
-+ * protection error (error_code & 1) == 0.
-+ */
-+ if (unlikely(address >= TASK_SIZE)) {
-+ if (!(error_code & 5))
-+ goto vmalloc_fault;
-+ /*
-+ * Don't take the mm semaphore here. If we fixup a prefetch
-+ * fault we could otherwise deadlock.
-+ */
-+ goto bad_area_nosemaphore;
-+ }
-+
-+ mm = tsk->mm;
-+
-+ /*
-+ * If we're in an interrupt, have no user context or are running in an
-+ * atomic region then we must not take the fault..
-+ */
-+ if (in_atomic() || !mm)
-+ goto bad_area_nosemaphore;
-+
-+ /* When running in the kernel we expect faults to occur only to
-+ * addresses in user space. All other faults represent errors in the
-+	 * kernel and should generate an OOPS. Unfortunately, in the case of an
-+	 * erroneous fault occurring in a code path which already holds mmap_sem
-+ * we will deadlock attempting to validate the fault against the
-+ * address space. Luckily the kernel only validly references user
-+ * space from well defined areas of code, which are listed in the
-+ * exceptions table.
-+ *
-+ * As the vast majority of faults will be valid we will only perform
-+	 * the source reference check when there is a possibility of a deadlock.
-+ * Attempt to lock the address space, if we cannot we then validate the
-+ * source. If this is invalid we can skip the address space check,
-+ * thus avoiding the deadlock.
-+ */
-+ if (!down_read_trylock(&mm->mmap_sem)) {
-+ if ((error_code & 4) == 0 &&
-+ !search_exception_tables(regs->eip))
-+ goto bad_area_nosemaphore;
-+ down_read(&mm->mmap_sem);
-+ }
-+
-+ vma = find_vma(mm, address);
-+ if (!vma)
-+ goto bad_area;
-+ if (vma->vm_start <= address)
-+ goto good_area;
-+ if (!(vma->vm_flags & VM_GROWSDOWN))
-+ goto bad_area;
-+ if (error_code & 4) {
-+ /*
-+ * accessing the stack below %esp is always a bug.
-+ * The "+ 32" is there due to some instructions (like
-+ * pusha) doing post-decrement on the stack and that
-+ * doesn't show up until later..
-+ */
-+ if (address + 32 < regs->esp)
-+ goto bad_area;
-+ }
-+ if (expand_stack(vma, address))
-+ goto bad_area;
-+/*
-+ * Ok, we have a good vm_area for this memory access, so
-+ * we can handle it..
-+ */
-+good_area:
-+ si_code = SEGV_ACCERR;
-+ write = 0;
-+ switch (error_code & 3) {
-+ default: /* 3: write, present */
-+#ifdef TEST_VERIFY_AREA
-+ if (regs->cs == GET_KERNEL_CS())
-+ printk("WP fault at %08lx\n", regs->eip);
-+#endif
-+ /* fall through */
-+ case 2: /* write, not present */
-+ if (!(vma->vm_flags & VM_WRITE))
-+ goto bad_area;
-+ write++;
-+ break;
-+ case 1: /* read, present */
-+ goto bad_area;
-+ case 0: /* read, not present */
-+ if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
-+ goto bad_area;
-+ }
-+
-+ survive:
-+ /*
-+ * If for any reason at all we couldn't handle the fault,
-+ * make sure we exit gracefully rather than endlessly redo
-+ * the fault.
-+ */
-+ switch (handle_mm_fault(mm, vma, address, write)) {
-+ case VM_FAULT_MINOR:
-+ tsk->min_flt++;
-+ break;
-+ case VM_FAULT_MAJOR:
-+ tsk->maj_flt++;
-+ break;
-+ case VM_FAULT_SIGBUS:
-+ goto do_sigbus;
-+ case VM_FAULT_OOM:
-+ goto out_of_memory;
-+ default:
-+ BUG();
-+ }
-+
-+ /*
-+ * Did it hit the DOS screen memory VA from vm86 mode?
-+ */
-+ if (regs->eflags & VM_MASK) {
-+ unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
-+ if (bit < 32)
-+ tsk->thread.screen_bitmap |= 1 << bit;
-+ }
-+ up_read(&mm->mmap_sem);
-+ return;
-+
-+/*
-+ * Something tried to access memory that isn't in our memory map..
-+ * Fix it, but check if it's kernel or user first..
-+ */
-+bad_area:
-+ up_read(&mm->mmap_sem);
-+
-+bad_area_nosemaphore:
-+ /* User mode accesses just cause a SIGSEGV */
-+ if (error_code & 4) {
-+ /*
-+ * Valid to do another page fault here because this one came
-+ * from user space.
-+ */
-+ if (is_prefetch(regs, address, error_code))
-+ return;
-+
-+ tsk->thread.cr2 = address;
-+ /* Kernel addresses are always protection faults */
-+ tsk->thread.error_code = error_code | (address >= TASK_SIZE);
-+ tsk->thread.trap_no = 14;
-+ force_sig_info_fault(SIGSEGV, si_code, address, tsk);
-+ return;
-+ }
-+
-+#ifdef CONFIG_X86_F00F_BUG
-+ /*
-+ * Pentium F0 0F C7 C8 bug workaround.
-+ */
-+ if (boot_cpu_data.f00f_bug) {
-+ unsigned long nr;
-+
-+ nr = (address - idt_descr.address) >> 3;
-+
-+ if (nr == 6) {
-+ do_invalid_op(regs, 0);
-+ return;
-+ }
-+ }
-+#endif
-+
-+no_context:
-+ /* Are we prepared to handle this kernel fault? */
-+ if (fixup_exception(regs))
-+ return;
-+
-+ /*
-+ * Valid to do another page fault here, because if this fault
-+ * had been triggered by is_prefetch fixup_exception would have
-+ * handled it.
-+ */
-+ if (is_prefetch(regs, address, error_code))
-+ return;
-+
-+/*
-+ * Oops. The kernel tried to access some bad page. We'll have to
-+ * terminate things with extreme prejudice.
-+ */
-+
-+ bust_spinlocks(1);
-+
-+#ifdef CONFIG_X86_PAE
-+ if (error_code & 16) {
-+ pte_t *pte = lookup_address(address);
-+
-+ if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
-+ printk(KERN_CRIT "kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n", current->uid);
-+ }
-+#endif
-+ if (address < PAGE_SIZE)
-+ printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
-+ else
-+ printk(KERN_ALERT "Unable to handle kernel paging request");
-+ printk(" at virtual address %08lx\n",address);
-+ printk(KERN_ALERT " printing eip:\n");
-+ printk("%08lx\n", regs->eip);
-+ dump_fault_path(address);
-+ tsk->thread.cr2 = address;
-+ tsk->thread.trap_no = 14;
-+ tsk->thread.error_code = error_code;
-+ die("Oops", regs, error_code);
-+ bust_spinlocks(0);
-+ do_exit(SIGKILL);
-+
-+/*
-+ * We ran out of memory, or some other thing happened to us that made
-+ * us unable to handle the page fault gracefully.
-+ */
-+out_of_memory:
-+ up_read(&mm->mmap_sem);
-+ if (tsk->pid == 1) {
-+ yield();
-+ down_read(&mm->mmap_sem);
-+ goto survive;
-+ }
-+ printk("VM: killing process %s\n", tsk->comm);
-+ if (error_code & 4)
-+ do_exit(SIGKILL);
-+ goto no_context;
-+
-+do_sigbus:
-+ up_read(&mm->mmap_sem);
-+
-+ /* Kernel mode? Handle exceptions or die */
-+ if (!(error_code & 4))
-+ goto no_context;
-+
-+ /* User space => ok to do another page fault */
-+ if (is_prefetch(regs, address, error_code))
-+ return;
-+
-+ tsk->thread.cr2 = address;
-+ tsk->thread.error_code = error_code;
-+ tsk->thread.trap_no = 14;
-+ force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
-+ return;
-+
-+vmalloc_fault:
-+ {
-+ /*
-+ * Synchronize this task's top level page-table
-+ * with the 'reference' page table.
-+ *
-+ * Do _not_ use "tsk" here. We might be inside
-+ * an interrupt in the middle of a task switch..
-+ */
-+ int index = pgd_index(address);
-+ unsigned long pgd_paddr;
-+ pgd_t *pgd, *pgd_k;
-+ pud_t *pud, *pud_k;
-+ pmd_t *pmd, *pmd_k;
-+ pte_t *pte_k;
-+
-+ pgd_paddr = read_cr3();
-+ pgd = index + (pgd_t *)__va(pgd_paddr);
-+ pgd_k = init_mm.pgd + index;
-+
-+ if (!pgd_present(*pgd_k))
-+ goto no_context;
-+
-+ /*
-+ * set_pgd(pgd, *pgd_k); here would be useless on PAE
-+ * and redundant with the set_pmd() on non-PAE. As would
-+ * set_pud.
-+ */
-+
-+ pud = pud_offset(pgd, address);
-+ pud_k = pud_offset(pgd_k, address);
-+ if (!pud_present(*pud_k))
-+ goto no_context;
-+
-+ pmd = pmd_offset(pud, address);
-+ pmd_k = pmd_offset(pud_k, address);
-+ if (!pmd_present(*pmd_k))
-+ goto no_context;
-+#ifndef CONFIG_XEN
-+ set_pmd(pmd, *pmd_k);
-+#else
-+ /*
-+ * When running on Xen we must launder *pmd_k through
-+ * pmd_val() to ensure that _PAGE_PRESENT is correctly set.
-+ */
-+ set_pmd(pmd, __pmd(pmd_val(*pmd_k)));
-+#endif
-+
-+ pte_k = pte_offset_kernel(pmd_k, address);
-+ if (!pte_present(*pte_k))
-+ goto no_context;
-+ return;
-+ }
-+}
-diff --git a/arch/i386/mm/highmem-xen.c b/arch/i386/mm/highmem-xen.c
-new file mode 100644
-index 0000000..2a9ce1c
---- /dev/null
-+++ b/arch/i386/mm/highmem-xen.c
-@@ -0,0 +1,123 @@
-+#include <linux/highmem.h>
-+#include <linux/module.h>
-+
-+void *kmap(struct page *page)
-+{
-+ might_sleep();
-+ if (!PageHighMem(page))
-+ return page_address(page);
-+ return kmap_high(page);
-+}
-+
-+void kunmap(struct page *page)
-+{
-+ if (in_interrupt())
-+ BUG();
-+ if (!PageHighMem(page))
-+ return;
-+ kunmap_high(page);
-+}
-+
-+/*
-+ * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
-+ * no global lock is needed and because the kmap code must perform a global TLB
-+ * invalidation when the kmap pool wraps.
-+ *
-+ * However, when holding an atomic kmap it is not legal to sleep, so atomic
-+ * kmaps are appropriate for short, tight code paths only.
-+ */
-+static void *__kmap_atomic(struct page *page, enum km_type type, pgprot_t prot)
-+{
-+ enum fixed_addresses idx;
-+ unsigned long vaddr;
-+
-+ /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-+ inc_preempt_count();
-+ if (!PageHighMem(page))
-+ return page_address(page);
-+
-+ idx = type + KM_TYPE_NR*smp_processor_id();
-+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-+#ifdef CONFIG_DEBUG_HIGHMEM
-+ if (!pte_none(*(kmap_pte-idx)))
-+ BUG();
-+#endif
-+ set_pte_at_sync(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
-+
-+ return (void*) vaddr;
-+}
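-+
-+/*
-+ * Usage sketch (illustrative only): an atomic kmap must be paired
-+ * with kunmap_atomic() on the same slot, with no sleeping in between:
-+ *
-+ *	void *p = kmap_atomic(page, KM_USER0);
-+ *	memset(p, 0, PAGE_SIZE);
-+ *	kunmap_atomic(p, KM_USER0);
-+ */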
-+
-+void *kmap_atomic(struct page *page, enum km_type type)
-+{
-+ return __kmap_atomic(page, type, kmap_prot);
-+}
-+
-+/* Same as kmap_atomic but with PAGE_KERNEL_RO page protection. */
-+void *kmap_atomic_pte(struct page *page, enum km_type type)
-+{
-+ return __kmap_atomic(page, type, PAGE_KERNEL_RO);
-+}
-+
-+void kunmap_atomic(void *kvaddr, enum km_type type)
-+{
-+#ifdef CONFIG_DEBUG_HIGHMEM
-+ unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-+ enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-+
-+ if (vaddr < FIXADDR_START) { // FIXME
-+ dec_preempt_count();
-+ preempt_check_resched();
-+ return;
-+ }
-+
-+ if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
-+ BUG();
-+
-+ /*
-+	 * force other mappings to Oops if they try to access
-+	 * this pte without first remapping it
-+ */
-+ pte_clear(&init_mm, vaddr, kmap_pte-idx);
-+ __flush_tlb_one(vaddr);
-+#endif
-+
-+ dec_preempt_count();
-+ preempt_check_resched();
-+}
-+
-+/* This is the same as kmap_atomic() but can map memory that doesn't
-+ * have a struct page associated with it.
-+ */
-+void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
-+{
-+ enum fixed_addresses idx;
-+ unsigned long vaddr;
-+
-+ inc_preempt_count();
-+
-+ idx = type + KM_TYPE_NR*smp_processor_id();
-+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-+ set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
-+ __flush_tlb_one(vaddr);
-+
-+ return (void*) vaddr;
-+}
-+
-+struct page *kmap_atomic_to_page(void *ptr)
-+{
-+ unsigned long idx, vaddr = (unsigned long)ptr;
-+ pte_t *pte;
-+
-+ if (vaddr < FIXADDR_START)
-+ return virt_to_page(ptr);
-+
-+ idx = virt_to_fix(vaddr);
-+ pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
-+ return pte_page(*pte);
-+}
-+
-+EXPORT_SYMBOL(kmap);
-+EXPORT_SYMBOL(kunmap);
-+EXPORT_SYMBOL(kmap_atomic);
-+EXPORT_SYMBOL(kunmap_atomic);
-+EXPORT_SYMBOL(kmap_atomic_to_page);
-diff --git a/arch/i386/mm/hypervisor.c b/arch/i386/mm/hypervisor.c
-new file mode 100644
-index 0000000..cfc359e
---- /dev/null
-+++ b/arch/i386/mm/hypervisor.c
-@@ -0,0 +1,471 @@
-+/******************************************************************************
-+ * mm/hypervisor.c
-+ *
-+ * Update page tables via the hypervisor.
-+ *
-+ * Copyright (c) 2002-2004, K A Fraser
-+ *
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/sched.h>
-+#include <linux/mm.h>
-+#include <linux/vmalloc.h>
-+#include <asm/page.h>
-+#include <asm/pgtable.h>
-+#include <asm/hypervisor.h>
-+#include <xen/balloon.h>
-+#include <xen/features.h>
-+#include <xen/interface/memory.h>
-+#include <linux/module.h>
-+#include <linux/percpu.h>
-+#include <asm/tlbflush.h>
-+
-+#ifdef CONFIG_X86_64
-+#define pmd_val_ma(v) (v).pmd
-+#else
-+#ifdef CONFIG_X86_PAE
-+# define pmd_val_ma(v) ((v).pmd)
-+# define pud_val_ma(v) ((v).pgd.pgd)
-+#else
-+# define pmd_val_ma(v) ((v).pud.pgd.pgd)
-+#endif
-+#endif
-+
-+void xen_l1_entry_update(pte_t *ptr, pte_t val)
-+{
-+ mmu_update_t u;
-+ u.ptr = virt_to_machine(ptr);
-+ u.val = pte_val_ma(val);
-+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
-+}
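-+
-+/*
-+ * Each helper above and below follows the same pattern: u.ptr takes
-+ * the machine address of the page-table entry (via virt_to_machine()),
-+ * u.val the new entry value, and the one-element batch is submitted to
-+ * Xen with HYPERVISOR_mmu_update(), which validates and applies it.
-+ */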
-+
-+void xen_l2_entry_update(pmd_t *ptr, pmd_t val)
-+{
-+ mmu_update_t u;
-+ u.ptr = virt_to_machine(ptr);
-+ u.val = pmd_val_ma(val);
-+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+#ifdef CONFIG_X86_PAE
-+void xen_l3_entry_update(pud_t *ptr, pud_t val)
-+{
-+ mmu_update_t u;
-+ u.ptr = virt_to_machine(ptr);
-+ u.val = pud_val_ma(val);
-+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
-+}
-+#endif
-+
-+#ifdef CONFIG_X86_64
-+void xen_l3_entry_update(pud_t *ptr, pud_t val)
-+{
-+ mmu_update_t u;
-+ u.ptr = virt_to_machine(ptr);
-+ u.val = val.pud;
-+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_l4_entry_update(pgd_t *ptr, pgd_t val)
-+{
-+ mmu_update_t u;
-+ u.ptr = virt_to_machine(ptr);
-+ u.val = val.pgd;
-+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
-+}
-+#endif /* CONFIG_X86_64 */
-+
-+void xen_machphys_update(unsigned long mfn, unsigned long pfn)
-+{
-+ mmu_update_t u;
-+ if (xen_feature(XENFEAT_auto_translated_physmap)) {
-+ BUG_ON(pfn != mfn);
-+ return;
-+ }
-+ u.ptr = ((unsigned long long)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
-+ u.val = pfn;
-+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_pt_switch(unsigned long ptr)
-+{
-+ struct mmuext_op op;
-+ op.cmd = MMUEXT_NEW_BASEPTR;
-+ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_new_user_pt(unsigned long ptr)
-+{
-+ struct mmuext_op op;
-+ op.cmd = MMUEXT_NEW_USER_BASEPTR;
-+ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_tlb_flush(void)
-+{
-+ struct mmuext_op op;
-+ op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
-+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_invlpg(unsigned long ptr)
-+{
-+ struct mmuext_op op;
-+ op.cmd = MMUEXT_INVLPG_LOCAL;
-+ op.arg1.linear_addr = ptr & PAGE_MASK;
-+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+#ifdef CONFIG_SMP
-+
-+void xen_tlb_flush_all(void)
-+{
-+ struct mmuext_op op;
-+ op.cmd = MMUEXT_TLB_FLUSH_ALL;
-+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_tlb_flush_mask(cpumask_t *mask)
-+{
-+ struct mmuext_op op;
-+ if ( cpus_empty(*mask) )
-+ return;
-+ op.cmd = MMUEXT_TLB_FLUSH_MULTI;
-+ op.arg2.vcpumask = mask->bits;
-+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_invlpg_all(unsigned long ptr)
-+{
-+ struct mmuext_op op;
-+ op.cmd = MMUEXT_INVLPG_ALL;
-+ op.arg1.linear_addr = ptr & PAGE_MASK;
-+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr)
-+{
-+ struct mmuext_op op;
-+ if ( cpus_empty(*mask) )
-+ return;
-+ op.cmd = MMUEXT_INVLPG_MULTI;
-+ op.arg1.linear_addr = ptr & PAGE_MASK;
-+ op.arg2.vcpumask = mask->bits;
-+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+#endif /* CONFIG_SMP */
-+
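-+/*
-+ * The pin/unpin helpers below ask Xen to validate a frame as a page
-+ * table of the given level and keep it typed that way; which top-level
-+ * MMUEXT_PIN_* command applies depends on how many paging levels the
-+ * build uses (see the #ifdefs in xen_pgd_pin()).
-+ */
-+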
-+void xen_pgd_pin(unsigned long ptr)
-+{
-+ struct mmuext_op op;
-+#ifdef CONFIG_X86_64
-+ op.cmd = MMUEXT_PIN_L4_TABLE;
-+#elif defined(CONFIG_X86_PAE)
-+ op.cmd = MMUEXT_PIN_L3_TABLE;
-+#else
-+ op.cmd = MMUEXT_PIN_L2_TABLE;
-+#endif
-+ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_pgd_unpin(unsigned long ptr)
-+{
-+ struct mmuext_op op;
-+ op.cmd = MMUEXT_UNPIN_TABLE;
-+ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_pte_pin(unsigned long ptr)
-+{
-+ struct mmuext_op op;
-+ op.cmd = MMUEXT_PIN_L1_TABLE;
-+ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_pte_unpin(unsigned long ptr)
-+{
-+ struct mmuext_op op;
-+ op.cmd = MMUEXT_UNPIN_TABLE;
-+ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+#ifdef CONFIG_X86_64
-+void xen_pud_pin(unsigned long ptr)
-+{
-+ struct mmuext_op op;
-+ op.cmd = MMUEXT_PIN_L3_TABLE;
-+ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_pud_unpin(unsigned long ptr)
-+{
-+ struct mmuext_op op;
-+ op.cmd = MMUEXT_UNPIN_TABLE;
-+ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_pmd_pin(unsigned long ptr)
-+{
-+ struct mmuext_op op;
-+ op.cmd = MMUEXT_PIN_L2_TABLE;
-+ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_pmd_unpin(unsigned long ptr)
-+{
-+ struct mmuext_op op;
-+ op.cmd = MMUEXT_UNPIN_TABLE;
-+ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+#endif /* CONFIG_X86_64 */
-+
-+void xen_set_ldt(unsigned long ptr, unsigned long len)
-+{
-+ struct mmuext_op op;
-+ op.cmd = MMUEXT_SET_LDT;
-+ op.arg1.linear_addr = ptr;
-+ op.arg2.nr_ents = len;
-+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+/*
-+ * Bitmap is indexed by page number. If bit is set, the page is part of a
-+ * xen_create_contiguous_region() area of memory.
-+ */
-+unsigned long *contiguous_bitmap;
-+
-+static void contiguous_bitmap_set(
-+ unsigned long first_page, unsigned long nr_pages)
-+{
-+ unsigned long start_off, end_off, curr_idx, end_idx;
-+
-+ curr_idx = first_page / BITS_PER_LONG;
-+ start_off = first_page & (BITS_PER_LONG-1);
-+ end_idx = (first_page + nr_pages) / BITS_PER_LONG;
-+ end_off = (first_page + nr_pages) & (BITS_PER_LONG-1);
-+
-+ if (curr_idx == end_idx) {
-+ contiguous_bitmap[curr_idx] |=
-+ ((1UL<<end_off)-1) & -(1UL<<start_off);
-+ } else {
-+ contiguous_bitmap[curr_idx] |= -(1UL<<start_off);
-+ while ( ++curr_idx < end_idx )
-+ contiguous_bitmap[curr_idx] = ~0UL;
-+ contiguous_bitmap[curr_idx] |= (1UL<<end_off)-1;
-+ }
-+}
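-+
-+/*
-+ * For example, with BITS_PER_LONG == 32, first_page == 30 and
-+ * nr_pages == 4: start_off == 30, end_idx == 1 and end_off == 2, so
-+ * bits 30-31 of word 0 (-(1UL<<30)) and bits 0-1 of word 1
-+ * ((1UL<<2)-1) get set.
-+ */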
-+
-+static void contiguous_bitmap_clear(
-+ unsigned long first_page, unsigned long nr_pages)
-+{
-+ unsigned long start_off, end_off, curr_idx, end_idx;
-+
-+ curr_idx = first_page / BITS_PER_LONG;
-+ start_off = first_page & (BITS_PER_LONG-1);
-+ end_idx = (first_page + nr_pages) / BITS_PER_LONG;
-+ end_off = (first_page + nr_pages) & (BITS_PER_LONG-1);
-+
-+ if (curr_idx == end_idx) {
-+ contiguous_bitmap[curr_idx] &=
-+ -(1UL<<end_off) | ((1UL<<start_off)-1);
-+ } else {
-+ contiguous_bitmap[curr_idx] &= (1UL<<start_off)-1;
-+ while ( ++curr_idx != end_idx )
-+ contiguous_bitmap[curr_idx] = 0;
-+ contiguous_bitmap[curr_idx] &= -(1UL<<end_off);
-+ }
-+}
-+
-+/* Ensure multi-page extents are contiguous in machine memory. */
-+int xen_create_contiguous_region(
-+ unsigned long vstart, unsigned int order, unsigned int address_bits)
-+{
-+ pgd_t *pgd;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ pte_t *pte;
-+ unsigned long frame, i, flags;
-+ struct xen_memory_reservation reservation = {
-+ .extent_start = &frame,
-+ .nr_extents = 1,
-+ .extent_order = 0,
-+ .domid = DOMID_SELF
-+ };
-+
-+ /*
-+ * Currently an auto-translated guest will not perform I/O, nor will
-+ * it require PAE page directories below 4GB. Therefore any calls to
-+ * this function are redundant and can be ignored.
-+ */
-+ if (xen_feature(XENFEAT_auto_translated_physmap))
-+ return 0;
-+
-+ scrub_pages(vstart, 1 << order);
-+
-+ balloon_lock(flags);
-+
-+ /* 1. Zap current PTEs, giving away the underlying pages. */
-+ for (i = 0; i < (1<<order); i++) {
-+ pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
-+ pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
-+ pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
-+ pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
-+ frame = pte_mfn(*pte);
-+ BUG_ON(HYPERVISOR_update_va_mapping(
-+ vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
-+ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
-+ INVALID_P2M_ENTRY);
-+ BUG_ON(HYPERVISOR_memory_op(
-+ XENMEM_decrease_reservation, &reservation) != 1);
-+ }
-+
-+ /* 2. Get a new contiguous memory extent. */
-+ reservation.extent_order = order;
-+ reservation.address_bits = address_bits;
-+ frame = __pa(vstart) >> PAGE_SHIFT;
-+ if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
-+ &reservation) != 1)
-+ goto fail;
-+
-+ /* 3. Map the new extent in place of old pages. */
-+ for (i = 0; i < (1<<order); i++) {
-+ BUG_ON(HYPERVISOR_update_va_mapping(
-+ vstart + (i*PAGE_SIZE),
-+ pfn_pte_ma(frame+i, PAGE_KERNEL), 0));
-+ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame+i);
-+ }
-+
-+ flush_tlb_all();
-+
-+ contiguous_bitmap_set(__pa(vstart) >> PAGE_SHIFT, 1UL << order);
-+
-+ balloon_unlock(flags);
-+
-+ return 0;
-+
-+ fail:
-+ reservation.extent_order = 0;
-+ reservation.address_bits = 0;
-+
-+ for (i = 0; i < (1<<order); i++) {
-+ frame = (__pa(vstart) >> PAGE_SHIFT) + i;
-+ BUG_ON(HYPERVISOR_memory_op(
-+ XENMEM_populate_physmap, &reservation) != 1);
-+ BUG_ON(HYPERVISOR_update_va_mapping(
-+ vstart + (i*PAGE_SIZE),
-+ pfn_pte_ma(frame, PAGE_KERNEL), 0));
-+ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
-+ }
-+
-+ flush_tlb_all();
-+
-+ balloon_unlock(flags);
-+
-+ return -ENOMEM;
-+}
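-+
-+/*
-+ * Note the failure path: if no machine-contiguous extent can be
-+ * obtained, the frames that were given away are repopulated one at a
-+ * time in their old slots before returning -ENOMEM, so the region
-+ * stays usable, just not contiguous.
-+ */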
-+
-+void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
-+{
-+ pgd_t *pgd;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ pte_t *pte;
-+ unsigned long frame, i, flags;
-+ struct xen_memory_reservation reservation = {
-+ .extent_start = &frame,
-+ .nr_extents = 1,
-+ .extent_order = 0,
-+ .domid = DOMID_SELF
-+ };
-+
-+ if (xen_feature(XENFEAT_auto_translated_physmap))
-+ return;
-+
-+ scrub_pages(vstart, 1 << order);
-+
-+ balloon_lock(flags);
-+
-+ contiguous_bitmap_clear(__pa(vstart) >> PAGE_SHIFT, 1UL << order);
-+
-+ /* 1. Zap current PTEs, giving away the underlying pages. */
-+ for (i = 0; i < (1<<order); i++) {
-+ pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
-+ pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
-+ pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
-+ pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
-+ frame = pte_mfn(*pte);
-+ BUG_ON(HYPERVISOR_update_va_mapping(
-+ vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
-+ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
-+ INVALID_P2M_ENTRY);
-+ BUG_ON(HYPERVISOR_memory_op(
-+ XENMEM_decrease_reservation, &reservation) != 1);
-+ }
-+
-+ /* 2. Map new pages in place of old pages. */
-+ for (i = 0; i < (1<<order); i++) {
-+ frame = (__pa(vstart) >> PAGE_SHIFT) + i;
-+ BUG_ON(HYPERVISOR_memory_op(
-+ XENMEM_populate_physmap, &reservation) != 1);
-+ BUG_ON(HYPERVISOR_update_va_mapping(
-+ vstart + (i*PAGE_SIZE),
-+ pfn_pte_ma(frame, PAGE_KERNEL), 0));
-+ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
-+ }
-+
-+ flush_tlb_all();
-+
-+ balloon_unlock(flags);
-+}
-+
-+#ifdef __i386__
-+int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b)
-+{
-+ __u32 *lp = (__u32 *)((char *)ldt + entry * 8);
-+ maddr_t mach_lp = arbitrary_virt_to_machine(lp);
-+ return HYPERVISOR_update_descriptor(
-+ mach_lp, (u64)entry_a | ((u64)entry_b<<32));
-+}
-+#endif
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/arch/i386/mm/init-xen.c b/arch/i386/mm/init-xen.c
-new file mode 100644
-index 0000000..b490dbd
---- /dev/null
-+++ b/arch/i386/mm/init-xen.c
-@@ -0,0 +1,846 @@
-+/*
-+ * linux/arch/i386/mm/init.c
-+ *
-+ * Copyright (C) 1995 Linus Torvalds
-+ *
-+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/module.h>
-+#include <linux/signal.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/string.h>
-+#include <linux/types.h>
-+#include <linux/ptrace.h>
-+#include <linux/mman.h>
-+#include <linux/mm.h>
-+#include <linux/hugetlb.h>
-+#include <linux/swap.h>
-+#include <linux/smp.h>
-+#include <linux/init.h>
-+#include <linux/highmem.h>
-+#include <linux/pagemap.h>
-+#include <linux/bootmem.h>
-+#include <linux/slab.h>
-+#include <linux/proc_fs.h>
-+#include <linux/efi.h>
-+#include <linux/memory_hotplug.h>
-+#include <linux/initrd.h>
-+
-+#include <asm/processor.h>
-+#include <asm/system.h>
-+#include <asm/uaccess.h>
-+#include <asm/pgtable.h>
-+#include <asm/dma.h>
-+#include <asm/fixmap.h>
-+#include <asm/e820.h>
-+#include <asm/apic.h>
-+#include <asm/tlb.h>
-+#include <asm/tlbflush.h>
-+#include <asm/sections.h>
-+#include <asm/hypervisor.h>
-+
-+extern unsigned long *contiguous_bitmap;
-+
-+unsigned int __VMALLOC_RESERVE = 128 << 20;
-+
-+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-+unsigned long highstart_pfn, highend_pfn;
-+
-+static int noinline do_test_wp_bit(void);
-+
-+/*
-+ * Creates a middle page table and puts a pointer to it in the
-+ * given global directory entry. This only returns the gd entry
-+ * in non-PAE compilation mode, since the middle layer is folded.
-+ */
-+static pmd_t * __init one_md_table_init(pgd_t *pgd)
-+{
-+ pud_t *pud;
-+ pmd_t *pmd_table;
-+
-+#ifdef CONFIG_X86_PAE
-+ pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
-+ make_lowmem_page_readonly(pmd_table, XENFEAT_writable_page_tables);
-+ set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
-+ pud = pud_offset(pgd, 0);
-+ if (pmd_table != pmd_offset(pud, 0))
-+ BUG();
-+#else
-+ pud = pud_offset(pgd, 0);
-+ pmd_table = pmd_offset(pud, 0);
-+#endif
-+
-+ return pmd_table;
-+}
-+
-+/*
-+ * Create a page table and place a pointer to it in a middle page
-+ * directory entry.
-+ */
-+static pte_t * __init one_page_table_init(pmd_t *pmd)
-+{
-+ if (pmd_none(*pmd)) {
-+ pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
-+ make_lowmem_page_readonly(page_table,
-+ XENFEAT_writable_page_tables);
-+ set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
-+ if (page_table != pte_offset_kernel(pmd, 0))
-+ BUG();
-+
-+ return page_table;
-+ }
-+
-+ return pte_offset_kernel(pmd, 0);
-+}
-+
-+/*
-+ * This function initializes a certain range of kernel virtual memory
-+ * with new bootmem page tables, everywhere page tables are missing in
-+ * the given range.
-+ */
-+
-+/*
-+ * NOTE: The pagetables are allocated contiguously in physical space,
-+ * so we can cache the location of the first one and move around
-+ * without checking the pgd every time.
-+ */
-+static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
-+{
-+ pgd_t *pgd;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ int pgd_idx, pmd_idx;
-+ unsigned long vaddr;
-+
-+ vaddr = start;
-+ pgd_idx = pgd_index(vaddr);
-+ pmd_idx = pmd_index(vaddr);
-+ pgd = pgd_base + pgd_idx;
-+
-+ for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
-+ if (pgd_none(*pgd))
-+ one_md_table_init(pgd);
-+ pud = pud_offset(pgd, vaddr);
-+ pmd = pmd_offset(pud, vaddr);
-+ for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
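-+			/* Never hook page tables into hypervisor-owned VA space. */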
-+ if (vaddr < HYPERVISOR_VIRT_START && pmd_none(*pmd))
-+ one_page_table_init(pmd);
-+
-+ vaddr += PMD_SIZE;
-+ }
-+ pmd_idx = 0;
-+ }
-+}
-+
-+static inline int is_kernel_text(unsigned long addr)
-+{
-+ if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
-+ return 1;
-+ return 0;
-+}
-+
-+/*
-+ * This maps the physical memory to kernel virtual address space, a total
-+ * of max_low_pfn pages, by creating page tables starting from address
-+ * PAGE_OFFSET.
-+ */
-+static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
-+{
-+ unsigned long pfn;
-+ pgd_t *pgd;
-+ pmd_t *pmd;
-+ pte_t *pte;
-+ int pgd_idx, pmd_idx, pte_ofs;
-+
-+ unsigned long max_ram_pfn = xen_start_info->nr_pages;
-+ if (max_ram_pfn > max_low_pfn)
-+ max_ram_pfn = max_low_pfn;
-+
-+ pgd_idx = pgd_index(PAGE_OFFSET);
-+ pgd = pgd_base + pgd_idx;
-+ pfn = 0;
-+ pmd_idx = pmd_index(PAGE_OFFSET);
-+ pte_ofs = pte_index(PAGE_OFFSET);
-+
-+ for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
-+#ifdef CONFIG_XEN
-+ /*
-+		 * Native linux doesn't have PAE paging enabled yet at
-+		 * this point.  When running as a xen domain we are
-+		 * already in PAE mode, so we can't simply hook an
-+		 * empty pmd.  That would kill the mappings we are
-+		 * currently using ...
-+ */
-+ pmd = pmd_offset(pud_offset(pgd, PAGE_OFFSET), PAGE_OFFSET);
-+#else
-+ pmd = one_md_table_init(pgd);
-+#endif
-+ if (pfn >= max_low_pfn)
-+ continue;
-+ pmd += pmd_idx;
-+ for (; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
-+ unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;
-+ if (address >= HYPERVISOR_VIRT_START)
-+ continue;
-+
-+ /* Map with big pages if possible, otherwise create normal page tables. */
-+ if (cpu_has_pse) {
-+ unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;
-+
-+ if (is_kernel_text(address) || is_kernel_text(address2))
-+ set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
-+ else
-+ set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
-+ pfn += PTRS_PER_PTE;
-+ } else {
-+ pte = one_page_table_init(pmd);
-+
-+ pte += pte_ofs;
-+ for (; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; pte++, pfn++, pte_ofs++) {
-+ /* XEN: Only map initial RAM allocation. */
-+ if ((pfn >= max_ram_pfn) || pte_present(*pte))
-+ continue;
-+ if (is_kernel_text(address))
-+ set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
-+ else
-+ set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
-+ }
-+ pte_ofs = 0;
-+ }
-+ }
-+ pmd_idx = 0;
-+ }
-+}
-+
-+#ifndef CONFIG_XEN
-+
-+static inline int page_kills_ppro(unsigned long pagenr)
-+{
-+ if (pagenr >= 0x70000 && pagenr <= 0x7003F)
-+ return 1;
-+ return 0;
-+}
-+
-+extern int is_available_memory(efi_memory_desc_t *);
-+
-+int page_is_ram(unsigned long pagenr)
-+{
-+ int i;
-+ unsigned long addr, end;
-+
-+ if (efi_enabled) {
-+ efi_memory_desc_t *md;
-+ void *p;
-+
-+ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
-+ md = p;
-+ if (!is_available_memory(md))
-+ continue;
-+ addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
-+ end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;
-+
-+ if ((pagenr >= addr) && (pagenr < end))
-+ return 1;
-+ }
-+ return 0;
-+ }
-+
-+ for (i = 0; i < e820.nr_map; i++) {
-+
-+ if (e820.map[i].type != E820_RAM) /* not usable memory */
-+ continue;
-+ /*
-+ * !!!FIXME!!! Some BIOSen report areas as RAM that
-+ * are not. Notably the 640->1Mb area. We need a sanity
-+ * check here.
-+ */
-+ addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
-+ end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
-+ if ((pagenr >= addr) && (pagenr < end))
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+#else /* CONFIG_XEN */
-+
-+#define page_kills_ppro(p) 0
-+#define page_is_ram(p) 1
-+
-+#endif
-+
-+#ifdef CONFIG_HIGHMEM
-+pte_t *kmap_pte;
-+pgprot_t kmap_prot;
-+
-+#define kmap_get_fixmap_pte(vaddr) \
-+ pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))
-+
-+static void __init kmap_init(void)
-+{
-+ unsigned long kmap_vstart;
-+
-+ /* cache the first kmap pte */
-+ kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
-+ kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
-+
-+ kmap_prot = PAGE_KERNEL;
-+}
-+
-+static void __init permanent_kmaps_init(pgd_t *pgd_base)
-+{
-+ pgd_t *pgd;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ pte_t *pte;
-+ unsigned long vaddr;
-+
-+ vaddr = PKMAP_BASE;
-+ page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
-+
-+ pgd = swapper_pg_dir + pgd_index(vaddr);
-+ pud = pud_offset(pgd, vaddr);
-+ pmd = pmd_offset(pud, vaddr);
-+ pte = pte_offset_kernel(pmd, vaddr);
-+ pkmap_page_table = pte;
-+}
-+
-+static void __meminit free_new_highpage(struct page *page, int pfn)
-+{
-+ set_page_count(page, 1);
-+ if (pfn < xen_start_info->nr_pages)
-+ __free_page(page);
-+ totalhigh_pages++;
-+}
-+
-+void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
-+{
-+ if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
-+ ClearPageReserved(page);
-+ free_new_highpage(page, pfn);
-+ } else
-+ SetPageReserved(page);
-+}
-+
-+static int add_one_highpage_hotplug(struct page *page, unsigned long pfn)
-+{
-+ free_new_highpage(page, pfn);
-+ totalram_pages++;
-+#ifdef CONFIG_FLATMEM
-+ max_mapnr = max(pfn, max_mapnr);
-+#endif
-+ num_physpages++;
-+ return 0;
-+}
-+
-+/*
-+ * Not currently handling the NUMA case.
-+ * Assuming a single node, and that all memory added
-+ * dynamically and onlined here is in HIGHMEM.
-+ */
-+void online_page(struct page *page)
-+{
-+ ClearPageReserved(page);
-+ add_one_highpage_hotplug(page, page_to_pfn(page));
-+}
-+
-+
-+#ifdef CONFIG_NUMA
-+extern void set_highmem_pages_init(int);
-+#else
-+static void __init set_highmem_pages_init(int bad_ppro)
-+{
-+ int pfn;
-+ for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
-+ add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
-+ totalram_pages += totalhigh_pages;
-+}
-+#endif /* CONFIG_NUMA */
-+
-+#else
-+#define kmap_init() do { } while (0)
-+#define permanent_kmaps_init(pgd_base) do { } while (0)
-+#define set_highmem_pages_init(bad_ppro) do { } while (0)
-+#endif /* CONFIG_HIGHMEM */
-+
-+unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
-+EXPORT_SYMBOL(__PAGE_KERNEL);
-+unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
-+
-+#ifdef CONFIG_NUMA
-+extern void __init remap_numa_kva(void);
-+#else
-+#define remap_numa_kva() do {} while (0)
-+#endif
-+
-+pgd_t *swapper_pg_dir;
-+
-+static void __init pagetable_init (void)
-+{
-+ unsigned long vaddr;
-+ pgd_t *pgd_base = (pgd_t *)xen_start_info->pt_base;
-+
-+ swapper_pg_dir = pgd_base;
-+ init_mm.pgd = pgd_base;
-+
-+ /* Enable PSE if available */
-+ if (cpu_has_pse) {
-+ set_in_cr4(X86_CR4_PSE);
-+ }
-+
-+ /* Enable PGE if available */
-+ if (cpu_has_pge) {
-+ set_in_cr4(X86_CR4_PGE);
-+ __PAGE_KERNEL |= _PAGE_GLOBAL;
-+ __PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
-+ }
-+
-+ kernel_physical_mapping_init(pgd_base);
-+ remap_numa_kva();
-+
-+ /*
-+ * Fixed mappings, only the page table structure has to be
-+ * created - mappings will be set by set_fixmap():
-+ */
-+ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
-+ page_table_range_init(vaddr, 0, pgd_base);
-+
-+ permanent_kmaps_init(pgd_base);
-+}
-+
-+#ifdef CONFIG_SOFTWARE_SUSPEND
-+/*
-+ * Swap suspend & friends need this for resume because things like the intel-agp
-+ * driver might have split up a kernel 4MB mapping.
-+ */
-+char __nosavedata swsusp_pg_dir[PAGE_SIZE]
-+ __attribute__ ((aligned (PAGE_SIZE)));
-+
-+static inline void save_pg_dir(void)
-+{
-+ memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
-+}
-+#else
-+static inline void save_pg_dir(void)
-+{
-+}
-+#endif
-+
-+void zap_low_mappings (void)
-+{
-+ int i;
-+
-+ save_pg_dir();
-+
-+ /*
-+ * Zap initial low-memory mappings.
-+ *
-+ * Note that "pgd_clear()" doesn't do it for
-+ * us, because pgd_clear() is a no-op on i386.
-+ */
-+ for (i = 0; i < USER_PTRS_PER_PGD; i++)
-+#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN)
-+ set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
-+#else
-+ set_pgd(swapper_pg_dir+i, __pgd(0));
-+#endif
-+ flush_tlb_all();
-+}
-+
-+static int disable_nx __initdata = 0;
-+u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;
-+EXPORT_SYMBOL(__supported_pte_mask);
-+
-+/*
-+ * noexec = on|off
-+ *
-+ * Control non executable mappings.
-+ *
-+ * on Enable
-+ * off Disable
-+ */
-+void __init noexec_setup(const char *str)
-+{
-+ if (!strncmp(str, "on",2) && cpu_has_nx) {
-+ __supported_pte_mask |= _PAGE_NX;
-+ disable_nx = 0;
-+ } else if (!strncmp(str,"off",3)) {
-+ disable_nx = 1;
-+ __supported_pte_mask &= ~_PAGE_NX;
-+ }
-+}
-+
-+int nx_enabled = 0;
-+#ifdef CONFIG_X86_PAE
-+
-+static void __init set_nx(void)
-+{
-+ unsigned int v[4], l, h;
-+
-+ if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
-+ cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
-+ if ((v[3] & (1 << 20)) && !disable_nx) {
-+ rdmsr(MSR_EFER, l, h);
-+ l |= EFER_NX;
-+ wrmsr(MSR_EFER, l, h);
-+ nx_enabled = 1;
-+ __supported_pte_mask |= _PAGE_NX;
-+ }
-+ }
-+}
-+
-+/*
-+ * Enables/disables executability of a given kernel page and
-+ * returns the previous setting.
-+ */
-+int __init set_kernel_exec(unsigned long vaddr, int enable)
-+{
-+ pte_t *pte;
-+ int ret = 1;
-+
-+ if (!nx_enabled)
-+ goto out;
-+
-+ pte = lookup_address(vaddr);
-+ BUG_ON(!pte);
-+
-+ if (!pte_exec_kernel(*pte))
-+ ret = 0;
-+
-+ if (enable)
-+ pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
-+ else
-+ pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
-+ __flush_tlb_all();
-+out:
-+ return ret;
-+}
-+
-+#endif
-+
-+/*
-+ * paging_init() sets up the page tables - note that the first 8MB are
-+ * already mapped by head.S.
-+ *
-+ * This routine also unmaps the page at virtual kernel address 0, so
-+ * that we can trap those pesky NULL-reference errors in the kernel.
-+ */
-+void __init paging_init(void)
-+{
-+ int i;
-+
-+#ifdef CONFIG_X86_PAE
-+ set_nx();
-+ if (nx_enabled)
-+ printk("NX (Execute Disable) protection: active\n");
-+#endif
-+
-+ pagetable_init();
-+
-+#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN)
-+ /*
-+ * We will bail out later - printk doesn't work right now so
-+ * the user would just see a hanging kernel.
-+	 * When running as a xen domain we are already in PAE mode at
-+ * this point.
-+ */
-+ if (cpu_has_pae)
-+ set_in_cr4(X86_CR4_PAE);
-+#endif
-+ __flush_tlb_all();
-+
-+ kmap_init();
-+
-+ /* Switch to the real shared_info page, and clear the dummy page. */
-+ set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
-+ HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
-+ memset(empty_zero_page, 0, sizeof(empty_zero_page));
-+
-+ /* Setup mapping of lower 1st MB */
-+ for (i = 0; i < NR_FIX_ISAMAPS; i++)
-+ if (xen_start_info->flags & SIF_PRIVILEGED)
-+ set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
-+ else
-+ __set_fixmap(FIX_ISAMAP_BEGIN - i,
-+ virt_to_machine(empty_zero_page),
-+ PAGE_KERNEL_RO);
-+}
-+
-+/*
-+ * Test if the WP bit works in supervisor mode. It isn't supported on 386's
-+ * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
-+ * used to involve black magic jumps to work around some nasty CPU bugs,
-+ * but fortunately the switch to using exceptions got rid of all that.
-+ */
-+
-+static void __init test_wp_bit(void)
-+{
-+ printk("Checking if this processor honours the WP bit even in supervisor mode... ");
-+
-+	/* Any page-aligned address will do; the test is non-destructive */
-+ __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
-+ boot_cpu_data.wp_works_ok = do_test_wp_bit();
-+ clear_fixmap(FIX_WP_TEST);
-+
-+ if (!boot_cpu_data.wp_works_ok) {
-+ printk("No.\n");
-+#ifdef CONFIG_X86_WP_WORKS_OK
-+ panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
-+#endif
-+ } else {
-+ printk("Ok.\n");
-+ }
-+}
-+
-+static void __init set_max_mapnr_init(void)
-+{
-+#ifdef CONFIG_HIGHMEM
-+ num_physpages = highend_pfn;
-+#else
-+ num_physpages = max_low_pfn;
-+#endif
-+#ifdef CONFIG_FLATMEM
-+ max_mapnr = num_physpages;
-+#endif
-+}
-+
-+static struct kcore_list kcore_mem, kcore_vmalloc;
-+
-+void __init mem_init(void)
-+{
-+ extern int ppro_with_ram_bug(void);
-+ int codesize, reservedpages, datasize, initsize;
-+ int tmp;
-+ int bad_ppro;
-+ unsigned long pfn;
-+
-+ contiguous_bitmap = alloc_bootmem_low_pages(
-+ (max_low_pfn + 2*BITS_PER_LONG) >> 3);
-+ BUG_ON(!contiguous_bitmap);
-+ memset(contiguous_bitmap, 0, (max_low_pfn + 2*BITS_PER_LONG) >> 3);
-+
-+#if defined(CONFIG_SWIOTLB)
-+ swiotlb_init();
-+#endif
-+
-+#ifdef CONFIG_FLATMEM
-+ if (!mem_map)
-+ BUG();
-+#endif
-+
-+ bad_ppro = ppro_with_ram_bug();
-+
-+#ifdef CONFIG_HIGHMEM
-+ /* check that fixmap and pkmap do not overlap */
-+ if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
-+ printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
-+ printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
-+ PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
-+ BUG();
-+ }
-+#endif
-+
-+ set_max_mapnr_init();
-+
-+#ifdef CONFIG_HIGHMEM
-+ high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
-+#else
-+ high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
-+#endif
-+ printk("vmalloc area: %lx-%lx, maxmem %lx\n",
-+ VMALLOC_START,VMALLOC_END,MAXMEM);
-+ BUG_ON(VMALLOC_START > VMALLOC_END);
-+
-+ /* this will put all low memory onto the freelists */
-+ totalram_pages += free_all_bootmem();
-+ /* XEN: init and count low-mem pages outside initial allocation. */
-+ for (pfn = xen_start_info->nr_pages; pfn < max_low_pfn; pfn++) {
-+ ClearPageReserved(&mem_map[pfn]);
-+ set_page_count(&mem_map[pfn], 1);
-+ totalram_pages++;
-+ }
-+
-+ reservedpages = 0;
-+ for (tmp = 0; tmp < max_low_pfn; tmp++)
-+ /*
-+ * Only count reserved RAM pages
-+ */
-+ if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
-+ reservedpages++;
-+
-+ set_highmem_pages_init(bad_ppro);
-+
-+ codesize = (unsigned long) &_etext - (unsigned long) &_text;
-+ datasize = (unsigned long) &_edata - (unsigned long) &_etext;
-+ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
-+
-+ kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
-+ kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
-+ VMALLOC_END-VMALLOC_START);
-+
-+ printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
-+ (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
-+ num_physpages << (PAGE_SHIFT-10),
-+ codesize >> 10,
-+ reservedpages << (PAGE_SHIFT-10),
-+ datasize >> 10,
-+ initsize >> 10,
-+ (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
-+ );
-+
-+#ifdef CONFIG_X86_PAE
-+ if (!cpu_has_pae)
-+ panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
-+#endif
-+ if (boot_cpu_data.wp_works_ok < 0)
-+ test_wp_bit();
-+
-+ /*
-+	 * Subtle. SMP is doing its boot stuff late (because it has to
-+ * fork idle threads) - but it also needs low mappings for the
-+ * protected-mode entry to work. We zap these entries only after
-+ * the WP-bit has been tested.
-+ */
-+#ifndef CONFIG_SMP
-+ zap_low_mappings();
-+#endif
-+
-+ set_bit(PG_pinned, &virt_to_page(init_mm.pgd)->flags);
-+}
-+
-+/*
-+ * this is for the non-NUMA, single node SMP system case.
-+ * Specifically, in the case of x86, we will always add
-+ * memory to the highmem for now.
-+ */
-+#ifndef CONFIG_NEED_MULTIPLE_NODES
-+int add_memory(u64 start, u64 size)
-+{
-+ struct pglist_data *pgdata = &contig_page_data;
-+ struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;
-+ unsigned long start_pfn = start >> PAGE_SHIFT;
-+ unsigned long nr_pages = size >> PAGE_SHIFT;
-+
-+ return __add_pages(zone, start_pfn, nr_pages);
-+}
-+
-+int remove_memory(u64 start, u64 size)
-+{
-+ return -EINVAL;
-+}
-+#endif
-+
-+kmem_cache_t *pgd_cache;
-+kmem_cache_t *pmd_cache;
-+
-+void __init pgtable_cache_init(void)
-+{
-+ if (PTRS_PER_PMD > 1) {
-+ pmd_cache = kmem_cache_create("pmd",
-+ PTRS_PER_PMD*sizeof(pmd_t),
-+ PTRS_PER_PMD*sizeof(pmd_t),
-+ 0,
-+ pmd_ctor,
-+ NULL);
-+ if (!pmd_cache)
-+ panic("pgtable_cache_init(): cannot create pmd cache");
-+ }
-+ pgd_cache = kmem_cache_create("pgd",
-+#ifndef CONFIG_XEN
-+ PTRS_PER_PGD*sizeof(pgd_t),
-+ PTRS_PER_PGD*sizeof(pgd_t),
-+#else
-+ PAGE_SIZE,
-+ PAGE_SIZE,
-+#endif
-+ 0,
-+ pgd_ctor,
-+ pgd_dtor);
-+ if (!pgd_cache)
-+ panic("pgtable_cache_init(): Cannot create pgd cache");
-+}
-+
-+/*
-+ * This function cannot be __init, since exceptions don't work in that
-+ * section. Put this after the callers, so that it cannot be inlined.
-+ */
-+static int noinline do_test_wp_bit(void)
-+{
-+ char tmp_reg;
-+ int flag;
-+
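-+	/*
-+	 * flag starts at 1 (the "2" (1) constraint below).  The write
-+	 * back to the read-only FIX_WP_TEST page at label 1 faults if
-+	 * WP is honoured; the exception table entry then jumps to
-+	 * label 2, skipping the xorl that would clear flag.
-+	 */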
-+ __asm__ __volatile__(
-+ " movb %0,%1 \n"
-+ "1: movb %1,%0 \n"
-+ " xorl %2,%2 \n"
-+ "2: \n"
-+ ".section __ex_table,\"a\"\n"
-+ " .align 4 \n"
-+ " .long 1b,2b \n"
-+ ".previous \n"
-+ :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
-+ "=q" (tmp_reg),
-+ "=r" (flag)
-+ :"2" (1)
-+ :"memory");
-+
-+ return flag;
-+}
-+
-+void free_initmem(void)
-+{
-+ unsigned long addr;
-+
-+ addr = (unsigned long)(&__init_begin);
-+ for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
-+ ClearPageReserved(virt_to_page(addr));
-+ set_page_count(virt_to_page(addr), 1);
-+ memset((void *)addr, 0xcc, PAGE_SIZE);
-+ free_page(addr);
-+ totalram_pages++;
-+ }
-+ printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (__init_end - __init_begin) >> 10);
-+}
-+
-+#ifdef CONFIG_DEBUG_RODATA
-+
-+extern char __start_rodata, __end_rodata;
-+void mark_rodata_ro(void)
-+{
-+ unsigned long addr = (unsigned long)&__start_rodata;
-+
-+ for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
-+ change_page_attr(virt_to_page(addr), 1, PAGE_KERNEL_RO);
-+
-+ printk ("Write protecting the kernel read-only data: %luk\n",
-+ (unsigned long)(&__end_rodata - &__start_rodata) >> 10);
-+
-+ /*
-+ * change_page_attr() requires a global_flush_tlb() call after it.
-+ * We do this after the printk so that if something went wrong in the
-+ * change, the printk gets out at least to give a better debug hint
-+ * of who is the culprit.
-+ */
-+ global_flush_tlb();
-+}
-+#endif
-+
-+
-+#ifdef CONFIG_BLK_DEV_INITRD
-+void free_initrd_mem(unsigned long start, unsigned long end)
-+{
-+ if (start < end)
-+ printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
-+ for (; start < end; start += PAGE_SIZE) {
-+ ClearPageReserved(virt_to_page(start));
-+ set_page_count(virt_to_page(start), 1);
-+ free_page(start);
-+ totalram_pages++;
-+ }
-+}
-+#endif
-diff --git a/arch/i386/mm/ioremap-xen.c b/arch/i386/mm/ioremap-xen.c
-new file mode 100644
-index 0000000..a9a32ba
---- /dev/null
-+++ b/arch/i386/mm/ioremap-xen.c
-@@ -0,0 +1,462 @@
-+/*
-+ * arch/i386/mm/ioremap.c
-+ *
-+ * Re-map IO memory to kernel address space so that we can access it.
-+ * This is needed for high PCI addresses that aren't mapped in the
-+ * 640k-1MB IO memory area on PC's
-+ *
-+ * (C) Copyright 1995 1996 Linus Torvalds
-+ */
-+
-+#include <linux/vmalloc.h>
-+#include <linux/init.h>
-+#include <linux/slab.h>
-+#include <linux/module.h>
-+#include <asm/io.h>
-+#include <asm/fixmap.h>
-+#include <asm/cacheflush.h>
-+#include <asm/tlbflush.h>
-+#include <asm/pgtable.h>
-+#include <asm/pgalloc.h>
-+
-+#define ISA_START_ADDRESS 0x0
-+#define ISA_END_ADDRESS 0x100000
-+
-+#if 0 /* not PAE safe */
-+/* These hacky macros avoid phys->machine translations. */
-+#define __direct_pte(x) ((pte_t) { (x) } )
-+#define __direct_mk_pte(page_nr,pgprot) \
-+ __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
-+#define direct_mk_pte_phys(physpage, pgprot) \
-+ __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
-+#endif
-+
-+static int direct_remap_area_pte_fn(pte_t *pte,
-+ struct page *pte_page,
-+ unsigned long address,
-+ void *data)
-+{
-+ mmu_update_t **v = (mmu_update_t **)data;
-+
-+ (*v)->ptr = ((u64)pfn_to_mfn(page_to_pfn(pte_page)) <<
-+ PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
-+ (*v)++;
-+
-+ return 0;
-+}
-+
-+static int __direct_remap_pfn_range(struct mm_struct *mm,
-+ unsigned long address,
-+ unsigned long mfn,
-+ unsigned long size,
-+ pgprot_t prot,
-+ domid_t domid)
-+{
-+ int rc;
-+ unsigned long i, start_address;
-+ mmu_update_t *u, *v, *w;
-+
-+ u = v = w = (mmu_update_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-+ if (u == NULL)
-+ return -ENOMEM;
-+
-+ start_address = address;
-+
-+ flush_cache_all();
-+
-+ for (i = 0; i < size; i += PAGE_SIZE) {
-+ if ((v - u) == (PAGE_SIZE / sizeof(mmu_update_t))) {
-+ /* Fill in the PTE pointers. */
-+ rc = generic_page_range(mm, start_address,
-+ address - start_address,
-+ direct_remap_area_pte_fn, &w);
-+ if (rc)
-+ goto out;
-+ w = u;
-+ rc = -EFAULT;
-+ if (HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0)
-+ goto out;
-+ v = u;
-+ start_address = address;
-+ }
-+
-+ /*
-+		 * Fill in the machine address; the PTE pointer itself is
-+		 * filled in later by the generic_page_range() passes.
-+ */
-+ v->val = pte_val_ma(pfn_pte_ma(mfn, prot));
-+
-+ mfn++;
-+ address += PAGE_SIZE;
-+ v++;
-+ }
-+
-+ if (v != u) {
-+ /* get the ptep's filled in */
-+ rc = generic_page_range(mm, start_address, address - start_address,
-+ direct_remap_area_pte_fn, &w);
-+ if (rc)
-+ goto out;
-+ rc = -EFAULT;
-+ if (unlikely(HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0))
-+ goto out;
-+ }
-+
-+ rc = 0;
-+
-+ out:
-+ flush_tlb_all();
-+
-+ free_page((unsigned long)u);
-+
-+ return rc;
-+}
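-+
-+/*
-+ * The loop above batches up to a page worth of mmu_update_t entries:
-+ * new PTE values are filled in first, the machine addresses of the
-+ * PTE slots are resolved by the generic_page_range() passes, and each
-+ * full batch is applied with a single HYPERVISOR_mmu_update() call.
-+ */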
-+
-+int direct_remap_pfn_range(struct vm_area_struct *vma,
-+ unsigned long address,
-+ unsigned long mfn,
-+ unsigned long size,
-+ pgprot_t prot,
-+ domid_t domid)
-+{
-+ /* Same as remap_pfn_range(). */
-+ vma->vm_flags |= VM_IO | VM_RESERVED;
-+
-+ if (domid == DOMID_SELF)
-+ return -EINVAL;
-+
-+ return __direct_remap_pfn_range(
-+ vma->vm_mm, address, mfn, size, prot, domid);
-+}
-+EXPORT_SYMBOL(direct_remap_pfn_range);
-+
-+int direct_kernel_remap_pfn_range(unsigned long address,
-+ unsigned long mfn,
-+ unsigned long size,
-+ pgprot_t prot,
-+ domid_t domid)
-+{
-+ return __direct_remap_pfn_range(
-+ &init_mm, address, mfn, size, prot, domid);
-+}
-+EXPORT_SYMBOL(direct_kernel_remap_pfn_range);
-+
-+static int lookup_pte_fn(
-+ pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
-+{
-+ uint64_t *ptep = (uint64_t *)data;
-+ if (ptep)
-+ *ptep = ((uint64_t)pfn_to_mfn(page_to_pfn(pte_page)) <<
-+ PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
-+ return 0;
-+}
-+
-+int create_lookup_pte_addr(struct mm_struct *mm,
-+ unsigned long address,
-+ uint64_t *ptep)
-+{
-+ return generic_page_range(mm, address, PAGE_SIZE, lookup_pte_fn, ptep);
-+}
-+
-+EXPORT_SYMBOL(create_lookup_pte_addr);
-+
-+static int noop_fn(
-+ pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
-+{
-+ return 0;
-+}
-+
-+int touch_pte_range(struct mm_struct *mm,
-+ unsigned long address,
-+ unsigned long size)
-+{
-+ return generic_page_range(mm, address, size, noop_fn, NULL);
-+}
-+
-+EXPORT_SYMBOL(touch_pte_range);
-+
-+/*
-+ * Does @address reside within a non-highmem page that is local to this virtual
-+ * machine (i.e., not an I/O page, nor a memory page belonging to another VM)?
-+ * See the comment that accompanies mfn_to_local_pfn() in page.h to understand
-+ * why this works.
-+ */
-+static inline int is_local_lowmem(unsigned long address)
-+{
-+ extern unsigned long max_low_pfn;
-+ return (mfn_to_local_pfn(address >> PAGE_SHIFT) < max_low_pfn);
-+}
-+
-+/*
-+ * Generic mapping function (not visible outside):
-+ */
-+
-+/*
-+ * Remap an arbitrary physical address space into the kernel virtual
-+ * address space. Needed when the kernel wants to access high addresses
-+ * directly.
-+ *
-+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
-+ * have to convert them into an offset in a page-aligned mapping, but the
-+ * caller shouldn't need to know that small detail.
-+ */
-+void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
-+{
-+ void __iomem * addr;
-+ struct vm_struct * area;
-+ unsigned long offset, last_addr;
-+ domid_t domid = DOMID_IO;
-+
-+ /* Don't allow wraparound or zero size */
-+ last_addr = phys_addr + size - 1;
-+ if (!size || last_addr < phys_addr)
-+ return NULL;
-+
-+ /*
-+ * Don't remap the low PCI/ISA area, it's always mapped..
-+ */
-+ if (xen_start_info->flags & SIF_PRIVILEGED &&
-+ phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
-+ return (void __iomem *) isa_bus_to_virt(phys_addr);
-+
-+ /*
-+ * Don't allow anybody to remap normal RAM that we're using..
-+ */
-+ if (is_local_lowmem(phys_addr)) {
-+ char *t_addr, *t_end;
-+ struct page *page;
-+
-+ t_addr = bus_to_virt(phys_addr);
-+ t_end = t_addr + (size - 1);
-+
-+ for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
-+ if(!PageReserved(page))
-+ return NULL;
-+
-+ domid = DOMID_SELF;
-+ }
-+
-+ /*
-+ * Mappings have to be page-aligned
-+ */
-+ offset = phys_addr & ~PAGE_MASK;
-+ phys_addr &= PAGE_MASK;
-+ size = PAGE_ALIGN(last_addr+1) - phys_addr;
-+
-+ /*
-+ * Ok, go for it..
-+ */
-+ area = get_vm_area(size, VM_IOREMAP | (flags << 20));
-+ if (!area)
-+ return NULL;
-+ area->phys_addr = phys_addr;
-+ addr = (void __iomem *) area->addr;
-+ flags |= _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED;
-+#ifdef __x86_64__
-+ flags |= _PAGE_USER;
-+#endif
-+ if (__direct_remap_pfn_range(&init_mm, (unsigned long)addr,
-+ phys_addr>>PAGE_SHIFT,
-+ size, __pgprot(flags), domid)) {
-+ vunmap((void __force *) addr);
-+ return NULL;
-+ }
-+ return (void __iomem *) (offset + (char __iomem *)addr);
-+}
-+EXPORT_SYMBOL(__ioremap);
-+
-+/**
-+ * ioremap_nocache - map bus memory into CPU space
-+ * @offset: bus address of the memory
-+ * @size: size of the resource to map
-+ *
-+ * ioremap_nocache performs a platform specific sequence of operations to
-+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
-+ * writew/writel functions and the other mmio helpers. The returned
-+ * address is not guaranteed to be usable directly as a virtual
-+ * address.
-+ *
-+ * This version of ioremap ensures that the memory is marked uncachable
-+ * on the CPU as well as honouring existing caching rules from things like
-+ * the PCI bus. Note that there are other caches and buffers on many
-+ * busses. In particular, driver authors should read up on PCI writes.
-+ *
-+ * It's useful if some control registers are in such an area and
-+ * write combining or read caching is not desirable:
-+ *
-+ * Must be freed with iounmap.
-+ */
-+
-+void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
-+{
-+ unsigned long last_addr;
-+ void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
-+ if (!p)
-+ return p;
-+
-+ /* Guaranteed to be > phys_addr, as per __ioremap() */
-+ last_addr = phys_addr + size - 1;
-+
-+ if (is_local_lowmem(last_addr)) {
-+ struct page *ppage = virt_to_page(bus_to_virt(phys_addr));
-+ unsigned long npages;
-+
-+ phys_addr &= PAGE_MASK;
-+
-+ /* This might overflow and become zero.. */
-+ last_addr = PAGE_ALIGN(last_addr);
-+
-+ /* .. but that's ok, because modulo-2**n arithmetic will make
-+ * the page-aligned "last - first" come out right.
-+ */
-+ npages = (last_addr - phys_addr) >> PAGE_SHIFT;
-+
-+ if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
-+ iounmap(p);
-+ p = NULL;
-+ }
-+ global_flush_tlb();
-+ }
-+
-+ return p;
-+}
-+EXPORT_SYMBOL(ioremap_nocache);
-+
-+/**
-+ * iounmap - Free an IO remapping
-+ * @addr: virtual address from ioremap_*
-+ *
-+ * Caller must ensure there is only one unmapping for the same pointer.
-+ */
-+void iounmap(volatile void __iomem *addr)
-+{
-+ struct vm_struct *p, *o;
-+
-+ if ((void __force *)addr <= high_memory)
-+ return;
-+
-+ /*
-+ * __ioremap special-cases the PCI/ISA range by not instantiating a
-+ * vm_area and by simply returning an address into the kernel mapping
-+ * of ISA space. So handle that here.
-+ */
-+ if ((unsigned long) addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
-+ return;
-+
-+ addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
-+
-+ /* Use the vm area unlocked, assuming the caller
-+ ensures there isn't another iounmap for the same address
-+ in parallel. Reuse of the virtual address is prevented by
-+ leaving it in the global lists until we're done with it.
-+ cpa takes care of the direct mappings. */
-+ read_lock(&vmlist_lock);
-+ for (p = vmlist; p; p = p->next) {
-+ if (p->addr == addr)
-+ break;
-+ }
-+ read_unlock(&vmlist_lock);
-+
-+ if (!p) {
-+ printk("iounmap: bad address %p\n", addr);
-+ dump_stack();
-+ return;
-+ }
-+
-+ /* Reset the direct mapping. Can block */
-+ if ((p->flags >> 20) && is_local_lowmem(p->phys_addr)) {
-+ /* p->size includes the guard page, but cpa doesn't like that */
-+ change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)),
-+ (p->size - PAGE_SIZE) >> PAGE_SHIFT,
-+ PAGE_KERNEL);
-+ global_flush_tlb();
-+ }
-+
-+ /* Finally remove it */
-+ o = remove_vm_area((void *)addr);
-+ BUG_ON(p != o || o == NULL);
-+ kfree(p);
-+}
-+EXPORT_SYMBOL(iounmap);
-+
-+#ifdef __i386__
-+
-+void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
-+{
-+ unsigned long offset, last_addr;
-+ unsigned int nrpages;
-+ enum fixed_addresses idx;
-+
-+ /* Don't allow wraparound or zero size */
-+ last_addr = phys_addr + size - 1;
-+ if (!size || last_addr < phys_addr)
-+ return NULL;
-+
-+ /*
-+ * Don't remap the low PCI/ISA area, it's always mapped..
-+ */
-+ if (xen_start_info->flags & SIF_PRIVILEGED &&
-+ phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
-+ return isa_bus_to_virt(phys_addr);
-+
-+ /*
-+ * Mappings have to be page-aligned
-+ */
-+ offset = phys_addr & ~PAGE_MASK;
-+ phys_addr &= PAGE_MASK;
-+ size = PAGE_ALIGN(last_addr) - phys_addr;
-+
-+ /*
-+ * Mappings have to fit in the FIX_BTMAP area.
-+ */
-+ nrpages = size >> PAGE_SHIFT;
-+ if (nrpages > NR_FIX_BTMAPS)
-+ return NULL;
-+
-+ /*
-+ * Ok, go for it..
-+ */
-+ idx = FIX_BTMAP_BEGIN;
-+ while (nrpages > 0) {
-+ set_fixmap(idx, phys_addr);
-+ phys_addr += PAGE_SIZE;
-+ --idx;
-+ --nrpages;
-+ }
-+ return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
-+}
-+
-+void __init bt_iounmap(void *addr, unsigned long size)
-+{
-+ unsigned long virt_addr;
-+ unsigned long offset;
-+ unsigned int nrpages;
-+ enum fixed_addresses idx;
-+
-+ virt_addr = (unsigned long)addr;
-+ if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
-+ return;
-+ if (virt_addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
-+ return;
-+ offset = virt_addr & ~PAGE_MASK;
-+ nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
-+
-+ idx = FIX_BTMAP_BEGIN;
-+ while (nrpages > 0) {
-+ clear_fixmap(idx);
-+ --idx;
-+ --nrpages;
-+ }
-+}
-+
-+#endif /* __i386__ */
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/arch/i386/mm/pgtable-xen.c b/arch/i386/mm/pgtable-xen.c
-new file mode 100644
-index 0000000..c2d97b5
---- /dev/null
-+++ b/arch/i386/mm/pgtable-xen.c
-@@ -0,0 +1,646 @@
-+/*
-+ * linux/arch/i386/mm/pgtable.c
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/mm.h>
-+#include <linux/swap.h>
-+#include <linux/smp.h>
-+#include <linux/highmem.h>
-+#include <linux/slab.h>
-+#include <linux/pagemap.h>
-+#include <linux/spinlock.h>
-+#include <linux/module.h>
-+
-+#include <asm/system.h>
-+#include <asm/pgtable.h>
-+#include <asm/pgalloc.h>
-+#include <asm/fixmap.h>
-+#include <asm/e820.h>
-+#include <asm/tlb.h>
-+#include <asm/tlbflush.h>
-+#include <asm/io.h>
-+#include <asm/mmu_context.h>
-+
-+#include <xen/features.h>
-+#include <xen/foreign_page.h>
-+#include <asm/hypervisor.h>
-+
-+static void pgd_test_and_unpin(pgd_t *pgd);
-+
-+void show_mem(void)
-+{
-+ int total = 0, reserved = 0;
-+ int shared = 0, cached = 0;
-+ int highmem = 0;
-+ struct page *page;
-+ pg_data_t *pgdat;
-+ unsigned long i;
-+ struct page_state ps;
-+ unsigned long flags;
-+
-+ printk(KERN_INFO "Mem-info:\n");
-+ show_free_areas();
-+ printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
-+ for_each_pgdat(pgdat) {
-+ pgdat_resize_lock(pgdat, &flags);
-+ for (i = 0; i < pgdat->node_spanned_pages; ++i) {
-+ page = pgdat_page_nr(pgdat, i);
-+ total++;
-+ if (PageHighMem(page))
-+ highmem++;
-+ if (PageReserved(page))
-+ reserved++;
-+ else if (PageSwapCache(page))
-+ cached++;
-+ else if (page_count(page))
-+ shared += page_count(page) - 1;
-+ }
-+ pgdat_resize_unlock(pgdat, &flags);
-+ }
-+ printk(KERN_INFO "%d pages of RAM\n", total);
-+ printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
-+ printk(KERN_INFO "%d reserved pages\n", reserved);
-+ printk(KERN_INFO "%d pages shared\n", shared);
-+ printk(KERN_INFO "%d pages swap cached\n", cached);
-+
-+ get_page_state(&ps);
-+ printk(KERN_INFO "%lu pages dirty\n", ps.nr_dirty);
-+ printk(KERN_INFO "%lu pages writeback\n", ps.nr_writeback);
-+ printk(KERN_INFO "%lu pages mapped\n", ps.nr_mapped);
-+ printk(KERN_INFO "%lu pages slab\n", ps.nr_slab);
-+ printk(KERN_INFO "%lu pages pagetables\n", ps.nr_page_table_pages);
-+}
-+
-+/*
-+ * Associate a virtual page frame with a given physical page frame
-+ * and protection flags for that frame.
-+ */
-+static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
-+{
-+ pgd_t *pgd;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ pte_t *pte;
-+
-+ pgd = swapper_pg_dir + pgd_index(vaddr);
-+ if (pgd_none(*pgd)) {
-+ BUG();
-+ return;
-+ }
-+ pud = pud_offset(pgd, vaddr);
-+ if (pud_none(*pud)) {
-+ BUG();
-+ return;
-+ }
-+ pmd = pmd_offset(pud, vaddr);
-+ if (pmd_none(*pmd)) {
-+ BUG();
-+ return;
-+ }
-+ pte = pte_offset_kernel(pmd, vaddr);
-+ /* <pfn,flags> stored as-is, to permit clearing entries */
-+ set_pte(pte, pfn_pte(pfn, flags));
-+
-+ /*
-+ * It's enough to flush this one mapping.
-+ * (PGE mappings get flushed as well)
-+ */
-+ __flush_tlb_one(vaddr);
-+}
-+
-+/*
-+ * Associate a virtual page frame with a given physical page frame
-+ * and protection flags for that frame.
-+ */
-+static void set_pte_pfn_ma(unsigned long vaddr, unsigned long pfn,
-+ pgprot_t flags)
-+{
-+ pgd_t *pgd;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ pte_t *pte;
-+
-+ pgd = swapper_pg_dir + pgd_index(vaddr);
-+ if (pgd_none(*pgd)) {
-+ BUG();
-+ return;
-+ }
-+ pud = pud_offset(pgd, vaddr);
-+ if (pud_none(*pud)) {
-+ BUG();
-+ return;
-+ }
-+ pmd = pmd_offset(pud, vaddr);
-+ if (pmd_none(*pmd)) {
-+ BUG();
-+ return;
-+ }
-+ pte = pte_offset_kernel(pmd, vaddr);
-+ /* <pfn,flags> stored as-is, to permit clearing entries */
-+ set_pte(pte, pfn_pte_ma(pfn, flags));
-+
-+ /*
-+ * It's enough to flush this one mapping.
-+ * (PGE mappings get flushed as well)
-+ */
-+ __flush_tlb_one(vaddr);
-+}
-+
-+/*
-+ * Associate a large virtual page frame with a given physical page frame
-+ * and protection flags for that frame. pfn is for the base of the page,
-+ * vaddr is what the page gets mapped to - both must be properly aligned.
-+ * The pmd must already be instantiated. Assumes PAE mode.
-+ */
-+void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
-+{
-+ pgd_t *pgd;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+
-+ if (vaddr & (PMD_SIZE-1)) { /* vaddr is misaligned */
-+ printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
-+ return; /* BUG(); */
-+ }
-+ if (pfn & (PTRS_PER_PTE-1)) { /* pfn is misaligned */
-+ printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
-+ return; /* BUG(); */
-+ }
-+ pgd = swapper_pg_dir + pgd_index(vaddr);
-+ if (pgd_none(*pgd)) {
-+ printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
-+ return; /* BUG(); */
-+ }
-+ pud = pud_offset(pgd, vaddr);
-+ pmd = pmd_offset(pud, vaddr);
-+ set_pmd(pmd, pfn_pmd(pfn, flags));
-+ /*
-+ * It's enough to flush this one mapping.
-+ * (PGE mappings get flushed as well)
-+ */
-+ __flush_tlb_one(vaddr);
-+}
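
The two early-return checks in set_pmd_pfn() are plain alignment tests: the
virtual address must sit on a large-page boundary and the pfn on a PTE-table
boundary. A minimal stand-alone sketch of the same tests, assuming the usual
PAE constants (PMD_SIZE of 2 MB and PTRS_PER_PTE of 512; both values are
assumptions here, not taken from this hunk):

#include <assert.h>

#define PMD_SIZE     (1UL << 21)        /* assumed: 2 MB large page (PAE) */
#define PTRS_PER_PTE 512UL              /* assumed: PTEs per table (PAE) */

int main(void)
{
        unsigned long vaddr = 0xc0400000UL;     /* 2 MB aligned */
        unsigned long pfn   = 0x400UL;          /* multiple of 512 */

        /* the same tests set_pmd_pfn() applies before mapping */
        assert((vaddr & (PMD_SIZE - 1)) == 0);
        assert((pfn & (PTRS_PER_PTE - 1)) == 0);
        return 0;
}
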
-+
-+static int nr_fixmaps = 0;
-+unsigned long __FIXADDR_TOP = (HYPERVISOR_VIRT_START - 2 * PAGE_SIZE);
-+EXPORT_SYMBOL(__FIXADDR_TOP);
-+
-+void __set_fixmap (enum fixed_addresses idx, maddr_t phys, pgprot_t flags)
-+{
-+ unsigned long address = __fix_to_virt(idx);
-+
-+ if (idx >= __end_of_fixed_addresses) {
-+ BUG();
-+ return;
-+ }
-+ switch (idx) {
-+ case FIX_WP_TEST:
-+#ifdef CONFIG_X86_F00F_BUG
-+ case FIX_F00F_IDT:
-+#endif
-+ set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
-+ break;
-+ default:
-+ set_pte_pfn_ma(address, phys >> PAGE_SHIFT, flags);
-+ break;
-+ }
-+ nr_fixmaps++;
-+}
-+
-+void set_fixaddr_top(unsigned long top)
-+{
-+ BUG_ON(nr_fixmaps > 0);
-+ __FIXADDR_TOP = top - PAGE_SIZE;
-+}
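
set_fixaddr_top() works because every fixmap slot is addressed downward from
__FIXADDR_TOP, so lowering the top before the first fixmap is created (hence
the BUG_ON) shifts the whole region below the hypervisor hole. A user-space
sketch of that arithmetic, assuming the conventional i386 mapping
fix_to_virt(idx) = FIXADDR_TOP - (idx << PAGE_SHIFT), which is not shown in
this hunk:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static unsigned long fixaddr_top = 0xfffff000UL;  /* native default */

static unsigned long fix_to_virt(unsigned int idx)
{
        return fixaddr_top - ((unsigned long)idx << PAGE_SHIFT);
}

int main(void)
{
        printf("slot 0 at %#lx\n", fix_to_virt(0));

        /* what set_fixaddr_top(top) does: top - PAGE_SIZE becomes the
         * new ceiling; 0xf5800000 is a made-up example value */
        fixaddr_top = 0xf5800000UL - PAGE_SIZE;
        printf("slot 0 at %#lx after relocation\n", fix_to_virt(0));
        return 0;
}
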
-+
-+pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-+{
-+ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
-+ if (pte)
-+ make_lowmem_page_readonly(pte, XENFEAT_writable_page_tables);
-+ return pte;
-+}
-+
-+struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
-+{
-+ struct page *pte;
-+
-+#ifdef CONFIG_HIGHPTE
-+ pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
-+#else
-+ pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
-+ if (pte) {
-+ SetPageForeign(pte, pte_free);
-+ set_page_count(pte, 1);
-+ }
-+#endif
-+ return pte;
-+}
-+
-+void pte_free(struct page *pte)
-+{
-+ unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT);
-+
-+ if (!pte_write(*virt_to_ptep(va)))
-+ BUG_ON(HYPERVISOR_update_va_mapping(
-+ va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0));
-+
-+ ClearPageForeign(pte);
-+ set_page_count(pte, 1);
-+
-+ __free_page(pte);
-+}
-+
-+void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
-+{
-+ memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
-+}
-+
-+/*
-+ * List of all pgd's needed for non-PAE so it can invalidate entries
-+ * in both cached and uncached pgd's; not needed for PAE since the
-+ * kernel pmd is shared. If PAE were not to share the pmd a similar
-+ * tactic would be needed. This is essentially codepath-based locking
-+ * against pageattr.c; it is the unique case in which a valid change
-+ * of kernel pagetables can't be lazily synchronized by vmalloc faults.
-+ * vmalloc faults work because attached pagetables are never freed.
-+ * The locking scheme was chosen on the basis of manfred's
-+ * recommendations and having no core impact whatsoever.
-+ * -- wli
-+ */
-+DEFINE_SPINLOCK(pgd_lock);
-+struct page *pgd_list;
-+
-+static inline void pgd_list_add(pgd_t *pgd)
-+{
-+ struct page *page = virt_to_page(pgd);
-+ page->index = (unsigned long)pgd_list;
-+ if (pgd_list)
-+ set_page_private(pgd_list, (unsigned long)&page->index);
-+ pgd_list = page;
-+ set_page_private(page, (unsigned long)&pgd_list);
-+}
-+
-+static inline void pgd_list_del(pgd_t *pgd)
-+{
-+ struct page *next, **pprev, *page = virt_to_page(pgd);
-+ next = (struct page *)page->index;
-+ pprev = (struct page **)page_private(page);
-+ *pprev = next;
-+ if (next)
-+ set_page_private(next, (unsigned long)pprev);
-+}
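
pgd_list_add() and pgd_list_del() above implement a pprev-style intrusive
list: page->index carries the next pointer and page_private() the address of
whatever points at this node, so deletion is O(1) with no head scan and no
special case for the first element. The same shape, reduced to a
self-contained user-space sketch (struct node and the function names are
illustrative):

#include <assert.h>
#include <stddef.h>

struct node {
        struct node *next;      /* plays the role of page->index */
        struct node **pprev;    /* plays the role of page_private() */
};

static struct node *list;

static void node_add(struct node *n)
{
        n->next = list;
        if (list)
                list->pprev = &n->next;
        list = n;
        n->pprev = &list;
}

static void node_del(struct node *n)
{
        *n->pprev = n->next;            /* unhook, head or middle alike */
        if (n->next)
                n->next->pprev = n->pprev;
}

int main(void)
{
        struct node a = { NULL, NULL }, b = { NULL, NULL };

        node_add(&a);
        node_add(&b);
        node_del(&a);                   /* middle delete, no traversal */
        assert(list == &b && b.next == NULL);
        node_del(&b);                   /* head delete, same code path */
        assert(list == NULL);
        return 0;
}
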
-+
-+void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
-+{
-+ unsigned long flags;
-+
-+ if (PTRS_PER_PMD > 1) {
-+ if (!xen_feature(XENFEAT_pae_pgdir_above_4gb)) {
-+ int rc = xen_create_contiguous_region(
-+ (unsigned long)pgd, 0, 32);
-+ BUG_ON(rc);
-+ }
-+ if (HAVE_SHARED_KERNEL_PMD)
-+ memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
-+ swapper_pg_dir + USER_PTRS_PER_PGD,
-+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
-+ } else {
-+ spin_lock_irqsave(&pgd_lock, flags);
-+ memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
-+ swapper_pg_dir + USER_PTRS_PER_PGD,
-+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
-+ memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
-+ pgd_list_add(pgd);
-+ spin_unlock_irqrestore(&pgd_lock, flags);
-+ }
-+}
-+
-+void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
-+{
-+ unsigned long flags; /* can be called from interrupt context */
-+
-+ if (PTRS_PER_PMD > 1) {
-+ if (!xen_feature(XENFEAT_pae_pgdir_above_4gb))
-+ xen_destroy_contiguous_region((unsigned long)pgd, 0);
-+ } else {
-+ spin_lock_irqsave(&pgd_lock, flags);
-+ pgd_list_del(pgd);
-+ spin_unlock_irqrestore(&pgd_lock, flags);
-+
-+ pgd_test_and_unpin(pgd);
-+ }
-+}
-+
-+pgd_t *pgd_alloc(struct mm_struct *mm)
-+{
-+ int i;
-+ pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
-+
-+ pgd_test_and_unpin(pgd);
-+
-+ if (PTRS_PER_PMD == 1 || !pgd)
-+ return pgd;
-+
-+ for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
-+ pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
-+ if (!pmd)
-+ goto out_oom;
-+ set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
-+ }
-+
-+ if (!HAVE_SHARED_KERNEL_PMD) {
-+ unsigned long flags;
-+
-+ for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
-+ pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
-+ if (!pmd)
-+ goto out_oom;
-+ set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd)));
-+ }
-+
-+ spin_lock_irqsave(&pgd_lock, flags);
-+ for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
-+ unsigned long v = (unsigned long)i << PGDIR_SHIFT;
-+ pgd_t *kpgd = pgd_offset_k(v);
-+ pud_t *kpud = pud_offset(kpgd, v);
-+ pmd_t *kpmd = pmd_offset(kpud, v);
-+ pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
-+ memcpy(pmd, kpmd, PAGE_SIZE);
-+ make_lowmem_page_readonly(
-+ pmd, XENFEAT_writable_page_tables);
-+ }
-+ pgd_list_add(pgd);
-+ spin_unlock_irqrestore(&pgd_lock, flags);
-+ }
-+
-+ return pgd;
-+
-+out_oom:
-+ for (i--; i >= 0; i--)
-+ kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
-+ kmem_cache_free(pgd_cache, pgd);
-+ return NULL;
-+}
-+
-+void pgd_free(pgd_t *pgd)
-+{
-+ int i;
-+
-+ pgd_test_and_unpin(pgd);
-+
-+ /* in the PAE case user pgd entries are overwritten before usage */
-+ if (PTRS_PER_PMD > 1) {
-+ for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
-+ pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
-+ kmem_cache_free(pmd_cache, pmd);
-+ }
-+ if (!HAVE_SHARED_KERNEL_PMD) {
-+ unsigned long flags;
-+ spin_lock_irqsave(&pgd_lock, flags);
-+ pgd_list_del(pgd);
-+ spin_unlock_irqrestore(&pgd_lock, flags);
-+ for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
-+ pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
-+ make_lowmem_page_writable(
-+ pmd, XENFEAT_writable_page_tables);
-+ memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
-+ kmem_cache_free(pmd_cache, pmd);
-+ }
-+ }
-+ }
-+ /* in the non-PAE case, free_pgtables() clears user pgd entries */
-+ kmem_cache_free(pgd_cache, pgd);
-+}
-+
-+void make_lowmem_page_readonly(void *va, unsigned int feature)
-+{
-+ pte_t *pte;
-+ int rc;
-+
-+ if (xen_feature(feature))
-+ return;
-+
-+ pte = virt_to_ptep(va);
-+ rc = HYPERVISOR_update_va_mapping(
-+ (unsigned long)va, pte_wrprotect(*pte), 0);
-+ BUG_ON(rc);
-+}
-+
-+void make_lowmem_page_writable(void *va, unsigned int feature)
-+{
-+ pte_t *pte;
-+ int rc;
-+
-+ if (xen_feature(feature))
-+ return;
-+
-+ pte = virt_to_ptep(va);
-+ rc = HYPERVISOR_update_va_mapping(
-+ (unsigned long)va, pte_mkwrite(*pte), 0);
-+ BUG_ON(rc);
-+}
-+
-+void make_page_readonly(void *va, unsigned int feature)
-+{
-+ pte_t *pte;
-+ int rc;
-+
-+ if (xen_feature(feature))
-+ return;
-+
-+ pte = virt_to_ptep(va);
-+ rc = HYPERVISOR_update_va_mapping(
-+ (unsigned long)va, pte_wrprotect(*pte), 0);
-+ if (rc) /* fallback? */
-+ xen_l1_entry_update(pte, pte_wrprotect(*pte));
-+ if ((unsigned long)va >= (unsigned long)high_memory) {
-+ unsigned long pfn = pte_pfn(*pte);
-+#ifdef CONFIG_HIGHMEM
-+ if (pfn >= highstart_pfn)
-+ kmap_flush_unused(); /* flush stale writable kmaps */
-+ else
-+#endif
-+ make_lowmem_page_readonly(
-+ phys_to_virt(pfn << PAGE_SHIFT), feature);
-+ }
-+}
-+
-+void make_page_writable(void *va, unsigned int feature)
-+{
-+ pte_t *pte;
-+ int rc;
-+
-+ if (xen_feature(feature))
-+ return;
-+
-+ pte = virt_to_ptep(va);
-+ rc = HYPERVISOR_update_va_mapping(
-+ (unsigned long)va, pte_mkwrite(*pte), 0);
-+ if (rc) /* fallback? */
-+ xen_l1_entry_update(pte, pte_mkwrite(*pte));
-+ if ((unsigned long)va >= (unsigned long)high_memory) {
-+ unsigned long pfn = pte_pfn(*pte);
-+#ifdef CONFIG_HIGHMEM
-+ if (pfn < highstart_pfn)
-+#endif
-+ make_lowmem_page_writable(
-+ phys_to_virt(pfn << PAGE_SHIFT), feature);
-+ }
-+}
-+
-+void make_pages_readonly(void *va, unsigned int nr, unsigned int feature)
-+{
-+ if (xen_feature(feature))
-+ return;
-+
-+ while (nr-- != 0) {
-+ make_page_readonly(va, feature);
-+ va = (void *)((unsigned long)va + PAGE_SIZE);
-+ }
-+}
-+
-+void make_pages_writable(void *va, unsigned int nr, unsigned int feature)
-+{
-+ if (xen_feature(feature))
-+ return;
-+
-+ while (nr-- != 0) {
-+ make_page_writable(va, feature);
-+ va = (void *)((unsigned long)va + PAGE_SIZE);
-+ }
-+}
-+
-+static inline void pgd_walk_set_prot(void *pt, pgprot_t flags)
-+{
-+ struct page *page = virt_to_page(pt);
-+ unsigned long pfn = page_to_pfn(page);
-+
-+ if (PageHighMem(page))
-+ return;
-+ BUG_ON(HYPERVISOR_update_va_mapping(
-+ (unsigned long)__va(pfn << PAGE_SHIFT),
-+ pfn_pte(pfn, flags), 0));
-+}
-+
-+static void pgd_walk(pgd_t *pgd_base, pgprot_t flags)
-+{
-+ pgd_t *pgd = pgd_base;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ pte_t *pte;
-+ int g, u, m;
-+
-+ if (xen_feature(XENFEAT_auto_translated_physmap))
-+ return;
-+
-+ for (g = 0; g < USER_PTRS_PER_PGD; g++, pgd++) {
-+ if (pgd_none(*pgd))
-+ continue;
-+ pud = pud_offset(pgd, 0);
-+ if (PTRS_PER_PUD > 1) /* not folded */
-+ pgd_walk_set_prot(pud,flags);
-+ for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
-+ if (pud_none(*pud))
-+ continue;
-+ pmd = pmd_offset(pud, 0);
-+ if (PTRS_PER_PMD > 1) /* not folded */
-+ pgd_walk_set_prot(pmd,flags);
-+ for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
-+ if (pmd_none(*pmd))
-+ continue;
-+ pte = pte_offset_kernel(pmd,0);
-+ pgd_walk_set_prot(pte,flags);
-+ }
-+ }
-+ }
-+
-+ BUG_ON(HYPERVISOR_update_va_mapping(
-+ (unsigned long)pgd_base,
-+ pfn_pte(virt_to_phys(pgd_base)>>PAGE_SHIFT, flags),
-+ UVMF_TLB_FLUSH));
-+}
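
pgd_walk() visits every page backing the address space's page tables (the pud,
pmd and pte pages for each populated entry) and re-protects it, finishing with
the pgd page itself in the final hypercall. The traversal is just a
depth-limited radix-tree walk; a toy model of it (FANOUT, struct level and the
function names are all illustrative):

#include <stdio.h>

#define FANOUT 4        /* toy width; i386 PAE tables hold 512 entries */

struct level { struct level *slot[FANOUT]; };

/* visit each populated table page, top-down; the real pgd_walk()
 * handles the top-level pgd page separately, after the loop */
static void walk(struct level *dir, int depth, void (*visit)(void *))
{
        int i;

        visit(dir);
        if (depth == 0)
                return;
        for (i = 0; i < FANOUT; i++)
                if (dir->slot[i])
                        walk(dir->slot[i], depth - 1, visit);
}

static void reprotect(void *page)
{
        printf("re-protect table page %p\n", page);
}

int main(void)
{
        struct level pte = {{ NULL }}, pmd = {{ NULL }}, pgd = {{ NULL }};

        pmd.slot[1] = &pte;
        pgd.slot[0] = &pmd;
        walk(&pgd, 2, reprotect);       /* three pages visited */
        return 0;
}
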
-+
-+static void __pgd_pin(pgd_t *pgd)
-+{
-+ pgd_walk(pgd, PAGE_KERNEL_RO);
-+ xen_pgd_pin(__pa(pgd));
-+ set_bit(PG_pinned, &virt_to_page(pgd)->flags);
-+}
-+
-+static void __pgd_unpin(pgd_t *pgd)
-+{
-+ xen_pgd_unpin(__pa(pgd));
-+ pgd_walk(pgd, PAGE_KERNEL);
-+ clear_bit(PG_pinned, &virt_to_page(pgd)->flags);
-+}
-+
-+static void pgd_test_and_unpin(pgd_t *pgd)
-+{
-+ if (test_bit(PG_pinned, &virt_to_page(pgd)->flags))
-+ __pgd_unpin(pgd);
-+}
-+
-+void mm_pin(struct mm_struct *mm)
-+{
-+ spin_lock(&mm->page_table_lock);
-+ __pgd_pin(mm->pgd);
-+ spin_unlock(&mm->page_table_lock);
-+}
-+
-+void mm_unpin(struct mm_struct *mm)
-+{
-+ spin_lock(&mm->page_table_lock);
-+ __pgd_unpin(mm->pgd);
-+ spin_unlock(&mm->page_table_lock);
-+}
-+
-+void mm_pin_all(void)
-+{
-+ struct page *page;
-+ for (page = pgd_list; page; page = (struct page *)page->index) {
-+ if (!test_bit(PG_pinned, &page->flags))
-+ __pgd_pin((pgd_t *)page_address(page));
-+ }
-+}
-+
-+void _arch_exit_mmap(struct mm_struct *mm)
-+{
-+ struct task_struct *tsk = current;
-+
-+ task_lock(tsk);
-+
-+ /*
-+ * We aggressively remove defunct pgd from cr3. We execute unmap_vmas()
-+ * *much* faster this way, as no tlb flushes means bigger wrpt batches.
-+ */
-+ if (tsk->active_mm == mm) {
-+ tsk->active_mm = &init_mm;
-+ atomic_inc(&init_mm.mm_count);
-+
-+ switch_mm(mm, &init_mm, tsk);
-+
-+ atomic_dec(&mm->mm_count);
-+ BUG_ON(atomic_read(&mm->mm_count) == 0);
-+ }
-+
-+ task_unlock(tsk);
-+
-+ if (test_bit(PG_pinned, &virt_to_page(mm->pgd)->flags) &&
-+ (atomic_read(&mm->mm_count) == 1))
-+ mm_unpin(mm);
-+}
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c
-index 9db3242..8b2e540 100644
---- a/arch/i386/mm/pgtable.c
-+++ b/arch/i386/mm/pgtable.c
-@@ -13,6 +13,7 @@
- #include <linux/slab.h>
- #include <linux/pagemap.h>
- #include <linux/spinlock.h>
-+#include <linux/module.h>
-
- #include <asm/system.h>
- #include <asm/pgtable.h>
-@@ -138,6 +139,10 @@ void set_pmd_pfn(unsigned long vaddr, un
- __flush_tlb_one(vaddr);
- }
-
-+static int nr_fixmaps = 0;
-+unsigned long __FIXADDR_TOP = 0xfffff000;
-+EXPORT_SYMBOL(__FIXADDR_TOP);
-+
- void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
- {
- unsigned long address = __fix_to_virt(idx);
-@@ -147,6 +152,13 @@ void __set_fixmap (enum fixed_addresses
- return;
- }
- set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
-+ nr_fixmaps++;
-+}
-+
-+void set_fixaddr_top(unsigned long top)
-+{
-+ BUG_ON(nr_fixmaps > 0);
-+ __FIXADDR_TOP = top - PAGE_SIZE;
- }
-
- pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-diff --git a/arch/i386/pci/Makefile b/arch/i386/pci/Makefile
-index 5461d4d..6d72b4e 100644
---- a/arch/i386/pci/Makefile
-+++ b/arch/i386/pci/Makefile
-@@ -4,6 +4,10 @@ obj-$(CONFIG_PCI_BIOS) += pcbios.o
- obj-$(CONFIG_PCI_MMCONFIG) += mmconfig.o direct.o
- obj-$(CONFIG_PCI_DIRECT) += direct.o
-
-+# pcifront should be after pcbios.o, mmconfig.o, and direct.o as it should only
-+# take over if direct access to the PCI bus is unavailable
-+obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += pcifront.o
-+
- pci-y := fixup.o
- pci-$(CONFIG_ACPI) += acpi.o
- pci-y += legacy.o irq.o
-@@ -12,3 +16,8 @@ pci-$(CONFIG_X86_VISWS) := visws.o fixu
- pci-$(CONFIG_X86_NUMAQ) := numa.o irq.o
-
- obj-y += $(pci-y) common.o
-+
-+ifdef CONFIG_XEN
-+include $(srctree)/scripts/Makefile.xen
-+obj-y := $(call cherrypickxen, $(obj-y))
-+endif
-diff --git a/arch/i386/pci/irq-xen.c b/arch/i386/pci/irq-xen.c
-new file mode 100644
-index 0000000..80e0b18
---- /dev/null
-+++ b/arch/i386/pci/irq-xen.c
-@@ -0,0 +1,1202 @@
-+/*
-+ * Low-Level PCI Support for PC -- Routing of Interrupts
-+ *
-+ * (c) 1999--2000 Martin Mares <mj@ucw.cz>
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/types.h>
-+#include <linux/kernel.h>
-+#include <linux/pci.h>
-+#include <linux/init.h>
-+#include <linux/slab.h>
-+#include <linux/interrupt.h>
-+#include <linux/dmi.h>
-+#include <asm/io.h>
-+#include <asm/smp.h>
-+#include <asm/io_apic.h>
-+#include <linux/irq.h>
-+#include <linux/acpi.h>
-+
-+#include "pci.h"
-+
-+#define PIRQ_SIGNATURE (('$' << 0) + ('P' << 8) + ('I' << 16) + ('R' << 24))
-+#define PIRQ_VERSION 0x0100
-+
-+static int broken_hp_bios_irq9;
-+static int acer_tm360_irqrouting;
-+
-+static struct irq_routing_table *pirq_table;
-+
-+static int pirq_enable_irq(struct pci_dev *dev);
-+
-+/*
-+ * Never use: 0, 1, 2 (timer, keyboard, and cascade)
-+ * Avoid using: 13, 14 and 15 (FP error and IDE).
-+ * Penalize: 3, 4, 6, 7, 12 (known ISA uses: serial, floppy, parallel and mouse)
-+ */
-+unsigned int pcibios_irq_mask = 0xfff8;
-+
-+static int pirq_penalty[16] = {
-+ 1000000, 1000000, 1000000, 1000, 1000, 0, 1000, 1000,
-+ 0, 0, 0, 0, 1000, 100000, 100000, 100000
-+};
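
The penalty table encodes the comment above it: astronomically expensive
entries can never win, merely large ones are avoided, and zero entries are
preferred. pcibios_lookup_irq(), further down in this file, selects the
cheapest IRQ allowed by the device's PIRQ bitmap; here is that selection rule
in isolation (pick_irq() is an illustrative name, not part of this patch):

#include <assert.h>

static int penalty[16] = {
        1000000, 1000000, 1000000, 1000, 1000, 0, 1000, 1000,
        0, 0, 0, 0, 1000, 100000, 100000, 100000
};

static int pick_irq(unsigned int mask)
{
        int i, best = 0;        /* 0 means "none found" */

        for (i = 0; i < 16; i++) {
                if (!(mask & (1 << i)))
                        continue;
                if (!best || penalty[i] < penalty[best])
                        best = i;
        }
        return best;
}

int main(void)
{
        /* bitmap allows IRQ 3, 5 and 10; IRQ 5 and 10 carry no ISA
         * penalty, and 5 is found first */
        assert(pick_irq((1 << 3) | (1 << 5) | (1 << 10)) == 5);
        return 0;
}
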
-+
-+struct irq_router {
-+ char *name;
-+ u16 vendor, device;
-+ int (*get)(struct pci_dev *router, struct pci_dev *dev, int pirq);
-+ int (*set)(struct pci_dev *router, struct pci_dev *dev, int pirq, int new);
-+};
-+
-+struct irq_router_handler {
-+ u16 vendor;
-+ int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
-+};
-+
-+int (*pcibios_enable_irq)(struct pci_dev *dev) = NULL;
-+void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
-+
-+/*
-+ * Check passed address for the PCI IRQ Routing Table signature
-+ * and perform checksum verification.
-+ */
-+
-+static inline struct irq_routing_table * pirq_check_routing_table(u8 *addr)
-+{
-+ struct irq_routing_table *rt;
-+ int i;
-+ u8 sum;
-+
-+ rt = (struct irq_routing_table *) addr;
-+ if (rt->signature != PIRQ_SIGNATURE ||
-+ rt->version != PIRQ_VERSION ||
-+ rt->size % 16 ||
-+ rt->size < sizeof(struct irq_routing_table))
-+ return NULL;
-+ sum = 0;
-+ for (i=0; i < rt->size; i++)
-+ sum += addr[i];
-+ if (!sum) {
-+ DBG(KERN_DEBUG "PCI: Interrupt Routing Table found at 0x%p\n", rt);
-+ return rt;
-+ }
-+ return NULL;
-+}
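
The validity test above relies on the $PIR convention that all bytes of the
table sum to zero mod 256, so the table's checksum byte is the two's
complement of the sum of everything else. The same rule in miniature (the
16-byte table and the checksum slot position are invented for illustration):

#include <assert.h>

int main(void)
{
        unsigned char table[16] = { '$', 'P', 'I', 'R', 0, 1, 32 };
        unsigned char sum = 0;
        int i;

        for (i = 0; i < 16; i++)
                sum += table[i];
        table[7] = (unsigned char)-sum; /* hypothetical checksum slot */

        /* what pirq_check_routing_table() verifies */
        for (sum = 0, i = 0; i < 16; i++)
                sum += table[i];
        assert(sum == 0);
        return 0;
}
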
-+
-+
-+
-+/*
-+ * Search 0xf0000 -- 0xfffff for the PCI IRQ Routing Table.
-+ */
-+
-+static struct irq_routing_table * __init pirq_find_routing_table(void)
-+{
-+ u8 *addr;
-+ struct irq_routing_table *rt;
-+
-+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-+ if (pirq_table_addr) {
-+ rt = pirq_check_routing_table((u8 *) isa_bus_to_virt(pirq_table_addr));
-+ if (rt)
-+ return rt;
-+ printk(KERN_WARNING "PCI: PIRQ table NOT found at pirqaddr\n");
-+ }
-+ for(addr = (u8 *) isa_bus_to_virt(0xf0000); addr < (u8 *) isa_bus_to_virt(0x100000); addr += 16) {
-+ rt = pirq_check_routing_table(addr);
-+ if (rt)
-+ return rt;
-+ }
-+#endif
-+
-+ return NULL;
-+}
-+
-+/*
-+ * If we have an IRQ routing table, use it to search for peer host
-+ * bridges. It's a gross hack, but since there is no other known
-+ * way to get a list of buses, we have to go this way.
-+ */
-+
-+static void __init pirq_peer_trick(void)
-+{
-+ struct irq_routing_table *rt = pirq_table;
-+ u8 busmap[256];
-+ int i;
-+ struct irq_info *e;
-+
-+ memset(busmap, 0, sizeof(busmap));
-+ for(i=0; i < (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info); i++) {
-+ e = &rt->slots[i];
-+#ifdef DEBUG
-+ {
-+ int j;
-+ DBG(KERN_DEBUG "%02x:%02x slot=%02x", e->bus, e->devfn/8, e->slot);
-+ for(j=0; j<4; j++)
-+ DBG(" %d:%02x/%04x", j, e->irq[j].link, e->irq[j].bitmap);
-+ DBG("\n");
-+ }
-+#endif
-+ busmap[e->bus] = 1;
-+ }
-+ for(i = 1; i < 256; i++) {
-+ if (!busmap[i] || pci_find_bus(0, i))
-+ continue;
-+ if (pci_scan_bus(i, &pci_root_ops, NULL))
-+ printk(KERN_INFO "PCI: Discovered primary peer bus %02x [IRQ]\n", i);
-+ }
-+ pcibios_last_bus = -1;
-+}
-+
-+/*
-+ * Code for querying and setting of IRQ routes on various interrupt routers.
-+ */
-+
-+void eisa_set_level_irq(unsigned int irq)
-+{
-+ unsigned char mask = 1 << (irq & 7);
-+ unsigned int port = 0x4d0 + (irq >> 3);
-+ unsigned char val;
-+ static u16 eisa_irq_mask;
-+
-+ if (irq >= 16 || (1 << irq) & eisa_irq_mask)
-+ return;
-+
-+ eisa_irq_mask |= (1 << irq);
-+ printk(KERN_DEBUG "PCI: setting IRQ %u as level-triggered\n", irq);
-+ val = inb(port);
-+ if (!(val & mask)) {
-+ DBG(KERN_DEBUG " -> edge");
-+ outb(val | mask, port);
-+ }
-+}
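
The routine above indexes the two ELCR (edge/level control) registers: IRQs
0-7 live in port 0x4d0, IRQs 8-15 in port 0x4d1, one bit per line. The
arithmetic on its own, with plain variables standing in for port I/O:

#include <assert.h>

int main(void)
{
        unsigned int irq = 11;
        unsigned int port = 0x4d0 + (irq >> 3);
        unsigned char mask = 1 << (irq & 7);

        assert(port == 0x4d1);          /* second ELCR register */
        assert(mask == 0x08);           /* bit 3 = IRQ 11 */
        return 0;
}
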
-+
-+/*
-+ * Common IRQ routing practice: nybbles in config space,
-+ * offset by some magic constant.
-+ */
-+static unsigned int read_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr)
-+{
-+ u8 x;
-+ unsigned reg = offset + (nr >> 1);
-+
-+ pci_read_config_byte(router, reg, &x);
-+ return (nr & 1) ? (x >> 4) : (x & 0xf);
-+}
-+
-+static void write_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr, unsigned int val)
-+{
-+ u8 x;
-+ unsigned reg = offset + (nr >> 1);
-+
-+ pci_read_config_byte(router, reg, &x);
-+ x = (nr & 1) ? ((x & 0x0f) | (val << 4)) : ((x & 0xf0) | val);
-+ pci_write_config_byte(router, reg, x);
-+}
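
Both helpers above pack two 4-bit routing entries per config byte: entry nr
lives at byte offset + nr/2, with odd entries in the high nibble. The same
packing on a plain byte array, so it can be run and checked stand-alone
(cfg[] stands in for PCI config space):

#include <assert.h>

typedef unsigned char u8;

static u8 cfg[256];

static unsigned int read_nybble(unsigned offset, unsigned nr)
{
        u8 x = cfg[offset + (nr >> 1)];

        return (nr & 1) ? (x >> 4) : (x & 0xf);
}

static void write_nybble(unsigned offset, unsigned nr, unsigned val)
{
        u8 *x = &cfg[offset + (nr >> 1)];

        *x = (nr & 1) ? ((*x & 0x0f) | (val << 4)) : ((*x & 0xf0) | val);
}

int main(void)
{
        write_nybble(0x48, 0, 0xa);     /* entry 0: low nibble of 0x48 */
        write_nybble(0x48, 1, 0x5);     /* entry 1: high nibble of 0x48 */
        assert(cfg[0x48] == 0x5a);
        assert(read_nybble(0x48, 0) == 0xa);
        assert(read_nybble(0x48, 1) == 0x5);
        return 0;
}
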
-+
-+/*
-+ * ALI pirq entries are damn ugly, and completely undocumented.
-+ * This has been figured out from pirq tables, and it's not a pretty
-+ * picture.
-+ */
-+static int pirq_ali_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+ static unsigned char irqmap[16] = { 0, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15 };
-+
-+ return irqmap[read_config_nybble(router, 0x48, pirq-1)];
-+}
-+
-+static int pirq_ali_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+ static unsigned char irqmap[16] = { 0, 8, 0, 2, 4, 5, 7, 6, 0, 1, 3, 9, 11, 0, 13, 15 };
-+ unsigned int val = irqmap[irq];
-+
-+ if (val) {
-+ write_config_nybble(router, 0x48, pirq-1, val);
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+/*
-+ * The Intel PIIX4 pirq rules are fairly simple: "pirq" is
-+ * just a pointer to the config space.
-+ */
-+static int pirq_piix_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+ u8 x;
-+
-+ pci_read_config_byte(router, pirq, &x);
-+ return (x < 16) ? x : 0;
-+}
-+
-+static int pirq_piix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+ pci_write_config_byte(router, pirq, irq);
-+ return 1;
-+}
-+
-+/*
-+ * The VIA pirq rules are nibble-based, like ALI,
-+ * but without the ugly irq number munging.
-+ * However, PIRQD is in the upper instead of lower 4 bits.
-+ */
-+static int pirq_via_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+ return read_config_nybble(router, 0x55, pirq == 4 ? 5 : pirq);
-+}
-+
-+static int pirq_via_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+ write_config_nybble(router, 0x55, pirq == 4 ? 5 : pirq, irq);
-+ return 1;
-+}
-+
-+/*
-+ * The VIA pirq rules are nibble-based, like ALI,
-+ * but without the ugly irq number munging.
-+ * However, for the 82C586 the nibble map is different.
-+ */
-+static int pirq_via586_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+ static unsigned int pirqmap[4] = { 3, 2, 5, 1 };
-+ return read_config_nybble(router, 0x55, pirqmap[pirq-1]);
-+}
-+
-+static int pirq_via586_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+ static unsigned int pirqmap[4] = { 3, 2, 5, 1 };
-+ write_config_nybble(router, 0x55, pirqmap[pirq-1], irq);
-+ return 1;
-+}
-+
-+/*
-+ * ITE 8330G pirq rules are nibble-based
-+ * FIXME: pirqmap may be { 1, 0, 3, 2 },
-+ * 2+3 are both mapped to irq 9 on my system
-+ */
-+static int pirq_ite_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+ static unsigned char pirqmap[4] = { 1, 0, 2, 3 };
-+ return read_config_nybble(router,0x43, pirqmap[pirq-1]);
-+}
-+
-+static int pirq_ite_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+ static unsigned char pirqmap[4] = { 1, 0, 2, 3 };
-+ write_config_nybble(router, 0x43, pirqmap[pirq-1], irq);
-+ return 1;
-+}
-+
-+/*
-+ * OPTI: the high four bits are the nibble pointer.
-+ * I wonder what the low bits do?
-+ */
-+static int pirq_opti_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+ return read_config_nybble(router, 0xb8, pirq >> 4);
-+}
-+
-+static int pirq_opti_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+ write_config_nybble(router, 0xb8, pirq >> 4, irq);
-+ return 1;
-+}
-+
-+/*
-+ * Cyrix: nibble offset 0x5C
-+ * 0x5C bits 7:4 is INTB bits 3:0 is INTA
-+ * 0x5D bits 7:4 is INTD bits 3:0 is INTC
-+ */
-+static int pirq_cyrix_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+ return read_config_nybble(router, 0x5C, (pirq-1)^1);
-+}
-+
-+static int pirq_cyrix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+ write_config_nybble(router, 0x5C, (pirq-1)^1, irq);
-+ return 1;
-+}
-+
-+/*
-+ * PIRQ routing for SiS 85C503 router used in several SiS chipsets.
-+ * We have to deal with the following issues here:
-+ * - vendors have different ideas about the meaning of link values
-+ * - some onboard devices (integrated in the chipset) have special
-+ * links and are thus routed differently (i.e. not via PCI INTA-INTD)
-+ * - different revision of the router have a different layout for
-+ * the routing registers, particularly for the onchip devices
-+ *
-+ * For all routing registers the common thing is we have one byte
-+ * per routeable link which is defined as:
-+ * bit 7 IRQ mapping enabled (0) or disabled (1)
-+ * bits [6:4] reserved (sometimes used for onchip devices)
-+ * bits [3:0] IRQ to map to
-+ * allowed: 3-7, 9-12, 14-15
-+ * reserved: 0, 1, 2, 8, 13
-+ *
-+ * The config-space registers located at 0x41/0x42/0x43/0x44 are
-+ * always used to route the normal PCI INT A/B/C/D respectively.
-+ * Apparently there are systems implementing the PCI routing table using
-+ * link values 0x01-0x04 and others using 0x41-0x44 for PCI INTA..D.
-+ * We try our best to handle both link mappings.
-+ *
-+ * Currently (2003-05-21) it appears most SiS chipsets follow the
-+ * definition of routing registers from the SiS-5595 southbridge.
-+ * According to the SiS 5595 datasheets the revision id's of the
-+ * router (ISA-bridge) should be 0x01 or 0xb0.
-+ *
-+ * Furthermore we've also seen lspci dumps with revision 0x00 and 0xb1.
-+ * Looks like these are used in a number of SiS 5xx/6xx/7xx chipsets.
-+ * They seem to work with the current routing code. However there is
-+ * some concern because of the two USB-OHCI HCs (original SiS 5595
-+ * had only one). YMMV.
-+ *
-+ * Onchip routing for router rev-id 0x01/0xb0 and probably 0x00/0xb1:
-+ *
-+ * 0x61: IDEIRQ:
-+ * bits [6:5] must be written 01
-+ * bit 4 channel-select primary (0), secondary (1)
-+ *
-+ * 0x62: USBIRQ:
-+ * bit 6 OHCI function disabled (0), enabled (1)
-+ *
-+ * 0x6a: ACPI/SCI IRQ: bits 4-6 reserved
-+ *
-+ * 0x7e: Data Acq. Module IRQ - bits 4-6 reserved
-+ *
-+ * We support USBIRQ (in addition to INTA-INTD) and keep the
-+ * IDE, ACPI and DAQ routing untouched as set by the BIOS.
-+ *
-+ * Currently the only reported exception is the new SiS 65x chipset
-+ * which includes the SiS 69x southbridge. Here we have the 85C503
-+ * router revision 0x04 and there are changes in the register layout
-+ * mostly related to the different USB HCs with USB 2.0 support.
-+ *
-+ * Onchip routing for router rev-id 0x04 (trial-and-error observation)
-+ *
-+ * 0x60/0x61/0x62/0x63: 1xEHCI and 3xOHCI (companion) USB-HCs
-+ * bit 6-4 are probably unused, not like 5595
-+ */
-+
-+#define PIRQ_SIS_IRQ_MASK 0x0f
-+#define PIRQ_SIS_IRQ_DISABLE 0x80
-+#define PIRQ_SIS_USB_ENABLE 0x40
-+
-+static int pirq_sis_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+ u8 x;
-+ int reg;
-+
-+ reg = pirq;
-+ if (reg >= 0x01 && reg <= 0x04)
-+ reg += 0x40;
-+ pci_read_config_byte(router, reg, &x);
-+ return (x & PIRQ_SIS_IRQ_DISABLE) ? 0 : (x & PIRQ_SIS_IRQ_MASK);
-+}
-+
-+static int pirq_sis_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+ u8 x;
-+ int reg;
-+
-+ reg = pirq;
-+ if (reg >= 0x01 && reg <= 0x04)
-+ reg += 0x40;
-+ pci_read_config_byte(router, reg, &x);
-+ x &= ~(PIRQ_SIS_IRQ_MASK | PIRQ_SIS_IRQ_DISABLE);
-+ x |= irq ? irq: PIRQ_SIS_IRQ_DISABLE;
-+ pci_write_config_byte(router, reg, x);
-+ return 1;
-+}
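
A worked example of the SiS encoding documented above, using this file's
masks: with bit 7 clear the low nibble is the routed IRQ, with bit 7 set the
link is disabled. The register values below are invented for illustration:

#include <assert.h>

#define PIRQ_SIS_IRQ_MASK    0x0f
#define PIRQ_SIS_IRQ_DISABLE 0x80

int main(void)
{
        unsigned char x = 0x8b;         /* bit 7 set: routing disabled */

        assert(x & PIRQ_SIS_IRQ_DISABLE);

        x &= ~(PIRQ_SIS_IRQ_MASK | PIRQ_SIS_IRQ_DISABLE);
        x |= 11;                        /* route the link to IRQ 11 */
        assert((x & PIRQ_SIS_IRQ_MASK) == 11 &&
               !(x & PIRQ_SIS_IRQ_DISABLE));
        return 0;
}
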
-+
-+
-+/*
-+ * VLSI: nibble offset 0x74 - educated guess due to routing table and
-+ * config space of VLSI 82C534 PCI-bridge/router (1004:0102)
-+ * Tested on HP OmniBook 800 covering PIRQ 1, 2, 4, 8 for onboard
-+ * devices, PIRQ 3 for non-pci(!) soundchip and (untested) PIRQ 6
-+ * for the busbridge to the docking station.
-+ */
-+
-+static int pirq_vlsi_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+ if (pirq > 8) {
-+ printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq);
-+ return 0;
-+ }
-+ return read_config_nybble(router, 0x74, pirq-1);
-+}
-+
-+static int pirq_vlsi_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+ if (pirq > 8) {
-+ printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq);
-+ return 0;
-+ }
-+ write_config_nybble(router, 0x74, pirq-1, irq);
-+ return 1;
-+}
-+
-+/*
-+ * ServerWorks: PCI interrupts mapped to system IRQ lines through Index
-+ * and Redirect I/O registers (0x0c00 and 0x0c01). The Index register
-+ * format is (PCIIRQ## | 0x10), e.g.: PCIIRQ10=0x1a. The Redirect
-+ * register is a straight binary coding of desired PIC IRQ (low nibble).
-+ *
-+ * The 'link' value in the PIRQ table is already in the correct format
-+ * for the Index register. There are some special index values:
-+ * 0x00 for ACPI (SCI), 0x01 for USB, 0x02 for IDE0, 0x04 for IDE1,
-+ * and 0x03 for SMBus.
-+ */
-+static int pirq_serverworks_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+ outb_p(pirq, 0xc00);
-+ return inb(0xc01) & 0xf;
-+}
-+
-+static int pirq_serverworks_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+ outb_p(pirq, 0xc00);
-+ outb_p(irq, 0xc01);
-+ return 1;
-+}
-+
-+/* Support for AMD756 PCI IRQ Routing
-+ * Jhon H. Caicedo <jhcaiced@osso.org.co>
-+ * Jun/21/2001 0.2.0 Release, fixed to use "nybble" functions... (jhcaiced)
-+ * Jun/19/2001 Alpha Release 0.1.0 (jhcaiced)
-+ * The AMD756 pirq rules are nibble-based
-+ * offset 0x56 0-3 PIRQA 4-7 PIRQB
-+ * offset 0x57 0-3 PIRQC 4-7 PIRQD
-+ */
-+static int pirq_amd756_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+ u8 irq;
-+ irq = 0;
-+ if (pirq <= 4)
-+ {
-+ irq = read_config_nybble(router, 0x56, pirq - 1);
-+ }
-+ printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d get irq : %2d\n",
-+ dev->vendor, dev->device, pirq, irq);
-+ return irq;
-+}
-+
-+static int pirq_amd756_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+ printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d SET irq : %2d\n",
-+ dev->vendor, dev->device, pirq, irq);
-+ if (pirq <= 4)
-+ {
-+ write_config_nybble(router, 0x56, pirq - 1, irq);
-+ }
-+ return 1;
-+}
-+
-+#ifdef CONFIG_PCI_BIOS
-+
-+static int pirq_bios_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+ struct pci_dev *bridge;
-+ int pin = pci_get_interrupt_pin(dev, &bridge);
-+ return pcibios_set_irq_routing(bridge, pin, irq);
-+}
-+
-+#endif
-+
-+static __init int intel_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
-+{
-+ static struct pci_device_id pirq_440gx[] = {
-+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0) },
-+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_2) },
-+ { },
-+ };
-+
-+ /* 440GX has a proprietary PIRQ router -- don't use it */
-+ if (pci_dev_present(pirq_440gx))
-+ return 0;
-+
-+ switch(device)
-+ {
-+ case PCI_DEVICE_ID_INTEL_82371FB_0:
-+ case PCI_DEVICE_ID_INTEL_82371SB_0:
-+ case PCI_DEVICE_ID_INTEL_82371AB_0:
-+ case PCI_DEVICE_ID_INTEL_82371MX:
-+ case PCI_DEVICE_ID_INTEL_82443MX_0:
-+ case PCI_DEVICE_ID_INTEL_82801AA_0:
-+ case PCI_DEVICE_ID_INTEL_82801AB_0:
-+ case PCI_DEVICE_ID_INTEL_82801BA_0:
-+ case PCI_DEVICE_ID_INTEL_82801BA_10:
-+ case PCI_DEVICE_ID_INTEL_82801CA_0:
-+ case PCI_DEVICE_ID_INTEL_82801CA_12:
-+ case PCI_DEVICE_ID_INTEL_82801DB_0:
-+ case PCI_DEVICE_ID_INTEL_82801E_0:
-+ case PCI_DEVICE_ID_INTEL_82801EB_0:
-+ case PCI_DEVICE_ID_INTEL_ESB_1:
-+ case PCI_DEVICE_ID_INTEL_ICH6_0:
-+ case PCI_DEVICE_ID_INTEL_ICH6_1:
-+ case PCI_DEVICE_ID_INTEL_ICH7_0:
-+ case PCI_DEVICE_ID_INTEL_ICH7_1:
-+ case PCI_DEVICE_ID_INTEL_ICH7_30:
-+ case PCI_DEVICE_ID_INTEL_ICH7_31:
-+ case PCI_DEVICE_ID_INTEL_ESB2_0:
-+ case PCI_DEVICE_ID_INTEL_ICH8_0:
-+ case PCI_DEVICE_ID_INTEL_ICH8_1:
-+ case PCI_DEVICE_ID_INTEL_ICH8_2:
-+ case PCI_DEVICE_ID_INTEL_ICH8_3:
-+ case PCI_DEVICE_ID_INTEL_ICH8_4:
-+ r->name = "PIIX/ICH";
-+ r->get = pirq_piix_get;
-+ r->set = pirq_piix_set;
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+static __init int via_router_probe(struct irq_router *r,
-+ struct pci_dev *router, u16 device)
-+{
-+ /* FIXME: We should move some of the quirk fixup stuff here */
-+
-+ /*
-+ * workarounds for some buggy BIOSes
-+ */
-+ if (device == PCI_DEVICE_ID_VIA_82C586_0) {
-+ switch(router->device) {
-+ case PCI_DEVICE_ID_VIA_82C686:
-+ /*
-+ * Asus k7m bios wrongly reports 82C686A
-+ * as 586-compatible
-+ */
-+ device = PCI_DEVICE_ID_VIA_82C686;
-+ break;
-+ case PCI_DEVICE_ID_VIA_8235:
-+ /**
-+ * Asus a7v-x bios wrongly reports 8235
-+ * as 586-compatible
-+ */
-+ device = PCI_DEVICE_ID_VIA_8235;
-+ break;
-+ }
-+ }
-+
-+ switch(device) {
-+ case PCI_DEVICE_ID_VIA_82C586_0:
-+ r->name = "VIA";
-+ r->get = pirq_via586_get;
-+ r->set = pirq_via586_set;
-+ return 1;
-+ case PCI_DEVICE_ID_VIA_82C596:
-+ case PCI_DEVICE_ID_VIA_82C686:
-+ case PCI_DEVICE_ID_VIA_8231:
-+ case PCI_DEVICE_ID_VIA_8235:
-+ /* FIXME: add new ones for 8233/5 */
-+ r->name = "VIA";
-+ r->get = pirq_via_get;
-+ r->set = pirq_via_set;
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+static __init int vlsi_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
-+{
-+ switch(device)
-+ {
-+ case PCI_DEVICE_ID_VLSI_82C534:
-+ r->name = "VLSI 82C534";
-+ r->get = pirq_vlsi_get;
-+ r->set = pirq_vlsi_set;
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+
-+static __init int serverworks_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
-+{
-+ switch(device)
-+ {
-+ case PCI_DEVICE_ID_SERVERWORKS_OSB4:
-+ case PCI_DEVICE_ID_SERVERWORKS_CSB5:
-+ r->name = "ServerWorks";
-+ r->get = pirq_serverworks_get;
-+ r->set = pirq_serverworks_set;
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+static __init int sis_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
-+{
-+ if (device != PCI_DEVICE_ID_SI_503)
-+ return 0;
-+
-+ r->name = "SIS";
-+ r->get = pirq_sis_get;
-+ r->set = pirq_sis_set;
-+ return 1;
-+}
-+
-+static __init int cyrix_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
-+{
-+ switch(device)
-+ {
-+ case PCI_DEVICE_ID_CYRIX_5520:
-+ r->name = "NatSemi";
-+ r->get = pirq_cyrix_get;
-+ r->set = pirq_cyrix_set;
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+static __init int opti_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
-+{
-+ switch(device)
-+ {
-+ case PCI_DEVICE_ID_OPTI_82C700:
-+ r->name = "OPTI";
-+ r->get = pirq_opti_get;
-+ r->set = pirq_opti_set;
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+static __init int ite_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
-+{
-+ switch(device)
-+ {
-+ case PCI_DEVICE_ID_ITE_IT8330G_0:
-+ r->name = "ITE";
-+ r->get = pirq_ite_get;
-+ r->set = pirq_ite_set;
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+static __init int ali_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
-+{
-+ switch(device)
-+ {
-+ case PCI_DEVICE_ID_AL_M1533:
-+ case PCI_DEVICE_ID_AL_M1563:
-+ printk(KERN_DEBUG "PCI: Using ALI IRQ Router\n");
-+ r->name = "ALI";
-+ r->get = pirq_ali_get;
-+ r->set = pirq_ali_set;
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+static __init int amd_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
-+{
-+ switch(device)
-+ {
-+ case PCI_DEVICE_ID_AMD_VIPER_740B:
-+ r->name = "AMD756";
-+ break;
-+ case PCI_DEVICE_ID_AMD_VIPER_7413:
-+ r->name = "AMD766";
-+ break;
-+ case PCI_DEVICE_ID_AMD_VIPER_7443:
-+ r->name = "AMD768";
-+ break;
-+ default:
-+ return 0;
-+ }
-+ r->get = pirq_amd756_get;
-+ r->set = pirq_amd756_set;
-+ return 1;
-+}
-+
-+static __initdata struct irq_router_handler pirq_routers[] = {
-+ { PCI_VENDOR_ID_INTEL, intel_router_probe },
-+ { PCI_VENDOR_ID_AL, ali_router_probe },
-+ { PCI_VENDOR_ID_ITE, ite_router_probe },
-+ { PCI_VENDOR_ID_VIA, via_router_probe },
-+ { PCI_VENDOR_ID_OPTI, opti_router_probe },
-+ { PCI_VENDOR_ID_SI, sis_router_probe },
-+ { PCI_VENDOR_ID_CYRIX, cyrix_router_probe },
-+ { PCI_VENDOR_ID_VLSI, vlsi_router_probe },
-+ { PCI_VENDOR_ID_SERVERWORKS, serverworks_router_probe },
-+ { PCI_VENDOR_ID_AMD, amd_router_probe },
-+ /* Someone with docs needs to add the ATI Radeon IGP */
-+ { 0, NULL }
-+};
-+static struct irq_router pirq_router;
-+static struct pci_dev *pirq_router_dev;
-+
-+
-+/*
-+ * FIXME: should we have an option to say "generic for
-+ * chipset" ?
-+ */
-+
-+static void __init pirq_find_router(struct irq_router *r)
-+{
-+ struct irq_routing_table *rt = pirq_table;
-+ struct irq_router_handler *h;
-+
-+#ifdef CONFIG_PCI_BIOS
-+ if (!rt->signature) {
-+ printk(KERN_INFO "PCI: Using BIOS for IRQ routing\n");
-+ r->set = pirq_bios_set;
-+ r->name = "BIOS";
-+ return;
-+ }
-+#endif
-+
-+ /* Default unless a driver reloads it */
-+ r->name = "default";
-+ r->get = NULL;
-+ r->set = NULL;
-+
-+ DBG(KERN_DEBUG "PCI: Attempting to find IRQ router for %04x:%04x\n",
-+ rt->rtr_vendor, rt->rtr_device);
-+
-+ pirq_router_dev = pci_find_slot(rt->rtr_bus, rt->rtr_devfn);
-+ if (!pirq_router_dev) {
-+ DBG(KERN_DEBUG "PCI: Interrupt router not found at "
-+ "%02x:%02x\n", rt->rtr_bus, rt->rtr_devfn);
-+ return;
-+ }
-+
-+ for( h = pirq_routers; h->vendor; h++) {
-+ /* First look for a router match */
-+ if (rt->rtr_vendor == h->vendor && h->probe(r, pirq_router_dev, rt->rtr_device))
-+ break;
-+ /* Fall back to a device match */
-+ if (pirq_router_dev->vendor == h->vendor && h->probe(r, pirq_router_dev, pirq_router_dev->device))
-+ break;
-+ }
-+ printk(KERN_INFO "PCI: Using IRQ router %s [%04x/%04x] at %s\n",
-+ pirq_router.name,
-+ pirq_router_dev->vendor,
-+ pirq_router_dev->device,
-+ pci_name(pirq_router_dev));
-+}
-+
-+static struct irq_info *pirq_get_info(struct pci_dev *dev)
-+{
-+ struct irq_routing_table *rt = pirq_table;
-+ int entries = (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info);
-+ struct irq_info *info;
-+
-+ for (info = rt->slots; entries--; info++)
-+ if (info->bus == dev->bus->number && PCI_SLOT(info->devfn) == PCI_SLOT(dev->devfn))
-+ return info;
-+ return NULL;
-+}
-+
-+static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
-+{
-+ u8 pin;
-+ struct irq_info *info;
-+ int i, pirq, newirq;
-+ int irq = 0;
-+ u32 mask;
-+ struct irq_router *r = &pirq_router;
-+ struct pci_dev *dev2 = NULL;
-+ char *msg = NULL;
-+
-+ /* Find IRQ pin */
-+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
-+ if (!pin) {
-+ DBG(KERN_DEBUG " -> no interrupt pin\n");
-+ return 0;
-+ }
-+ pin = pin - 1;
-+
-+ /* Find IRQ routing entry */
-+
-+ if (!pirq_table)
-+ return 0;
-+
-+ DBG(KERN_DEBUG "IRQ for %s[%c]", pci_name(dev), 'A' + pin);
-+ info = pirq_get_info(dev);
-+ if (!info) {
-+ DBG(" -> not found in routing table\n" KERN_DEBUG);
-+ return 0;
-+ }
-+ pirq = info->irq[pin].link;
-+ mask = info->irq[pin].bitmap;
-+ if (!pirq) {
-+ DBG(" -> not routed\n" KERN_DEBUG);
-+ return 0;
-+ }
-+ DBG(" -> PIRQ %02x, mask %04x, excl %04x", pirq, mask, pirq_table->exclusive_irqs);
-+ mask &= pcibios_irq_mask;
-+
-+ /* Work around broken HP Pavilion Notebooks which assign USB to
-+ IRQ 9 even though it is actually wired to IRQ 11 */
-+
-+ if (broken_hp_bios_irq9 && pirq == 0x59 && dev->irq == 9) {
-+ dev->irq = 11;
-+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 11);
-+ r->set(pirq_router_dev, dev, pirq, 11);
-+ }
-+
-+ /* same for Acer Travelmate 360, but with CB and irq 11 -> 10 */
-+ if (acer_tm360_irqrouting && dev->irq == 11 && dev->vendor == PCI_VENDOR_ID_O2) {
-+ pirq = 0x68;
-+ mask = 0x400;
-+ dev->irq = r->get(pirq_router_dev, dev, pirq);
-+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
-+ }
-+
-+ /*
-+ * Find the best IRQ to assign: use the one
-+ * reported by the device if possible.
-+ */
-+ newirq = dev->irq;
-+ if (newirq && !((1 << newirq) & mask)) {
-+ if ( pci_probe & PCI_USE_PIRQ_MASK) newirq = 0;
-+ else printk("\n" KERN_WARNING
-+ "PCI: IRQ %i for device %s doesn't match PIRQ mask "
-+ "- try pci=usepirqmask\n" KERN_DEBUG, newirq,
-+ pci_name(dev));
-+ }
-+ if (!newirq && assign) {
-+ for (i = 0; i < 16; i++) {
-+ if (!(mask & (1 << i)))
-+ continue;
-+ if (pirq_penalty[i] < pirq_penalty[newirq] && can_request_irq(i, SA_SHIRQ))
-+ newirq = i;
-+ }
-+ }
-+ DBG(" -> newirq=%d", newirq);
-+
-+ /* Check if it is hardcoded */
-+ if ((pirq & 0xf0) == 0xf0) {
-+ irq = pirq & 0xf;
-+ DBG(" -> hardcoded IRQ %d\n", irq);
-+ msg = "Hardcoded";
-+ } else if ( r->get && (irq = r->get(pirq_router_dev, dev, pirq)) && \
-+ ((!(pci_probe & PCI_USE_PIRQ_MASK)) || ((1 << irq) & mask)) ) {
-+ DBG(" -> got IRQ %d\n", irq);
-+ msg = "Found";
-+ } else if (newirq && r->set && (dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) {
-+ DBG(" -> assigning IRQ %d", newirq);
-+ if (r->set(pirq_router_dev, dev, pirq, newirq)) {
-+ eisa_set_level_irq(newirq);
-+ DBG(" ... OK\n");
-+ msg = "Assigned";
-+ irq = newirq;
-+ }
-+ }
-+
-+ if (!irq) {
-+ DBG(" ... failed\n");
-+ if (newirq && mask == (1 << newirq)) {
-+ msg = "Guessed";
-+ irq = newirq;
-+ } else
-+ return 0;
-+ }
-+ printk(KERN_INFO "PCI: %s IRQ %d for device %s\n", msg, irq, pci_name(dev));
-+
-+ /* Update IRQ for all devices with the same pirq value */
-+ while ((dev2 = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev2)) != NULL) {
-+ pci_read_config_byte(dev2, PCI_INTERRUPT_PIN, &pin);
-+ if (!pin)
-+ continue;
-+ pin--;
-+ info = pirq_get_info(dev2);
-+ if (!info)
-+ continue;
-+ if (info->irq[pin].link == pirq) {
-+ /* We refuse to override the dev->irq information. Give a warning! */
-+ if ( dev2->irq && dev2->irq != irq && \
-+ (!(pci_probe & PCI_USE_PIRQ_MASK) || \
-+ ((1 << dev2->irq) & mask)) ) {
-+#ifndef CONFIG_PCI_MSI
-+ printk(KERN_INFO "IRQ routing conflict for %s, have irq %d, want irq %d\n",
-+ pci_name(dev2), dev2->irq, irq);
-+#endif
-+ continue;
-+ }
-+ dev2->irq = irq;
-+ pirq_penalty[irq]++;
-+ if (dev != dev2)
-+ printk(KERN_INFO "PCI: Sharing IRQ %d with %s\n", irq, pci_name(dev2));
-+ }
-+ }
-+ return 1;
-+}
-+
-+static void __init pcibios_fixup_irqs(void)
-+{
-+ struct pci_dev *dev = NULL;
-+ u8 pin;
-+
-+ DBG(KERN_DEBUG "PCI: IRQ fixup\n");
-+ while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
-+ /*
-+ * If the BIOS has set an out of range IRQ number, just ignore it.
-+ * Also keep track of which IRQs are already in use.
-+ */
-+ if (dev->irq >= 16) {
-+ DBG(KERN_DEBUG "%s: ignoring bogus IRQ %d\n", pci_name(dev), dev->irq);
-+ dev->irq = 0;
-+ }
-+ /* If the IRQ is already assigned to a PCI device, ignore its ISA use penalty */
-+ if (pirq_penalty[dev->irq] >= 100 && pirq_penalty[dev->irq] < 100000)
-+ pirq_penalty[dev->irq] = 0;
-+ pirq_penalty[dev->irq]++;
-+ }
-+
-+ dev = NULL;
-+ while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
-+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
-+#ifdef CONFIG_X86_IO_APIC
-+ /*
-+ * Recalculate IRQ numbers if we use the I/O APIC.
-+ */
-+ if (io_apic_assign_pci_irqs)
-+ {
-+ int irq;
-+
-+ if (pin) {
-+ pin--; /* interrupt pins are numbered starting from 1 */
-+ irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin);
-+ /*
-+ * Busses behind bridges are typically not listed in the MP-table.
-+ * In this case we have to look up the IRQ based on the parent bus,
-+ * parent slot, and pin number. The SMP code detects such bridged
-+ * busses itself so we should get into this branch reliably.
-+ */
-+ if (irq < 0 && dev->bus->parent) { /* go back to the bridge */
-+ struct pci_dev * bridge = dev->bus->self;
-+
-+ pin = (pin + PCI_SLOT(dev->devfn)) % 4;
-+ irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number,
-+ PCI_SLOT(bridge->devfn), pin);
-+ if (irq >= 0)
-+ printk(KERN_WARNING "PCI: using PPB %s[%c] to get irq %d\n",
-+ pci_name(bridge), 'A' + pin, irq);
-+ }
-+ if (irq >= 0) {
-+ if (use_pci_vector() &&
-+ !platform_legacy_irq(irq))
-+ irq = IO_APIC_VECTOR(irq);
-+
-+ printk(KERN_INFO "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n",
-+ pci_name(dev), 'A' + pin, irq);
-+ dev->irq = irq;
-+ }
-+ }
-+ }
-+#endif
-+ /*
-+ * Still no IRQ? Try to lookup one...
-+ */
-+ if (pin && !dev->irq)
-+ pcibios_lookup_irq(dev, 0);
-+ }
-+}
-+
-+/*
-+ * Work around broken HP Pavilion Notebooks which assign USB to
-+ * IRQ 9 even though it is actually wired to IRQ 11
-+ */
-+static int __init fix_broken_hp_bios_irq9(struct dmi_system_id *d)
-+{
-+ if (!broken_hp_bios_irq9) {
-+ broken_hp_bios_irq9 = 1;
-+ printk(KERN_INFO "%s detected - fixing broken IRQ routing\n", d->ident);
-+ }
-+ return 0;
-+}
-+
-+/*
-+ * Work around broken Acer TravelMate 360 Notebooks which assign
-+ * Cardbus to IRQ 11 even though it is actually wired to IRQ 10
-+ */
-+static int __init fix_acer_tm360_irqrouting(struct dmi_system_id *d)
-+{
-+ if (!acer_tm360_irqrouting) {
-+ acer_tm360_irqrouting = 1;
-+ printk(KERN_INFO "%s detected - fixing broken IRQ routing\n", d->ident);
-+ }
-+ return 0;
-+}
-+
-+static struct dmi_system_id __initdata pciirq_dmi_table[] = {
-+ {
-+ .callback = fix_broken_hp_bios_irq9,
-+ .ident = "HP Pavilion N5400 Series Laptop",
-+ .matches = {
-+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
-+ DMI_MATCH(DMI_BIOS_VERSION, "GE.M1.03"),
-+ DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook Model GE"),
-+ DMI_MATCH(DMI_BOARD_VERSION, "OmniBook N32N-736"),
-+ },
-+ },
-+ {
-+ .callback = fix_acer_tm360_irqrouting,
-+ .ident = "Acer TravelMate 36x Laptop",
-+ .matches = {
-+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
-+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
-+ },
-+ },
-+ { }
-+};
-+
-+static int __init pcibios_irq_init(void)
-+{
-+ DBG(KERN_DEBUG "PCI: IRQ init\n");
-+
-+ if (pcibios_enable_irq || raw_pci_ops == NULL)
-+ return 0;
-+
-+ dmi_check_system(pciirq_dmi_table);
-+
-+ pirq_table = pirq_find_routing_table();
-+
-+#ifdef CONFIG_PCI_BIOS
-+ if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN))
-+ pirq_table = pcibios_get_irq_routing_table();
-+#endif
-+ if (pirq_table) {
-+ pirq_peer_trick();
-+ pirq_find_router(&pirq_router);
-+ if (pirq_table->exclusive_irqs) {
-+ int i;
-+ for (i=0; i<16; i++)
-+ if (!(pirq_table->exclusive_irqs & (1 << i)))
-+ pirq_penalty[i] += 100;
-+ }
-+ /* If we're using the I/O APIC, avoid using the PCI IRQ routing table */
-+ if (io_apic_assign_pci_irqs)
-+ pirq_table = NULL;
-+ }
-+
-+ pcibios_enable_irq = pirq_enable_irq;
-+
-+ pcibios_fixup_irqs();
-+ return 0;
-+}
-+
-+subsys_initcall(pcibios_irq_init);
-+
-+
-+static void pirq_penalize_isa_irq(int irq, int active)
-+{
-+ /*
-+ * If any ISAPnP device reports an IRQ in its list of possible
-+ * IRQs, we try to avoid assigning it to PCI devices.
-+ */
-+ if (irq < 16) {
-+ if (active)
-+ pirq_penalty[irq] += 1000;
-+ else
-+ pirq_penalty[irq] += 100;
-+ }
-+}
-+
-+void pcibios_penalize_isa_irq(int irq, int active)
-+{
-+#ifdef CONFIG_ACPI
-+ if (!acpi_noirq)
-+ acpi_penalize_isa_irq(irq, active);
-+ else
-+#endif
-+ pirq_penalize_isa_irq(irq, active);
-+}
-+
-+static int pirq_enable_irq(struct pci_dev *dev)
-+{
-+ u8 pin;
-+ struct pci_dev *temp_dev;
-+
-+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
-+ if (pin && !pcibios_lookup_irq(dev, 1) && !dev->irq) {
-+ char *msg = "";
-+
-+ pin--; /* interrupt pins are numbered starting from 1 */
-+
-+ if (io_apic_assign_pci_irqs) {
-+ int irq;
-+
-+ irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin);
-+ /*
-+ * Busses behind bridges are typically not listed in the MP-table.
-+ * In this case we have to look up the IRQ based on the parent bus,
-+ * parent slot, and pin number. The SMP code detects such bridged
-+ * busses itself so we should get into this branch reliably.
-+ */
-+ temp_dev = dev;
-+ while (irq < 0 && dev->bus->parent) { /* go back to the bridge */
-+ struct pci_dev * bridge = dev->bus->self;
-+
-+ pin = (pin + PCI_SLOT(dev->devfn)) % 4;
-+ irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number,
-+ PCI_SLOT(bridge->devfn), pin);
-+ if (irq >= 0)
-+ printk(KERN_WARNING "PCI: using PPB %s[%c] to get irq %d\n",
-+ pci_name(bridge), 'A' + pin, irq);
-+ dev = bridge;
-+ }
-+ dev = temp_dev;
-+ if (irq >= 0) {
-+#ifdef CONFIG_PCI_MSI
-+ if (!platform_legacy_irq(irq))
-+ irq = IO_APIC_VECTOR(irq);
-+#endif
-+ printk(KERN_INFO "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n",
-+ pci_name(dev), 'A' + pin, irq);
-+ dev->irq = irq;
-+ return 0;
-+ } else
-+ msg = " Probably buggy MP table.";
-+ } else if (pci_probe & PCI_BIOS_IRQ_SCAN)
-+ msg = "";
-+ else
-+ msg = " Please try using pci=biosirq.";
-+
-+ /* With IDE legacy devices the IRQ lookup failure is not a problem.. */
-+ if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE && !(dev->class & 0x5))
-+ return 0;
-+
-+ printk(KERN_WARNING "PCI: No IRQ known for interrupt pin %c of device %s.%s\n",
-+ 'A' + pin, pci_name(dev), msg);
-+ }
-+ return 0;
-+}
-+
-+int pci_vector_resources(int last, int nr_released)
-+{
-+ int count = nr_released;
-+
-+ int next = last;
-+ int offset = (last % 8);
-+
-+ while (next < FIRST_SYSTEM_VECTOR) {
-+ next += 8;
-+#ifdef CONFIG_X86_64
-+ if (next == IA32_SYSCALL_VECTOR)
-+ continue;
-+#else
-+ if (next == SYSCALL_VECTOR)
-+ continue;
-+#endif
-+ count++;
-+ if (next >= FIRST_SYSTEM_VECTOR) {
-+ if (offset%8) {
-+ next = FIRST_DEVICE_VECTOR + offset;
-+ offset++;
-+ continue;
-+ }
-+ count--;
-+ }
-+ }
-+
-+ return count;
-+}
-diff --git a/arch/i386/pci/pcifront.c b/arch/i386/pci/pcifront.c
-new file mode 100644
-index 0000000..7009115
---- /dev/null
-+++ b/arch/i386/pci/pcifront.c
-@@ -0,0 +1,55 @@
-+/*
-+ * PCI Frontend Stub - puts some "dummy" functions into the Linux x86 PCI core
-+ * to support the Xen PCI Frontend's operation
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/pci.h>
-+#include <asm/acpi.h>
-+#include "pci.h"
-+
-+static int pcifront_enable_irq(struct pci_dev *dev)
-+{
-+ u8 irq;
-+ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
-+ dev->irq = irq;
-+
-+ return 0;
-+}
-+
-+extern u8 pci_cache_line_size;
-+
-+static int __init pcifront_x86_stub_init(void)
-+{
-+ struct cpuinfo_x86 *c = &boot_cpu_data;
-+
-+ /* Only install our method if we haven't found real hardware already */
-+ if (raw_pci_ops)
-+ return 0;
-+
-+ printk(KERN_INFO "PCI: setting up Xen PCI frontend stub\n");
-+
-+ /* Copied from arch/i386/pci/common.c */
-+ pci_cache_line_size = 32 >> 2;
-+ if (c->x86 >= 6 && c->x86_vendor == X86_VENDOR_AMD)
-+ pci_cache_line_size = 64 >> 2; /* K7 & K8 */
-+ else if (c->x86 > 6 && c->x86_vendor == X86_VENDOR_INTEL)
-+ pci_cache_line_size = 128 >> 2; /* P4 */
-+
-+ /* On x86, we need to disable the normal IRQ routing table and
-+ * just ask the backend
-+ */
-+ pcibios_enable_irq = pcifront_enable_irq;
-+ pcibios_disable_irq = NULL;
-+
-+#ifdef CONFIG_ACPI
-+ /* Keep ACPI out of the picture */
-+ acpi_noirq = 1;
-+#endif
-+
-+ return 0;
-+}
-+
-+arch_initcall(pcifront_x86_stub_init);
-diff --git a/arch/i386/power/Makefile b/arch/i386/power/Makefile
-index 8cfa4e8..e74fee6 100644
---- a/arch/i386/power/Makefile
-+++ b/arch/i386/power/Makefile
-@@ -1,2 +1,4 @@
--obj-$(CONFIG_PM) += cpu.o
-+obj-$(CONFIG_PM_LEGACY) += cpu.o
-+obj-$(CONFIG_SOFTWARE_SUSPEND) += cpu.o
-+obj-$(CONFIG_ACPI_SLEEP) += cpu.o
- obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o
-diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
-index a85ea9d..cea9e5c 100644
---- a/arch/ia64/Kconfig
-+++ b/arch/ia64/Kconfig
-@@ -50,6 +50,53 @@ config GENERIC_IOMAP
- bool
- default y
-
-+config XEN
-+ bool
-+ default y
-+ help
-+ Enable Xen hypervisor support. Resulting kernel runs
-+ both as a guest OS on Xen and natively on hardware.
-+
-+config ARCH_XEN
-+ bool
-+ default y
-+ help
-+ TEMP ONLY. Needs to be on for drivers/xen to build.
-+
-+config XEN_PRIVILEGED_GUEST
-+ bool "Privileged Guest"
-+ default n
-+ help
-+ Used in drivers/xen/privcmd.c. Should go away?
-+
-+config XEN_BLKDEV_GRANT
-+ depends on XEN
-+ bool
-+ default y
-+
-+config XEN_BLKDEV_FRONTEND
-+ depends on XEN
-+ bool
-+ default y
-+
-+config XEN_BLKDEV_BACKEND
-+ depends on XEN
-+ bool
-+ default y
-+
-+config XEN_VT
-+ bool "Override for turning on CONFIG_VT for domU"
-+ default y
-+ help
-+ Hack to turn off CONFIG_VT for domU
-+
-+config VT
-+ bool
-+ default y if XEN && XEN_VT
-+ default n if XEN && !XEN_VT
-+ help
-+ Hack to turn off CONFIG_VT for domU
-+
- config SCHED_NO_NO_OMIT_FRAME_POINTER
- bool
- default y
-diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
-index f722e1a..172b4cf 100644
---- a/arch/ia64/Makefile
-+++ b/arch/ia64/Makefile
-@@ -52,9 +52,15 @@ core-$(CONFIG_IA64_GENERIC) += arch/ia6
- core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/
- core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/
- core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/
-+core-$(CONFIG_XEN) += arch/ia64/xen/
-
- drivers-$(CONFIG_PCI) += arch/ia64/pci/
-+ifneq ($(CONFIG_XEN),y)
- drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/
-+endif
-+ifneq ($(CONFIG_IA64_GENERIC),y)
-+drivers-$(CONFIG_XEN) += arch/ia64/hp/sim/
-+endif
- drivers-$(CONFIG_IA64_HP_ZX1) += arch/ia64/hp/common/ arch/ia64/hp/zx1/
- drivers-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/hp/common/ arch/ia64/hp/zx1/
- drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/ arch/ia64/sn/
-@@ -68,6 +74,8 @@ all: compressed unwcheck
-
- compressed: vmlinux.gz
-
-+vmlinuz: vmlinux.gz
-+
- vmlinux.gz: vmlinux
- $(Q)$(MAKE) $(build)=$(boot) $@
-
-@@ -82,7 +90,7 @@ CLEAN_FILES += vmlinux.gz bootloader
- boot: lib/lib.a vmlinux
- $(Q)$(MAKE) $(build)=$(boot) $@
-
--install: vmlinux.gz
-+install:
- sh $(srctree)/arch/ia64/install.sh $(KERNELRELEASE) $< System.map "$(INSTALL_PATH)"
-
- define archhelp
-diff --git a/arch/ia64/hp/sim/Makefile b/arch/ia64/hp/sim/Makefile
-index d10da47..b0916cd 100644
---- a/arch/ia64/hp/sim/Makefile
-+++ b/arch/ia64/hp/sim/Makefile
-@@ -14,3 +14,5 @@ obj-$(CONFIG_HP_SIMETH) += simeth.o
- obj-$(CONFIG_HP_SIMSERIAL) += simserial.o
- obj-$(CONFIG_HP_SIMSERIAL_CONSOLE) += hpsim_console.o
- obj-$(CONFIG_HP_SIMSCSI) += simscsi.o
-+obj-$(CONFIG_XEN) += simserial.o
-+obj-$(CONFIG_XEN) += hpsim_console.o
-diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
-index 930fdfc..67738bb 100644
---- a/arch/ia64/kernel/entry.S
-+++ b/arch/ia64/kernel/entry.S
-@@ -181,7 +181,7 @@ END(sys_clone)
- * called. The code starting at .map relies on this. The rest of the code
- * doesn't care about the interrupt masking status.
- */
--GLOBAL_ENTRY(ia64_switch_to)
-+GLOBAL_ENTRY(__ia64_switch_to)
- .prologue
- alloc r16=ar.pfs,1,0,0,0
- DO_SAVE_SWITCH_STACK
-@@ -235,7 +235,7 @@ GLOBAL_ENTRY(ia64_switch_to)
- ;;
- srlz.d
- br.cond.sptk .done
--END(ia64_switch_to)
-+END(__ia64_switch_to)
-
- /*
- * Note that interrupts are enabled during save_switch_stack and load_switch_stack. This
-@@ -376,7 +376,7 @@ END(save_switch_stack)
- * - b7 holds address to return to
- * - must not touch r8-r11
- */
--ENTRY(load_switch_stack)
-+GLOBAL_ENTRY(load_switch_stack)
- .prologue
- .altrp b7
-
-@@ -511,7 +511,7 @@ END(clone)
- * because some system calls (such as ia64_execve) directly
- * manipulate ar.pfs.
- */
--GLOBAL_ENTRY(ia64_trace_syscall)
-+GLOBAL_ENTRY(__ia64_trace_syscall)
- PT_REGS_UNWIND_INFO(0)
- /*
- * We need to preserve the scratch registers f6-f11 in case the system
-@@ -583,7 +583,7 @@ strace_error:
- (p6) mov r10=-1
- (p6) mov r8=r9
- br.cond.sptk .strace_save_retval
--END(ia64_trace_syscall)
-+END(__ia64_trace_syscall)
-
- /*
- * When traced and returning from sigreturn, we invoke syscall_trace but then
-@@ -636,8 +636,11 @@ GLOBAL_ENTRY(ia64_ret_from_syscall)
- adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8
- mov r10=r0 // clear error indication in r10
- (p7) br.cond.spnt handle_syscall_error // handle potential syscall failure
-+ ;;
-+ // don't fall through, ia64_leave_syscall may be #define'd
-+ br.cond.sptk.few ia64_leave_syscall
-+ ;;
- END(ia64_ret_from_syscall)
-- // fall through
- /*
- * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
- * need to switch to bank 0 and doesn't restore the scratch registers.
-@@ -682,7 +685,7 @@ END(ia64_ret_from_syscall)
- * ar.csd: cleared
- * ar.ssd: cleared
- */
--ENTRY(ia64_leave_syscall)
-+GLOBAL_ENTRY(__ia64_leave_syscall)
- PT_REGS_UNWIND_INFO(0)
- /*
- * work.need_resched etc. mustn't get changed by this CPU before it returns to
-@@ -790,7 +793,7 @@ ENTRY(ia64_leave_syscall)
- mov.m ar.ssd=r0 // M2 clear ar.ssd
- mov f11=f0 // F clear f11
- br.cond.sptk.many rbs_switch // B
--END(ia64_leave_syscall)
-+END(__ia64_leave_syscall)
-
- #ifdef CONFIG_IA32_SUPPORT
- GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
-@@ -802,10 +805,13 @@ GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
- st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit
- .mem.offset 8,0
- st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit
-+ ;;
-+ // don't fall through, ia64_leave_kernel may be #define'd
-+ br.cond.sptk.few ia64_leave_kernel
-+ ;;
- END(ia64_ret_from_ia32_execve)
-- // fall through
- #endif /* CONFIG_IA32_SUPPORT */
--GLOBAL_ENTRY(ia64_leave_kernel)
-+GLOBAL_ENTRY(__ia64_leave_kernel)
- PT_REGS_UNWIND_INFO(0)
- /*
- * work.need_resched etc. mustn't get changed by this CPU before it returns to
-@@ -1150,7 +1156,7 @@ skip_rbs_switch:
- ld8 r10=[r3]
- br.cond.sptk.many .work_processed_syscall // re-check
-
--END(ia64_leave_kernel)
-+END(__ia64_leave_kernel)
-
- ENTRY(handle_syscall_error)
- /*
-@@ -1190,7 +1196,7 @@ END(ia64_invoke_schedule_tail)
- * be set up by the caller. We declare 8 input registers so the system call
- * args get preserved, in case we need to restart a system call.
- */
--ENTRY(notify_resume_user)
-+GLOBAL_ENTRY(notify_resume_user)
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
- alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
- mov r9=ar.unat
-@@ -1278,7 +1284,7 @@ ENTRY(sys_rt_sigreturn)
- adds sp=16,sp
- ;;
- ld8 r9=[sp] // load new ar.unat
-- mov.sptk b7=r8,ia64_leave_kernel
-+ mov.sptk b7=r8,__ia64_leave_kernel
- ;;
- mov ar.unat=r9
- br.many b7
-diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
-index f1778a8..9fae5fb 100644
---- a/arch/ia64/kernel/head.S
-+++ b/arch/ia64/kernel/head.S
-@@ -363,6 +363,12 @@ start_ap:
- ;;
- (isBP) st8 [r2]=r28 // save the address of the boot param area passed by the bootloader
-
-+#ifdef CONFIG_XEN
-+ // Note: isBP is used by the subprogram.
-+ br.call.sptk.many rp=early_xen_setup
-+ ;;
-+#endif
-+
- #ifdef CONFIG_SMP
- (isAP) br.call.sptk.many rp=start_secondary
- .ret0:
-diff --git a/arch/ia64/kernel/pal.S b/arch/ia64/kernel/pal.S
-index 5018c7f..7bc41ea 100644
---- a/arch/ia64/kernel/pal.S
-+++ b/arch/ia64/kernel/pal.S
-@@ -16,6 +16,7 @@
- #include <asm/processor.h>
-
- .data
-+ .globl pal_entry_point
- pal_entry_point:
- data8 ia64_pal_default_handler
- .text
-@@ -53,7 +54,7 @@ END(ia64_pal_default_handler)
- * in4 1 ==> clear psr.ic, 0 ==> don't clear psr.ic
- *
- */
--GLOBAL_ENTRY(ia64_pal_call_static)
-+GLOBAL_ENTRY(__ia64_pal_call_static)
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
- alloc loc1 = ar.pfs,5,5,0,0
- movl loc2 = pal_entry_point
-@@ -90,7 +91,7 @@ GLOBAL_ENTRY(ia64_pal_call_static)
- ;;
- srlz.d // serialize restoration of psr.l
- br.ret.sptk.many b0
--END(ia64_pal_call_static)
-+END(__ia64_pal_call_static)
-
- /*
- * Make a PAL call using the stacked registers calling convention.
-diff --git a/arch/ia64/kernel/sal.c b/arch/ia64/kernel/sal.c
-index 056f7a6..a798e50 100644
---- a/arch/ia64/kernel/sal.c
-+++ b/arch/ia64/kernel/sal.c
-@@ -336,6 +336,9 @@ ia64_sal_init (struct ia64_sal_systab *s
- p += SAL_DESC_SIZE(*p);
- }
-
-+#ifdef CONFIG_XEN
-+ if (!running_on_xen)
-+#endif
- check_sal_cache_flush();
- }
-
-diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
-index 3258e09..e478453 100644
---- a/arch/ia64/kernel/setup.c
-+++ b/arch/ia64/kernel/setup.c
-@@ -61,6 +61,9 @@
- #include <asm/system.h>
- #include <asm/unistd.h>
- #include <asm/system.h>
-+#ifdef CONFIG_XEN
-+#include <asm/hypervisor.h>
-+#endif
-
- #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
- # error "struct cpuinfo_ia64 too big!"
-@@ -243,6 +246,14 @@ reserve_memory (void)
- rsvd_region[n].end = (unsigned long) ia64_imva(_end);
- n++;
-
-+#ifdef CONFIG_XEN
-+ if (running_on_xen) {
-+ rsvd_region[n].start = (unsigned long)__va((HYPERVISOR_shared_info->arch.start_info_pfn << PAGE_SHIFT));
-+ rsvd_region[n].end = rsvd_region[n].start + PAGE_SIZE;
-+ n++;
-+ }
-+#endif
-+
- #ifdef CONFIG_BLK_DEV_INITRD
- if (ia64_boot_param->initrd_start) {
- rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
-@@ -260,6 +271,7 @@ reserve_memory (void)
- n++;
-
- num_rsvd_regions = n;
-+ BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n);
-
- sort_regions(rsvd_region, num_rsvd_regions);
- }
-@@ -333,6 +345,10 @@ early_console_setup (char *cmdline)
- {
- int earlycons = 0;
-
-+#ifdef CONFIG_XEN
-+ if (!early_xen_console_setup(cmdline))
-+ earlycons++;
-+#endif
- #ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
- {
- extern int sn_serial_console_early_setup(void);
-diff --git a/arch/ia64/xen/Makefile b/arch/ia64/xen/Makefile
-new file mode 100644
-index 0000000..8a83e57
---- /dev/null
-+++ b/arch/ia64/xen/Makefile
-@@ -0,0 +1,5 @@
-+#
-+# Makefile for Xen components
-+#
-+
-+obj-y := hypercall.o xenivt.o xenentry.o xensetup.o xenpal.o xenhpski.o xenconsole.o xen_ksyms.o
-diff --git a/arch/ia64/xen/drivers/Makefile b/arch/ia64/xen/drivers/Makefile
-new file mode 100644
-index 0000000..57394ea
---- /dev/null
-+++ b/arch/ia64/xen/drivers/Makefile
-@@ -0,0 +1,20 @@
-+
-+obj-y += util.o
-+
-+obj-y += core/
-+obj-y += console/
-+obj-y += evtchn/
-+#obj-y += balloon/
-+obj-y += privcmd/
-+obj-y += blkback/
-+#obj-y += netback/
-+obj-y += blkfront/
-+obj-y += xenbus/
-+#obj-y += netfront/
-+#obj-$(CONFIG_XEN_PRIVILEGED_GUEST) += privcmd/
-+#obj-$(CONFIG_XEN_BLKDEV_BACKEND) += blkback/
-+#obj-$(CONFIG_XEN_NETDEV_BACKEND) += netback/
-+#obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += blkfront/
-+#obj-$(CONFIG_XEN_NETDEV_FRONTEND) += netfront/
-+#obj-$(CONFIG_XEN_BLKDEV_TAP) += blktap/
-+
-diff --git a/arch/ia64/xen/drivers/README b/arch/ia64/xen/drivers/README
-new file mode 100644
-index 0000000..33aecaa
---- /dev/null
-+++ b/arch/ia64/xen/drivers/README
-@@ -0,0 +1,2 @@
-+This is a temporary location for source/Makefiles that need to be
-+patched/reworked in drivers/xen to work with xenlinux/ia64.
-diff --git a/arch/ia64/xen/drivers/coreMakefile b/arch/ia64/xen/drivers/coreMakefile
-new file mode 100644
-index 0000000..40c4f13
---- /dev/null
-+++ b/arch/ia64/xen/drivers/coreMakefile
-@@ -0,0 +1,24 @@
-+#
-+# Makefile for the linux kernel.
-+#
-+
-+XENARCH := $(subst ",,$(CONFIG_XENARCH))
-+
-+CPPFLAGS_vmlinux.lds += -U$(XENARCH)
-+
-+$(obj)/vmlinux.lds.S:
-+ @ln -fsn $(srctree)/arch/$(XENARCH)/kernel/vmlinux.lds.S $@
-+
-+
-+obj-y := gnttab.o
-+obj-$(CONFIG_PROC_FS) += xen_proc.o
-+
-+ifeq ($(ARCH),ia64)
-+obj-y += evtchn_ia64.o
-+obj-y += xenia64_init.o
-+else
-+extra-y += vmlinux.lds
-+obj-y += reboot.o evtchn.o fixup.o
-+obj-$(CONFIG_SMP) += smp.o # setup_profiling_timer def'd in ia64
-+obj-$(CONFIG_NET) += skbuff.o # until networking is up on ia64
-+endif
-diff --git a/arch/ia64/xen/drivers/evtchn_ia64.c b/arch/ia64/xen/drivers/evtchn_ia64.c
-new file mode 100644
-index 0000000..fa753e5
---- /dev/null
-+++ b/arch/ia64/xen/drivers/evtchn_ia64.c
-@@ -0,0 +1,273 @@
-+/* NOTE: This file was split off from evtchn.c because there was
-+ some discussion that the mechanism is sufficiently different.
-+ It may be possible to merge it back in the future... djm */
-+#include <linux/config.h>
-+#include <linux/kernel.h>
-+#include <asm/hw_irq.h>
-+#include <xen/evtchn.h>
-+
-+#define MAX_EVTCHN 1024
-+
-+/* Xen will never allocate port zero for any purpose. */
-+#define VALID_EVTCHN(_chn) (((_chn) != 0) && ((_chn) < MAX_EVTCHN))
-+
-+/* Binding types. Note: only IRQT_VIRQ and IRQT_EVTCHN are supported
-+ * for XEN/IA64 at present. - ktian1
-+ */
-+enum { IRQT_UNBOUND, IRQT_PIRQ, IRQT_VIRQ, IRQT_IPI, IRQT_EVTCHN };
-+
-+/* Constructor for packed IRQ information. */
-+#define mk_irq_info(type, index, evtchn) \
-+ (((u32)(type) << 24) | ((u32)(index) << 16) | (u32)(evtchn))
-+/* Convenient shorthand for packed representation of an unbound IRQ. */
-+#define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0)
-+/* Accessor macros for packed IRQ information. */
-+#define evtchn_from_irq(irq) ((u16)(irq_info[irq]))
-+#define index_from_irq(irq) ((u8)(irq_info[irq] >> 16))
-+#define type_from_irq(irq) ((u8)(irq_info[irq] >> 24))
-+
-+/* Packed IRQ information: binding type, sub-type index, and event channel. */
-+static u32 irq_info[NR_IRQS];
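-+/* For example (IRQT_VIRQ is 2 in the enum above; VIRQ_TIMER is assumed
-+ * to be 0, as in the Xen interface headers):
-+ * mk_irq_info(IRQT_VIRQ, VIRQ_TIMER, 17) packs to 0x02000011, so
-+ * type_from_irq() recovers IRQT_VIRQ, index_from_irq() recovers
-+ * VIRQ_TIMER, and evtchn_from_irq() recovers port 17.
-+ */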
-+
-+/* One note for XEN/IA64: all event channels are bound to a single
-+ * physical irq vector, so in this context the event-channel number is
-+ * always identical to the 'irq' number. - ktian1
-+ */
-+static struct {
-+ irqreturn_t (*handler)(int, void *, struct pt_regs *);
-+ void *dev_id;
-+ char opened; /* Whether allocated */
-+} evtchns[MAX_EVTCHN];
-+
-+/*
-+ * This lock protects updates to the following mapping and reference-count
-+ * arrays. The lock does not need to be acquired to read the mapping tables.
-+ */
-+static spinlock_t irq_mapping_update_lock;
-+
-+void mask_evtchn(int port)
-+{
-+ shared_info_t *s = HYPERVISOR_shared_info;
-+ synch_set_bit(port, &s->evtchn_mask[0]);
-+}
-+EXPORT_SYMBOL(mask_evtchn);
-+
-+void unmask_evtchn(int port)
-+{
-+ shared_info_t *s = HYPERVISOR_shared_info;
-+ unsigned int cpu = smp_processor_id();
-+ vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
-+
-+#if 0 // FIXME: diverged from x86 evtchn.c
-+ /* Slow path (hypercall) if this is a non-local port. */
-+ if (unlikely(cpu != cpu_from_evtchn(port))) {
-+ evtchn_op_t op = { .cmd = EVTCHNOP_unmask,
-+ .u.unmask.port = port };
-+ (void)HYPERVISOR_event_channel_op(&op);
-+ return;
-+ }
-+#endif
-+
-+ synch_clear_bit(port, &s->evtchn_mask[0]);
-+
-+ /*
-+ * The following is basically the equivalent of 'hw_resend_irq'. Just
-+ * like a real IO-APIC we 'lose the interrupt edge' if the channel is
-+ * masked.
-+ */
-+ if (synch_test_bit(port, &s->evtchn_pending[0]) &&
-+ !synch_test_and_set_bit(port / BITS_PER_LONG,
-+ &vcpu_info->evtchn_pending_sel)) {
-+ vcpu_info->evtchn_upcall_pending = 1;
-+ if (!vcpu_info->evtchn_upcall_mask)
-+ force_evtchn_callback();
-+ }
-+}
-+EXPORT_SYMBOL(unmask_evtchn);
-+
-+
-+#define unbound_irq(e) (VALID_EVTCHN(e) && (!evtchns[(e)].opened))
-+int bind_virq_to_irqhandler(
-+ unsigned int virq,
-+ unsigned int cpu,
-+ irqreturn_t (*handler)(int, void *, struct pt_regs *),
-+ unsigned long irqflags,
-+ const char *devname,
-+ void *dev_id)
-+{
-+ evtchn_op_t op;
-+ int evtchn;
-+
-+ spin_lock(&irq_mapping_update_lock);
-+
-+ op.cmd = EVTCHNOP_bind_virq;
-+ op.u.bind_virq.virq = virq;
-+ op.u.bind_virq.vcpu = cpu;
-+ BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
-+ evtchn = op.u.bind_virq.port;
-+
-+ if (!unbound_irq(evtchn)) {
-+ evtchn = -EINVAL;
-+ goto out;
-+ }
-+
-+ evtchns[evtchn].handler = handler;
-+ evtchns[evtchn].dev_id = dev_id;
-+ evtchns[evtchn].opened = 1;
-+ irq_info[evtchn] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
-+
-+ unmask_evtchn(evtchn);
-+out:
-+ spin_unlock(&irq_mapping_update_lock);
-+ return evtchn;
-+}
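-+/* Typical use, as a sketch (the handler and names here are only
-+ * illustrative, not part of this file):
-+ *
-+ * static irqreturn_t timer_cb(int irq, void *dev, struct pt_regs *regs)
-+ * {
-+ * return IRQ_HANDLED;
-+ * }
-+ * ...
-+ * int irq = bind_virq_to_irqhandler(VIRQ_TIMER, 0, timer_cb, 0,
-+ * "timer", NULL);
-+ *
-+ * Because all event channels share one vector here, the value returned
-+ * is the event-channel port itself and can later be passed back to
-+ * unbind_from_irqhandler(irq, NULL).
-+ */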
-+
-+int bind_evtchn_to_irqhandler(unsigned int evtchn,
-+ irqreturn_t (*handler)(int, void *, struct pt_regs *),
-+ unsigned long irqflags, const char * devname, void *dev_id)
-+{
-+ spin_lock(&irq_mapping_update_lock);
-+
-+ if (!unbound_irq(evtchn)) {
-+ evtchn = -EINVAL;
-+ goto out;
-+ }
-+
-+ evtchns[evtchn].handler = handler;
-+ evtchns[evtchn].dev_id = dev_id;
-+ evtchns[evtchn].opened = 1;
-+ irq_info[evtchn] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
-+
-+ unmask_evtchn(evtchn);
-+out:
-+ spin_unlock(&irq_mapping_update_lock);
-+ return evtchn;
-+}
-+
-+int bind_ipi_to_irqhandler(
-+ unsigned int ipi,
-+ unsigned int cpu,
-+ irqreturn_t (*handler)(int, void *, struct pt_regs *),
-+ unsigned long irqflags,
-+ const char *devname,
-+ void *dev_id)
-+{
-+ printk("%s is called which has not been supported now...?\n", __FUNCTION__);
-+ while(1);
-+}
-+
-+void unbind_from_irqhandler(unsigned int irq, void *dev_id)
-+{
-+ evtchn_op_t op;
-+ int evtchn = evtchn_from_irq(irq);
-+
-+ spin_lock(&irq_mapping_update_lock);
-+
-+ if (unbound_irq(irq))
-+ goto out;
-+
-+ op.cmd = EVTCHNOP_close;
-+ op.u.close.port = evtchn;
-+ BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
-+
-+ switch (type_from_irq(irq)) {
-+ case IRQT_VIRQ:
-+ /* Add smp stuff later... */
-+ break;
-+ case IRQT_IPI:
-+ /* Add smp stuff later... */
-+ break;
-+ default:
-+ break;
-+ }
-+
-+ mask_evtchn(evtchn);
-+ evtchns[evtchn].handler = NULL;
-+ evtchns[evtchn].opened = 0;
-+
-+out:
-+ spin_unlock(&irq_mapping_update_lock);
-+}
-+
-+void notify_remote_via_irq(int irq)
-+{
-+ int evtchn = evtchn_from_irq(irq);
-+
-+ if (!unbound_irq(evtchn))
-+ notify_remote_via_evtchn(evtchn);
-+}
-+
-+irqreturn_t evtchn_interrupt(int irq, void *dev_id, struct pt_regs *regs)
-+{
-+ unsigned long l1, l2;
-+ unsigned int l1i, l2i, port;
-+ irqreturn_t (*handler)(int, void *, struct pt_regs *);
-+ shared_info_t *s = HYPERVISOR_shared_info;
-+ vcpu_info_t *vcpu_info = &s->vcpu_info[smp_processor_id()];
-+
-+ vcpu_info->evtchn_upcall_mask = 1;
-+ vcpu_info->evtchn_upcall_pending = 0;
-+
-+ /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
-+ l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
-+ while ( l1 != 0 )
-+ {
-+ l1i = __ffs(l1);
-+ l1 &= ~(1UL << l1i);
-+
-+ while ( (l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i]) != 0 )
-+ {
-+ l2i = __ffs(l2);
-+ l2 &= ~(1UL << l2i);
-+
-+ port = (l1i * BITS_PER_LONG) + l2i;
-+ if ( (handler = evtchns[port].handler) != NULL )
-+ {
-+ clear_evtchn(port);
-+ handler(port, evtchns[port].dev_id, regs);
-+ }
-+ else
-+ {
-+ evtchn_device_upcall(port);
-+ }
-+ }
-+ }
-+ vcpu_info->evtchn_upcall_mask = 0;
-+ return IRQ_HANDLED;
-+}
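-+/* The scan above is a two-level bitmap walk: evtchn_pending_sel selects
-+ * which word of evtchn_pending[] to inspect, and the port number is
-+ * reconstructed as l1i * BITS_PER_LONG + l2i. For example, with 64-bit
-+ * longs a pending port 70 sets selector bit 1 (l1i == 1) and bit 6 of
-+ * evtchn_pending[1] (l2i == 6), since 1 * 64 + 6 == 70.
-+ */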
-+
-+void force_evtchn_callback(void)
-+{
-+ //(void)HYPERVISOR_xen_version(0, NULL);
-+}
-+
-+static struct irqaction evtchn_irqaction = {
-+ .handler = evtchn_interrupt,
-+ .flags = SA_INTERRUPT,
-+ .name = "xen-event-channel"
-+};
-+
-+int evtchn_irq = 0xe9;
-+void __init evtchn_init(void)
-+{
-+ shared_info_t *s = HYPERVISOR_shared_info;
-+ vcpu_info_t *vcpu_info = &s->vcpu_info[smp_processor_id()];
-+
-+#if 0
-+ int ret;
-+ irq = assign_irq_vector(AUTO_ASSIGN);
-+ ret = request_irq(irq, evtchn_interrupt, 0, "xen-event-channel", NULL);
-+ if (ret < 0)
-+ {
-+ printk("xen-event-channel unable to get irq %d (%d)\n", irq, ret);
-+ return;
-+ }
-+#endif
-+ register_percpu_irq(evtchn_irq, &evtchn_irqaction);
-+
-+ vcpu_info->arch.evtchn_vector = evtchn_irq;
-+ printk("xen-event-channel using irq %d\n", evtchn_irq);
-+
-+ spin_lock_init(&irq_mapping_update_lock);
-+ memset(evtchns, 0, sizeof(evtchns));
-+}
-diff --git a/arch/ia64/xen/drivers/patches/blkback.c.patch b/arch/ia64/xen/drivers/patches/blkback.c.patch
-new file mode 100644
-index 0000000..0554403
---- /dev/null
-+++ b/arch/ia64/xen/drivers/patches/blkback.c.patch
-@@ -0,0 +1,57 @@
-+diff -Naur xen/blkback/blkback.c xen.patched/blkback/blkback.c
-+--- xen/blkback/blkback.c 2005-09-23 10:54:50.000000000 -0600
-++++ xen.patched/blkback/blkback.c 2005-09-23 10:57:51.000000000 -0600
-+@@ -30,10 +30,16 @@
-+ static unsigned long mmap_vstart;
-+ #define MMAP_PAGES \
-+ (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
-++#ifdef __ia64__
-++static void *pending_vaddrs[MMAP_PAGES];
-++#define MMAP_VADDR(_idx, _i) \
-++ (unsigned long)(pending_vaddrs[((_idx) * BLKIF_MAX_SEGMENTS_PER_REQUEST) + (_i)])
-++#else
-+ #define MMAP_VADDR(_req,_seg) \
-+ (mmap_vstart + \
-+ ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) + \
-+ ((_seg) * PAGE_SIZE))
-++#endif
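-++/* For example, with BLKIF_MAX_SEGMENTS_PER_REQUEST == 11 (illustrative
-++ * value), request 2 segment 3 resolves to pending_vaddrs[2 * 11 + 3]. */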
-+
-+ /*
-+ * Each outstanding request that we've passed to the lower device layers has a
-+@@ -377,9 +383,13 @@
-+ goto bad_descriptor;
-+ }
-+
-++#ifdef __ia64__
-++ MMAP_VADDR(pending_idx,i) = gnttab_map_vaddr(map[i]);
-++#else
-+ phys_to_machine_mapping[__pa(MMAP_VADDR(
-+ pending_idx, i)) >> PAGE_SHIFT] =
-+ FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT);
-++#endif
-+
-+ pending_handle(pending_idx, i) = map[i].handle;
-+ }
-+@@ -500,9 +510,22 @@
-+
-+ blkif_interface_init();
-+
-++#ifdef __ia64__
-++ {
-++ extern unsigned long alloc_empty_foreign_map_page_range(unsigned long pages);
-++ int i;
-++
-++ mmap_vstart = alloc_empty_foreign_map_page_range(MMAP_PAGES);
-++ printk("Allocated mmap_vstart: 0x%lx\n", mmap_vstart);
-++ for(i = 0; i < MMAP_PAGES; i++)
-++ pending_vaddrs[i] = (void *)(mmap_vstart + (i << PAGE_SHIFT));
-++ BUG_ON(mmap_vstart == 0);
-++ }
-++#else
-+ page = balloon_alloc_empty_page_range(MMAP_PAGES);
-+ BUG_ON(page == NULL);
-+ mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
-++#endif
-+
-+ pending_cons = 0;
-+ pending_prod = MAX_PENDING_REQS;
-diff --git a/arch/ia64/xen/drivers/patches/console.c.patch b/arch/ia64/xen/drivers/patches/console.c.patch
-new file mode 100644
-index 0000000..71d655d
---- /dev/null
-+++ b/arch/ia64/xen/drivers/patches/console.c.patch
-@@ -0,0 +1,18 @@
-+--- xen/console/console.c 2005-11-02 14:13:07.000000000 +0100
-++++ xen.patched/console/console.c 2005-11-02 14:21:20.000000000 +0100
-+@@ -768,9 +771,15 @@
-+ #endif
-+
-+ if (xen_start_info->flags & SIF_INITDOMAIN) {
-++#ifdef __ia64__
-++ xencons_priv_irq = bind_virq_to_evtchn(VIRQ_CONSOLE);
-++ bind_evtchn_to_irqhandler(xencons_priv_irq,
-++ xencons_priv_interrupt, 0, "console", NULL);
-++#else
-+ xencons_priv_irq = bind_virq_to_irq(VIRQ_CONSOLE, 0);
-+ (void)request_irq(xencons_priv_irq,
-+ xencons_priv_interrupt, 0, "console", NULL);
-++#endif
-+ } else {
-+ xencons_ring_register_receiver(xencons_rx);
-+ }
-diff --git a/arch/ia64/xen/drivers/patches/devmem.c.patch b/arch/ia64/xen/drivers/patches/devmem.c.patch
-new file mode 100644
-index 0000000..9242964
---- /dev/null
-+++ b/arch/ia64/xen/drivers/patches/devmem.c.patch
-@@ -0,0 +1,3 @@
-+diff -Naur xen/core/devmem.c xen.patched/core/devmem.c
-+--- xen/core/devmem.c 2005-09-23 10:54:50.000000000 -0600
-++++ xen.patched/core/devmem.c 2005-09-23 10:57:51.000000000 -0600
-diff --git a/arch/ia64/xen/drivers/patches/gnttab.c.patch b/arch/ia64/xen/drivers/patches/gnttab.c.patch
-new file mode 100644
-index 0000000..90272db
---- /dev/null
-+++ b/arch/ia64/xen/drivers/patches/gnttab.c.patch
-@@ -0,0 +1,46 @@
-+diff -Naur xen/core/gnttab.c xen.patched/core/gnttab.c
-+--- xen/core/gnttab.c 2005-09-23 10:54:50.000000000 -0600
-++++ xen.patched/core/gnttab.c 2005-09-23 10:57:51.000000000 -0600
-+@@ -346,6 +350,10 @@
-+ if ( hypercall.op != __HYPERVISOR_grant_table_op )
-+ return -ENOSYS;
-+
-++
-++#ifdef __ia64__
-++ ret = HYPERVISOR_grant_table_op(hypercall.arg[0], (void *)hypercall.arg[1], hypercall.arg[2]);
-++#else
-+ /* hypercall-invoking asm taken from privcmd.c */
-+ __asm__ __volatile__ (
-+ "pushl %%ebx; pushl %%ecx; pushl %%edx; "
-+@@ -359,6 +367,7 @@
-+ TRAP_INSTR "; "
-+ "popl %%edi; popl %%esi; popl %%edx; popl %%ecx; popl %%ebx"
-+ : "=a" (ret) : "0" (&hypercall) : "memory" );
-++#endif
-+
-+ return ret;
-+ }
-+@@ -423,8 +432,13 @@
-+ BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1));
-+ BUG_ON(setup.status != 0);
-+
-++#ifdef __ia64__
-++ shared = __va(frames[0] << PAGE_SHIFT);
-++ printk("grant table at %p\n", shared);
-++#else
-+ for (i = 0; i < NR_GRANT_FRAMES; i++)
-+ set_fixmap(FIX_GNTTAB_END - i, frames[i] << PAGE_SHIFT);
-++#endif
-+
-+ return 0;
-+ }
-+@@ -450,7 +466,9 @@
-+
-+ BUG_ON(gnttab_resume());
-+
-++#ifndef __ia64__
-+ shared = (grant_entry_t *)fix_to_virt(FIX_GNTTAB_END);
-++#endif
-+
-+ for (i = NR_RESERVED_ENTRIES; i < NR_GRANT_ENTRIES; i++)
-+ gnttab_list[i] = i + 1;
-diff --git a/arch/ia64/xen/drivers/patches/privcmd.c.patch b/arch/ia64/xen/drivers/patches/privcmd.c.patch
-new file mode 100644
-index 0000000..347ffa7
---- /dev/null
-+++ b/arch/ia64/xen/drivers/patches/privcmd.c.patch
-@@ -0,0 +1,43 @@
-+diff -Naur xen/privcmd/privcmd.c xen.patched/privcmd/privcmd.c
-+--- xen/privcmd/privcmd.c 2005-09-23 10:54:50.000000000 -0600
-++++ xen.patched/privcmd/privcmd.c 2005-09-23 10:57:51.000000000 -0600
-+@@ -180,6 +183,15 @@
-+ for (i = 0; i < m.num; i++, addr += PAGE_SIZE, p++) {
-+ if (get_user(mfn, p))
-+ return -EFAULT;
-++#ifdef __ia64__
-++ ret = remap_pfn_range(vma,
-++ addr&PAGE_MASK,
-++ mfn,
-++ 1<<PAGE_SHIFT,
-++ vma->vm_page_prot);
-++ if (ret < 0)
-++ goto batch_err;
-++#else
-+
-+ ret = create_lookup_pte_addr(vma->vm_mm, addr, &ptep);
-+ if (ret)
-+@@ -190,6 +202,7 @@
-+
-+ if (HYPERVISOR_mmu_update(&u, 1, NULL, m.dom) < 0)
-+ put_user(0xF0000000 | mfn, p);
-++#endif
-+ }
-+
-+ ret = 0;
-+@@ -205,6 +218,7 @@
-+ break;
-+ #endif
-+
-++#ifndef __ia64__
-+ case IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN: {
-+ unsigned long m2pv = (unsigned long)machine_to_phys_mapping;
-+ pgd_t *pgd = pgd_offset_k(m2pv);
-+@@ -216,6 +230,7 @@
-+ -EFAULT: 0;
-+ }
-+ break;
-++#endif
-+
-+ default:
-+ ret = -EINVAL;
-diff --git a/arch/ia64/xen/drivers/xenia64_init.c b/arch/ia64/xen/drivers/xenia64_init.c
-new file mode 100644
-index 0000000..8d441df
---- /dev/null
-+++ b/arch/ia64/xen/drivers/xenia64_init.c
-@@ -0,0 +1,55 @@
-+#ifdef __ia64__
-+#include <linux/config.h>
-+#include <linux/module.h>
-+#include <linux/efi.h>
-+#include <asm/sal.h>
-+#include <asm/hypervisor.h>
-+/* #include <asm-xen/evtchn.h> */
-+#include <linux/vmalloc.h>
-+
-+shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)0xf100000000000000;
-+EXPORT_SYMBOL(HYPERVISOR_shared_info);
-+
-+static int initialized;
-+start_info_t *xen_start_info;
-+
-+int xen_init(void)
-+{
-+ shared_info_t *s = HYPERVISOR_shared_info;
-+
-+ if (initialized)
-+ return running_on_xen ? 0 : -1;
-+
-+ if (!running_on_xen)
-+ return -1;
-+
-+ xen_start_info = __va(s->arch.start_info_pfn << PAGE_SHIFT);
-+ xen_start_info->flags = s->arch.flags;
-+ printk("Running on Xen! start_info_pfn=0x%lx nr_pages=%d flags=0x%x\n",
-+ s->arch.start_info_pfn, xen_start_info->nr_pages,
-+ xen_start_info->flags);
-+
-+ evtchn_init();
-+ initialized = 1;
-+ return 0;
-+}
-+
-+/* We just need a range of legal virtual addresses here, though in the
-+ * end an identity-mapped range is used for the gnttab mapping instead.
-+ */
-+unsigned long alloc_empty_foreign_map_page_range(unsigned long pages)
-+{
-+ struct vm_struct *vma;
-+
-+ if ( (vma = get_vm_area(PAGE_SIZE * pages, VM_ALLOC)) == NULL )
-+ return 0;
-+
-+ return (unsigned long)vma->addr;
-+}
-+
-+#if 0
-+/* These should be define'd but some drivers use them without
-+ * a convenient arch include */
-+unsigned long mfn_to_pfn(unsigned long mfn) { return mfn; }
-+#endif
-+#endif
-diff --git a/arch/ia64/xen/hypercall.S b/arch/ia64/xen/hypercall.S
-new file mode 100644
-index 0000000..6fb0a90
---- /dev/null
-+++ b/arch/ia64/xen/hypercall.S
-@@ -0,0 +1,323 @@
-+/*
-+ * Support routines for Xen hypercalls
-+ *
-+ * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer@hp.com>
-+ */
-+
-+#include <linux/config.h>
-+#include <asm/processor.h>
-+#include <asm/asmmacro.h>
-+
-+GLOBAL_ENTRY(xen_get_ivr)
-+ movl r8=running_on_xen;;
-+ ld4 r8=[r8];;
-+ cmp.eq p7,p0=r8,r0;;
-+(p7) mov r8=cr.ivr;;
-+(p7) br.ret.sptk.many rp
-+ ;;
-+ movl r9=XSI_PSR_IC
-+ ;;
-+ ld8 r10=[r9]
-+ ;;
-+ st8 [r9]=r0
-+ ;;
-+ XEN_HYPER_GET_IVR
-+ ;;
-+ st8 [r9]=r10
-+ br.ret.sptk.many rp
-+ ;;
-+END(xen_get_ivr)
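-+// Each accessor below repeats the pattern above; in rough C terms it is
-+// (a sketch only -- XSI_PSR_IC names the virtual psr.ic slot in the
-+// mapped privileged-register area):
-+//
-+// if (!running_on_xen)
-+// return native_op(); /* e.g. mov r8=cr.ivr */
-+// saved = *XSI_PSR_IC;
-+// *XSI_PSR_IC = 0; /* hyperprivops run with vpsr.ic off */
-+// ret = XEN_HYPER_op();
-+// *XSI_PSR_IC = saved;
-+// return ret;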
-+
-+GLOBAL_ENTRY(xen_get_tpr)
-+ movl r8=running_on_xen;;
-+ ld4 r8=[r8];;
-+ cmp.eq p7,p0=r8,r0;;
-+(p7) mov r8=cr.tpr;;
-+(p7) br.ret.sptk.many rp
-+ ;;
-+ movl r9=XSI_PSR_IC
-+ ;;
-+ ld8 r10=[r9]
-+ ;;
-+ st8 [r9]=r0
-+ ;;
-+ XEN_HYPER_GET_TPR
-+ ;;
-+ st8 [r9]=r10
-+ br.ret.sptk.many rp
-+ ;;
-+END(xen_get_tpr)
-+
-+GLOBAL_ENTRY(xen_set_tpr)
-+ movl r8=running_on_xen;;
-+ ld4 r8=[r8];;
-+ cmp.eq p7,p0=r8,r0;;
-+(p7) mov cr.tpr=r32;;
-+(p7) br.ret.sptk.many rp
-+ ;;
-+ movl r9=XSI_PSR_IC
-+ mov r8=r32
-+ ;;
-+ ld8 r10=[r9]
-+ ;;
-+ st8 [r9]=r0
-+ ;;
-+ XEN_HYPER_SET_TPR
-+ ;;
-+ st8 [r9]=r10
-+ br.ret.sptk.many rp
-+ ;;
-+END(xen_set_tpr)
-+
-+GLOBAL_ENTRY(xen_eoi)
-+ movl r8=running_on_xen;;
-+ ld4 r8=[r8];;
-+ cmp.eq p7,p0=r8,r0;;
-+(p7) mov cr.eoi=r0;;
-+(p7) br.ret.sptk.many rp
-+ ;;
-+ movl r9=XSI_PSR_IC
-+ mov r8=r32
-+ ;;
-+ ld8 r10=[r9]
-+ ;;
-+ st8 [r9]=r0
-+ ;;
-+ XEN_HYPER_EOI
-+ ;;
-+ st8 [r9]=r10
-+ br.ret.sptk.many rp
-+ ;;
-+END(xen_eoi)
-+
-+GLOBAL_ENTRY(xen_thash)
-+ movl r8=running_on_xen;;
-+ ld4 r8=[r8];;
-+ cmp.eq p7,p0=r8,r0;;
-+(p7) thash r8=r32;;
-+(p7) br.ret.sptk.many rp
-+ ;;
-+ movl r9=XSI_PSR_IC
-+ mov r8=r32
-+ ;;
-+ ld8 r10=[r9]
-+ ;;
-+ st8 [r9]=r0
-+ ;;
-+ XEN_HYPER_THASH
-+ ;;
-+ st8 [r9]=r10
-+ ;;
-+ br.ret.sptk.many rp
-+ ;;
-+END(xen_thash)
-+
-+GLOBAL_ENTRY(xen_set_itm)
-+ movl r8=running_on_xen;;
-+ ld4 r8=[r8];;
-+ cmp.eq p7,p0=r8,r0;;
-+(p7) mov cr.itm=r32;;
-+(p7) br.ret.sptk.many rp
-+ ;;
-+ movl r9=XSI_PSR_IC
-+ mov r8=r32
-+ ;;
-+ ld8 r10=[r9]
-+ ;;
-+ st8 [r9]=r0
-+ ;;
-+ XEN_HYPER_SET_ITM
-+ ;;
-+ st8 [r9]=r10
-+ ;;
-+ br.ret.sptk.many rp
-+ ;;
-+END(xen_set_itm)
-+
-+GLOBAL_ENTRY(xen_ptcga)
-+ movl r8=running_on_xen;;
-+ ld4 r8=[r8];;
-+ cmp.eq p7,p0=r8,r0;;
-+(p7) ptc.ga r32,r33;;
-+(p7) br.ret.sptk.many rp
-+ ;;
-+ movl r11=XSI_PSR_IC
-+ mov r8=r32
-+ mov r9=r33
-+ ;;
-+ ld8 r10=[r11]
-+ ;;
-+ st8 [r11]=r0
-+ ;;
-+ XEN_HYPER_PTC_GA
-+ ;;
-+ st8 [r11]=r10
-+ ;;
-+ br.ret.sptk.many rp
-+ ;;
-+END(xen_ptcga)
-+
-+GLOBAL_ENTRY(xen_get_rr)
-+ movl r8=running_on_xen;;
-+ ld4 r8=[r8];;
-+ cmp.eq p7,p0=r8,r0;;
-+(p7) mov r8=rr[r32];;
-+(p7) br.ret.sptk.many rp
-+ ;;
-+ movl r9=XSI_PSR_IC
-+ mov r8=r32
-+ ;;
-+ ld8 r10=[r9]
-+ ;;
-+ st8 [r9]=r0
-+ ;;
-+ XEN_HYPER_GET_RR
-+ ;;
-+ st8 [r9]=r10
-+ ;;
-+ br.ret.sptk.many rp
-+ ;;
-+END(xen_get_rr)
-+
-+GLOBAL_ENTRY(xen_set_rr)
-+ movl r8=running_on_xen;;
-+ ld4 r8=[r8];;
-+ cmp.eq p7,p0=r8,r0;;
-+(p7) mov rr[r32]=r33;;
-+(p7) br.ret.sptk.many rp
-+ ;;
-+ movl r11=XSI_PSR_IC
-+ mov r8=r32
-+ mov r9=r33
-+ ;;
-+ ld8 r10=[r11]
-+ ;;
-+ st8 [r11]=r0
-+ ;;
-+ XEN_HYPER_SET_RR
-+ ;;
-+ st8 [r11]=r10
-+ ;;
-+ br.ret.sptk.many rp
-+ ;;
-+END(xen_set_rr)
-+
-+GLOBAL_ENTRY(xen_set_kr)
-+ movl r8=running_on_xen;;
-+ ld4 r8=[r8];;
-+ cmp.ne p7,p0=r8,r0;;
-+(p7) br.cond.spnt.few 1f;
-+ ;;
-+ cmp.eq p7,p0=r8,r0
-+ adds r8=-1,r8;;
-+(p7) mov ar0=r9
-+(p7) br.ret.sptk.many rp;;
-+ cmp.eq p7,p0=r8,r0
-+ adds r8=-1,r8;;
-+(p7) mov ar1=r9
-+(p7) br.ret.sptk.many rp;;
-+ cmp.eq p7,p0=r8,r0
-+ adds r8=-1,r8;;
-+(p7) mov ar2=r9
-+(p7) br.ret.sptk.many rp;;
-+ cmp.eq p7,p0=r8,r0
-+ adds r8=-1,r8;;
-+(p7) mov ar3=r9
-+(p7) br.ret.sptk.many rp;;
-+ cmp.eq p7,p0=r8,r0
-+ adds r8=-1,r8;;
-+(p7) mov ar4=r9
-+(p7) br.ret.sptk.many rp;;
-+ cmp.eq p7,p0=r8,r0
-+ adds r8=-1,r8;;
-+(p7) mov ar5=r9
-+(p7) br.ret.sptk.many rp;;
-+ cmp.eq p7,p0=r8,r0
-+ adds r8=-1,r8;;
-+(p7) mov ar6=r9
-+(p7) br.ret.sptk.many rp;;
-+ cmp.eq p7,p0=r8,r0
-+ adds r8=-1,r8;;
-+(p7) mov ar7=r9
-+(p7) br.ret.sptk.many rp;;
-+
-+1: movl r11=XSI_PSR_IC
-+ mov r8=r32
-+ mov r9=r33
-+ ;;
-+ ld8 r10=[r11]
-+ ;;
-+ st8 [r11]=r0
-+ ;;
-+ XEN_HYPER_SET_KR
-+ ;;
-+ st8 [r11]=r10
-+ ;;
-+ br.ret.sptk.many rp
-+ ;;
-+END(xen_set_kr)
-+
-+GLOBAL_ENTRY(xen_fc)
-+ movl r8=running_on_xen;;
-+ ld4 r8=[r8];;
-+ cmp.eq p7,p0=r8,r0;;
-+(p7) fc r32;;
-+(p7) br.ret.sptk.many rp
-+ ;;
-+ ptc.e r96 // this is a "privified" fc r32
-+ ;;
-+ br.ret.sptk.many rp
-+END(xen_fc)
-+
-+GLOBAL_ENTRY(xen_get_cpuid)
-+ movl r8=running_on_xen;;
-+ ld4 r8=[r8];;
-+ cmp.eq p7,p0=r8,r0;;
-+(p7) mov r8=cpuid[r32];;
-+(p7) br.ret.sptk.many rp
-+ ;;
-+ mov r72=rr[r32] // this is a "privified" mov r8=cpuid[r32]
-+ ;;
-+ br.ret.sptk.many rp
-+END(xen_get_cpuid)
-+
-+GLOBAL_ENTRY(xen_get_pmd)
-+ movl r8=running_on_xen;;
-+ ld4 r8=[r8];;
-+ cmp.eq p7,p0=r8,r0;;
-+(p7) mov r8=pmd[r32];;
-+(p7) br.ret.sptk.many rp
-+ ;;
-+ mov r72=pmc[r32] // this is a "privified" mov r8=pmd[r32]
-+ ;;
-+ br.ret.sptk.many rp
-+END(xen_get_pmd)
-+
-+#ifdef CONFIG_IA32_SUPPORT
-+GLOBAL_ENTRY(xen_get_eflag)
-+ movl r8=running_on_xen;;
-+ ld4 r8=[r8];;
-+ cmp.eq p7,p0=r8,r0;;
-+(p7) mov r8=ar24;;
-+(p7) br.ret.sptk.many rp
-+ ;;
-+ mov ar24=r72 // this is a "privified" mov r8=ar.eflag
-+ ;;
-+ br.ret.sptk.many rp
-+END(xen_get_eflag)
-+// some bits aren't set if pl!=0, see SDM vol1 3.1.8
-+GLOBAL_ENTRY(xen_set_eflag)
-+ movl r8=running_on_xen;;
-+ ld4 r8=[r8];;
-+ cmp.eq p7,p0=r8,r0;;
-+(p7) mov ar24=r32
-+(p7) br.ret.sptk.many rp
-+ ;;
-+ // FIXME: this remains no-op'd because it generates
-+ // a privileged register (general exception) trap rather than
-+ // a privileged operation fault
-+ //mov ar24=r32
-+ ;;
-+ br.ret.sptk.many rp
-+END(xen_set_eflag)
-+#endif
-diff --git a/arch/ia64/xen/xen_ksyms.c b/arch/ia64/xen/xen_ksyms.c
-new file mode 100644
-index 0000000..83cff0b
---- /dev/null
-+++ b/arch/ia64/xen/xen_ksyms.c
-@@ -0,0 +1,12 @@
-+/*
-+ * Architecture-specific kernel symbols
-+ *
-+ * Don't put any exports here unless the symbol is defined in an assembler file.
-+ * All other exports should be put directly after the definition.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/module.h>
-+
-+extern int is_running_on_xen(void);
-+EXPORT_SYMBOL(is_running_on_xen);
-diff --git a/arch/ia64/xen/xenconsole.c b/arch/ia64/xen/xenconsole.c
-new file mode 100644
-index 0000000..e7010d9
---- /dev/null
-+++ b/arch/ia64/xen/xenconsole.c
-@@ -0,0 +1,19 @@
-+#include <linux/config.h>
-+#include <linux/console.h>
-+
-+int
-+early_xen_console_setup (char *cmdline)
-+{
-+#ifdef CONFIG_XEN
-+#ifndef CONFIG_IA64_HP_SIM
-+ extern int running_on_xen;
-+ if (running_on_xen) {
-+ extern struct console hpsim_cons;
-+ hpsim_cons.flags |= CON_BOOT;
-+ register_console(&hpsim_cons);
-+ return 0;
-+ }
-+#endif
-+#endif
-+ return -1;
-+}
-diff --git a/arch/ia64/xen/xenentry.S b/arch/ia64/xen/xenentry.S
-new file mode 100644
-index 0000000..0d1ded9
---- /dev/null
-+++ b/arch/ia64/xen/xenentry.S
-@@ -0,0 +1,850 @@
-+/*
-+ * ia64/xen/entry.S
-+ *
-+ * Alternate kernel routines for Xen. Heavily leveraged from
-+ * ia64/kernel/entry.S
-+ *
-+ * Copyright (C) 2005 Hewlett-Packard Co
-+ * Dan Magenheimer <dan.magenheimer@hp.com>
-+ */
-+
-+#include <linux/config.h>
-+
-+#include <asm/asmmacro.h>
-+#include <asm/cache.h>
-+#include <asm/errno.h>
-+#include <asm/kregs.h>
-+#include <asm/asm-offsets.h>
-+#include <asm/pgtable.h>
-+#include <asm/percpu.h>
-+#include <asm/processor.h>
-+#include <asm/thread_info.h>
-+#include <asm/unistd.h>
-+
-+#ifdef CONFIG_XEN
-+#include "xenminstate.h"
-+#else
-+#include "minstate.h"
-+#endif
-+
-+/*
-+ * prev_task <- ia64_switch_to(struct task_struct *next)
-+ * With Ingo's new scheduler, interrupts are disabled when this routine gets
-+ * called. The code starting at .map relies on this. The rest of the code
-+ * doesn't care about the interrupt masking status.
-+ */
-+#ifdef CONFIG_XEN
-+GLOBAL_ENTRY(xen_switch_to)
-+ .prologue
-+ alloc r16=ar.pfs,1,0,0,0
-+ movl r22=running_on_xen;;
-+ ld4 r22=[r22];;
-+ cmp.eq p7,p0=r22,r0
-+(p7) br.cond.sptk.many __ia64_switch_to;;
-+#else
-+GLOBAL_ENTRY(ia64_switch_to)
-+ .prologue
-+ alloc r16=ar.pfs,1,0,0,0
-+#endif
-+ DO_SAVE_SWITCH_STACK
-+ .body
-+
-+ adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
-+ movl r25=init_task
-+ mov r27=IA64_KR(CURRENT_STACK)
-+ adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
-+ dep r20=0,in0,61,3 // physical address of "next"
-+ ;;
-+ st8 [r22]=sp // save kernel stack pointer of old task
-+ shr.u r26=r20,IA64_GRANULE_SHIFT
-+ cmp.eq p7,p6=r25,in0
-+ ;;
-+#ifdef CONFIG_XEN
-+ movl r8=XSI_PSR_IC
-+ ;;
-+ st4 [r8]=r0 // force psr.ic off for hyperprivop(s)
-+ ;;
-+#endif
-+ /*
-+ * If we've already mapped this task's page, we can skip doing it again.
-+ */
-+(p6) cmp.eq p7,p6=r26,r27
-+(p6) br.cond.dpnt .map
-+ ;;
-+.done:
-+#ifdef CONFIG_XEN
-+ // psr.ic already off
-+ // update "current" application register
-+ mov r8=IA64_KR_CURRENT
-+ mov r9=in0;;
-+ XEN_HYPER_SET_KR
-+ ld8 sp=[r21] // load kernel stack pointer of new task
-+ movl r27=XSI_PSR_IC
-+ mov r8=1
-+ ;;
-+ st4 [r27]=r8 // psr.ic back on
-+ ;;
-+#else
-+(p6) ssm psr.ic // if we had to map, reenable the psr.ic bit FIRST!!!
-+ ;;
-+(p6) srlz.d
-+ ld8 sp=[r21] // load kernel stack pointer of new task
-+ mov IA64_KR(CURRENT)=in0 // update "current" application register
-+#endif
-+ mov r8=r13 // return pointer to previously running task
-+ mov r13=in0 // set "current" pointer
-+ ;;
-+ DO_LOAD_SWITCH_STACK
-+
-+#ifdef CONFIG_SMP
-+ sync.i // ensure "fc"s done by this CPU are visible on other CPUs
-+#endif
-+ br.ret.sptk.many rp // boogie on out in new context
-+
-+.map:
-+#ifdef CONFIG_XEN
-+ // psr.ic already off
-+#else
-+ rsm psr.ic // interrupts (psr.i) are already disabled here
-+#endif
-+ movl r25=PAGE_KERNEL
-+ ;;
-+ srlz.d
-+ or r23=r25,r20 // construct PA | page properties
-+ mov r25=IA64_GRANULE_SHIFT<<2
-+ ;;
-+#ifdef CONFIG_XEN
-+ movl r8=XSI_ITIR
-+ ;;
-+ st8 [r8]=r25
-+ ;;
-+ movl r8=XSI_IFA
-+ ;;
-+ st8 [r8]=in0 // VA of next task...
-+ ;;
-+ mov r25=IA64_TR_CURRENT_STACK
-+ // remember last page we mapped...
-+ mov r8=IA64_KR_CURRENT_STACK
-+ mov r9=r26;;
-+ XEN_HYPER_SET_KR;;
-+#else
-+ mov cr.itir=r25
-+ mov cr.ifa=in0 // VA of next task...
-+ ;;
-+ mov r25=IA64_TR_CURRENT_STACK
-+ mov IA64_KR(CURRENT_STACK)=r26 // remember last page we mapped...
-+#endif
-+ ;;
-+ itr.d dtr[r25]=r23 // wire in new mapping...
-+ br.cond.sptk .done
-+#ifdef CONFIG_XEN
-+END(xen_switch_to)
-+#else
-+END(ia64_switch_to)
-+#endif
-+
-+ /*
-+ * Invoke a system call, but do some tracing before and after the call.
-+ * We MUST preserve the current register frame throughout this routine
-+ * because some system calls (such as ia64_execve) directly
-+ * manipulate ar.pfs.
-+ */
-+#ifdef CONFIG_XEN
-+GLOBAL_ENTRY(xen_trace_syscall)
-+ PT_REGS_UNWIND_INFO(0)
-+ movl r16=running_on_xen;;
-+ ld4 r16=[r16];;
-+ cmp.eq p7,p0=r16,r0
-+(p7) br.cond.sptk.many __ia64_trace_syscall;;
-+#else
-+GLOBAL_ENTRY(ia64_trace_syscall)
-+ PT_REGS_UNWIND_INFO(0)
-+#endif
-+ /*
-+ * We need to preserve the scratch registers f6-f11 in case the system
-+ * call is sigreturn.
-+ */
-+ adds r16=PT(F6)+16,sp
-+ adds r17=PT(F7)+16,sp
-+ ;;
-+ stf.spill [r16]=f6,32
-+ stf.spill [r17]=f7,32
-+ ;;
-+ stf.spill [r16]=f8,32
-+ stf.spill [r17]=f9,32
-+ ;;
-+ stf.spill [r16]=f10
-+ stf.spill [r17]=f11
-+ br.call.sptk.many rp=syscall_trace_enter // give parent a chance to catch syscall args
-+ adds r16=PT(F6)+16,sp
-+ adds r17=PT(F7)+16,sp
-+ ;;
-+ ldf.fill f6=[r16],32
-+ ldf.fill f7=[r17],32
-+ ;;
-+ ldf.fill f8=[r16],32
-+ ldf.fill f9=[r17],32
-+ ;;
-+ ldf.fill f10=[r16]
-+ ldf.fill f11=[r17]
-+ // the syscall number may have changed, so re-load it and re-calculate the
-+ // syscall entry-point:
-+ adds r15=PT(R15)+16,sp // r15 = &pt_regs.r15 (syscall #)
-+ ;;
-+ ld8 r15=[r15]
-+ mov r3=NR_syscalls - 1
-+ ;;
-+ adds r15=-1024,r15
-+ movl r16=sys_call_table
-+ ;;
-+ shladd r20=r15,3,r16 // r20 = sys_call_table + 8*(syscall-1024)
-+ cmp.leu p6,p7=r15,r3
-+ ;;
-+(p6) ld8 r20=[r20] // load address of syscall entry point
-+(p7) movl r20=sys_ni_syscall
-+ ;;
-+ mov b6=r20
-+ br.call.sptk.many rp=b6 // do the syscall
-+.strace_check_retval:
-+ cmp.lt p6,p0=r8,r0 // syscall failed?
-+ adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8
-+ adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10
-+ mov r10=0
-+(p6) br.cond.sptk strace_error // syscall failed ->
-+ ;; // avoid RAW on r10
-+.strace_save_retval:
-+.mem.offset 0,0; st8.spill [r2]=r8 // store return value in slot for r8
-+.mem.offset 8,0; st8.spill [r3]=r10 // clear error indication in slot for r10
-+ br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
-+.ret3: br.cond.sptk .work_pending_syscall_end
-+
-+strace_error:
-+ ld8 r3=[r2] // load pt_regs.r8
-+ sub r9=0,r8 // negate return value to get errno value
-+ ;;
-+ cmp.ne p6,p0=r3,r0 // is pt_regs.r8!=0?
-+ adds r3=16,r2 // r3=&pt_regs.r10
-+ ;;
-+(p6) mov r10=-1
-+(p6) mov r8=r9
-+ br.cond.sptk .strace_save_retval
-+#ifdef CONFIG_XEN
-+END(xen_trace_syscall)
-+#else
-+END(ia64_trace_syscall)
-+#endif
-+
-+/*
-+ * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
-+ * need to switch to bank 0 and doesn't restore the scratch registers.
-+ * To avoid leaking kernel bits, the scratch registers are set to
-+ * the following known-to-be-safe values:
-+ *
-+ * r1: restored (global pointer)
-+ * r2: cleared
-+ * r3: 1 (when returning to user-level)
-+ * r8-r11: restored (syscall return value(s))
-+ * r12: restored (user-level stack pointer)
-+ * r13: restored (user-level thread pointer)
-+ * r14: cleared
-+ * r15: restored (syscall #)
-+ * r16-r17: cleared
-+ * r18: user-level b6
-+ * r19: cleared
-+ * r20: user-level ar.fpsr
-+ * r21: user-level b0
-+ * r22: cleared
-+ * r23: user-level ar.bspstore
-+ * r24: user-level ar.rnat
-+ * r25: user-level ar.unat
-+ * r26: user-level ar.pfs
-+ * r27: user-level ar.rsc
-+ * r28: user-level ip
-+ * r29: user-level psr
-+ * r30: user-level cfm
-+ * r31: user-level pr
-+ * f6-f11: cleared
-+ * pr: restored (user-level pr)
-+ * b0: restored (user-level rp)
-+ * b6: restored
-+ * b7: cleared
-+ * ar.unat: restored (user-level ar.unat)
-+ * ar.pfs: restored (user-level ar.pfs)
-+ * ar.rsc: restored (user-level ar.rsc)
-+ * ar.rnat: restored (user-level ar.rnat)
-+ * ar.bspstore: restored (user-level ar.bspstore)
-+ * ar.fpsr: restored (user-level ar.fpsr)
-+ * ar.ccv: cleared
-+ * ar.csd: cleared
-+ * ar.ssd: cleared
-+ */
-+#ifdef CONFIG_XEN
-+GLOBAL_ENTRY(xen_leave_syscall)
-+ PT_REGS_UNWIND_INFO(0)
-+ movl r22=running_on_xen;;
-+ ld4 r22=[r22];;
-+ cmp.eq p7,p0=r22,r0
-+(p7) br.cond.sptk.many __ia64_leave_syscall;;
-+#else
-+ENTRY(ia64_leave_syscall)
-+ PT_REGS_UNWIND_INFO(0)
-+#endif
-+ /*
-+ * work.need_resched etc. mustn't get changed by this CPU before it returns to
-+ * user- or fsys-mode, hence we disable interrupts early on.
-+ *
-+ * p6 controls whether current_thread_info()->flags needs to be checked for
-+ * extra work. We always check for extra work when returning to user-level.
-+ * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
-+ * is 0. After extra work processing has been completed, execution
-+ * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
-+ * needs to be redone.
-+ */
-+#ifdef CONFIG_PREEMPT
-+ rsm psr.i // disable interrupts
-+ cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall
-+(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
-+ ;;
-+ .pred.rel.mutex pUStk,pKStk
-+(pKStk) ld4 r21=[r20] // r21 <- preempt_count
-+(pUStk) mov r21=0 // r21 <- 0
-+ ;;
-+ cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0)
-+#else /* !CONFIG_PREEMPT */
-+#ifdef CONFIG_XEN
-+ movl r2=XSI_PSR_I
-+ ;;
-+(pUStk) st4 [r2]=r0
-+#else
-+(pUStk) rsm psr.i
-+#endif
-+ cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall
-+(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
-+#endif
-+.work_processed_syscall:
-+ adds r2=PT(LOADRS)+16,r12
-+ adds r3=PT(AR_BSPSTORE)+16,r12
-+ adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
-+ ;;
-+(p6) ld4 r31=[r18] // load current_thread_info()->flags
-+ ld8 r19=[r2],PT(B6)-PT(LOADRS) // load ar.rsc value for "loadrs"
-+ mov b7=r0 // clear b7
-+ ;;
-+ ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE) // load ar.bspstore (may be garbage)
-+ ld8 r18=[r2],PT(R9)-PT(B6) // load b6
-+(p6) and r15=TIF_WORK_MASK,r31 // any work other than TIF_SYSCALL_TRACE?
-+ ;;
-+ mov r16=ar.bsp // M2 get existing backing store pointer
-+(p6) cmp4.ne.unc p6,p0=r15, r0 // any special work pending?
-+(p6) br.cond.spnt .work_pending_syscall
-+ ;;
-+ // start restoring the state saved on the kernel stack (struct pt_regs):
-+ ld8 r9=[r2],PT(CR_IPSR)-PT(R9)
-+ ld8 r11=[r3],PT(CR_IIP)-PT(R11)
-+ mov f6=f0 // clear f6
-+ ;;
-+ invala // M0|1 invalidate ALAT
-+#ifdef CONFIG_XEN
-+ movl r29=XSI_PSR_IC
-+ ;;
-+ st8 [r29]=r0 // note: clears both vpsr.i and vpsr.ic!
-+ ;;
-+#else
-+ rsm psr.i | psr.ic // M2 initiate turning off of interrupt and interruption collection
-+#endif
-+ mov f9=f0 // clear f9
-+
-+ ld8 r29=[r2],16 // load cr.ipsr
-+ ld8 r28=[r3],16 // load cr.iip
-+ mov f8=f0 // clear f8
-+ ;;
-+ ld8 r30=[r2],16 // M0|1 load cr.ifs
-+ mov.m ar.ssd=r0 // M2 clear ar.ssd
-+ cmp.eq p9,p0=r0,r0 // set p9 to indicate that we should restore cr.ifs
-+ ;;
-+ ld8 r25=[r3],16 // M0|1 load ar.unat
-+ mov.m ar.csd=r0 // M2 clear ar.csd
-+ mov r22=r0 // clear r22
-+ ;;
-+ ld8 r26=[r2],PT(B0)-PT(AR_PFS) // M0|1 load ar.pfs
-+(pKStk) mov r22=psr // M2 read PSR now that interrupts are disabled
-+ mov f10=f0 // clear f10
-+ ;;
-+ ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // load b0
-+ ld8 r27=[r3],PT(PR)-PT(AR_RSC) // load ar.rsc
-+ mov f11=f0 // clear f11
-+ ;;
-+ ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT) // load ar.rnat (may be garbage)
-+ ld8 r31=[r3],PT(R1)-PT(PR) // load predicates
-+(pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
-+ ;;
-+ ld8 r20=[r2],PT(R12)-PT(AR_FPSR) // load ar.fpsr
-+ ld8.fill r1=[r3],16 // load r1
-+(pUStk) mov r17=1
-+ ;;
-+ srlz.d // M0 ensure interruption collection is off
-+ ld8.fill r13=[r3],16
-+ mov f7=f0 // clear f7
-+ ;;
-+ ld8.fill r12=[r2] // restore r12 (sp)
-+ ld8.fill r15=[r3] // restore r15
-+ addl r3=THIS_CPU(ia64_phys_stacked_size_p8),r0
-+ ;;
-+(pUStk) ld4 r3=[r3] // r3 = cpu_data->phys_stacked_size_p8
-+(pUStk) st1 [r14]=r17
-+ mov b6=r18 // I0 restore b6
-+ ;;
-+ mov r14=r0 // clear r14
-+ shr.u r18=r19,16 // I0|1 get byte size of existing "dirty" partition
-+(pKStk) br.cond.dpnt.many skip_rbs_switch
-+
-+ mov.m ar.ccv=r0 // clear ar.ccv
-+(pNonSys) br.cond.dpnt.many dont_preserve_current_frame
-+ br.cond.sptk.many rbs_switch
-+#ifdef CONFIG_XEN
-+END(xen_leave_syscall)
-+#else
-+END(ia64_leave_syscall)
-+#endif
-+
-+#ifdef CONFIG_XEN
-+GLOBAL_ENTRY(xen_leave_kernel)
-+ PT_REGS_UNWIND_INFO(0)
-+ movl r22=running_on_xen;;
-+ ld4 r22=[r22];;
-+ cmp.eq p7,p0=r22,r0
-+(p7) br.cond.sptk.many __ia64_leave_kernel;;
-+#else
-+GLOBAL_ENTRY(ia64_leave_kernel)
-+ PT_REGS_UNWIND_INFO(0)
-+#endif
-+ /*
-+ * work.need_resched etc. mustn't get changed by this CPU before it returns to
-+ * user- or fsys-mode, hence we disable interrupts early on.
-+ *
-+ * p6 controls whether current_thread_info()->flags needs to be checked for
-+ * extra work. We always check for extra work when returning to user-level.
-+ * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
-+ * is 0. After extra work processing has been completed, execution
-+ * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
-+ * needs to be redone.
-+ */
-+#ifdef CONFIG_PREEMPT
-+ rsm psr.i // disable interrupts
-+ cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel
-+(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
-+ ;;
-+ .pred.rel.mutex pUStk,pKStk
-+(pKStk) ld4 r21=[r20] // r21 <- preempt_count
-+(pUStk) mov r21=0 // r21 <- 0
-+ ;;
-+ cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0)
-+#else
-+#ifdef CONFIG_XEN
-+(pUStk) movl r17=XSI_PSR_I
-+ ;;
-+(pUStk) st4 [r17]=r0
-+ ;;
-+#else
-+(pUStk) rsm psr.i
-+#endif
-+ cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel
-+(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
-+#endif
-+.work_processed_kernel:
-+ adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
-+ ;;
-+(p6) ld4 r31=[r17] // load current_thread_info()->flags
-+ adds r21=PT(PR)+16,r12
-+ ;;
-+
-+ lfetch [r21],PT(CR_IPSR)-PT(PR)
-+ adds r2=PT(B6)+16,r12
-+ adds r3=PT(R16)+16,r12
-+ ;;
-+ lfetch [r21]
-+ ld8 r28=[r2],8 // load b6
-+ adds r29=PT(R24)+16,r12
-+
-+ ld8.fill r16=[r3],PT(AR_CSD)-PT(R16)
-+ adds r30=PT(AR_CCV)+16,r12
-+(p6) and r19=TIF_WORK_MASK,r31 // any work other than TIF_SYSCALL_TRACE?
-+ ;;
-+ ld8.fill r24=[r29]
-+ ld8 r15=[r30] // load ar.ccv
-+(p6) cmp4.ne.unc p6,p0=r19, r0 // any special work pending?
-+ ;;
-+ ld8 r29=[r2],16 // load b7
-+ ld8 r30=[r3],16 // load ar.csd
-+(p6) br.cond.spnt .work_pending
-+ ;;
-+ ld8 r31=[r2],16 // load ar.ssd
-+ ld8.fill r8=[r3],16
-+ ;;
-+ ld8.fill r9=[r2],16
-+ ld8.fill r10=[r3],PT(R17)-PT(R10)
-+ ;;
-+ ld8.fill r11=[r2],PT(R18)-PT(R11)
-+ ld8.fill r17=[r3],16
-+ ;;
-+ ld8.fill r18=[r2],16
-+ ld8.fill r19=[r3],16
-+ ;;
-+ ld8.fill r20=[r2],16
-+ ld8.fill r21=[r3],16
-+ mov ar.csd=r30
-+ mov ar.ssd=r31
-+ ;;
-+#ifdef CONFIG_XEN
-+ movl r22=XSI_PSR_IC
-+ ;;
-+ st8 [r22]=r0 // note: clears both vpsr.i and vpsr.ic!
-+ ;;
-+#else
-+ rsm psr.i | psr.ic // initiate turning off of interrupt and interruption collection
-+#endif
-+ invala // invalidate ALAT
-+ ;;
-+ ld8.fill r22=[r2],24
-+ ld8.fill r23=[r3],24
-+ mov b6=r28
-+ ;;
-+ ld8.fill r25=[r2],16
-+ ld8.fill r26=[r3],16
-+ mov b7=r29
-+ ;;
-+ ld8.fill r27=[r2],16
-+ ld8.fill r28=[r3],16
-+ ;;
-+ ld8.fill r29=[r2],16
-+ ld8.fill r30=[r3],24
-+ ;;
-+ ld8.fill r31=[r2],PT(F9)-PT(R31)
-+ adds r3=PT(F10)-PT(F6),r3
-+ ;;
-+ ldf.fill f9=[r2],PT(F6)-PT(F9)
-+ ldf.fill f10=[r3],PT(F8)-PT(F10)
-+ ;;
-+ ldf.fill f6=[r2],PT(F7)-PT(F6)
-+ ;;
-+ ldf.fill f7=[r2],PT(F11)-PT(F7)
-+ ldf.fill f8=[r3],32
-+ ;;
-+ srlz.i // ensure interruption collection is off
-+ mov ar.ccv=r15
-+ ;;
-+ ldf.fill f11=[r2]
-+#ifdef CONFIG_XEN
-+ ;;
-+ // r16-r31 all now hold bank1 values
-+ movl r2=XSI_BANK1_R16
-+ movl r3=XSI_BANK1_R16+8
-+ ;;
-+ st8.spill [r2]=r16,16
-+ st8.spill [r3]=r17,16
-+ ;;
-+ st8.spill [r2]=r18,16
-+ st8.spill [r3]=r19,16
-+ ;;
-+ st8.spill [r2]=r20,16
-+ st8.spill [r3]=r21,16
-+ ;;
-+ st8.spill [r2]=r22,16
-+ st8.spill [r3]=r23,16
-+ ;;
-+ st8.spill [r2]=r24,16
-+ st8.spill [r3]=r25,16
-+ ;;
-+ st8.spill [r2]=r26,16
-+ st8.spill [r3]=r27,16
-+ ;;
-+ st8.spill [r2]=r28,16
-+ st8.spill [r3]=r29,16
-+ ;;
-+ st8.spill [r2]=r30,16
-+ st8.spill [r3]=r31,16
-+ ;;
-+ movl r2=XSI_BANKNUM;;
-+ st4 [r2]=r0;
-+#else
-+ bsw.0 // switch back to bank 0 (no stop bit required beforehand...)
-+#endif
-+ ;;
-+(pUStk) mov r18=IA64_KR(CURRENT) // M2 (12 cycle read latency)
-+ adds r16=PT(CR_IPSR)+16,r12
-+ adds r17=PT(CR_IIP)+16,r12
-+
-+(pKStk) mov r22=psr // M2 read PSR now that interrupts are disabled
-+ nop.i 0
-+ nop.i 0
-+ ;;
-+ ld8 r29=[r16],16 // load cr.ipsr
-+ ld8 r28=[r17],16 // load cr.iip
-+ ;;
-+ ld8 r30=[r16],16 // load cr.ifs
-+ ld8 r25=[r17],16 // load ar.unat
-+ ;;
-+ ld8 r26=[r16],16 // load ar.pfs
-+ ld8 r27=[r17],16 // load ar.rsc
-+ cmp.eq p9,p0=r0,r0 // set p9 to indicate that we should restore cr.ifs
-+ ;;
-+ ld8 r24=[r16],16 // load ar.rnat (may be garbage)
-+ ld8 r23=[r17],16 // load ar.bspstore (may be garbage)
-+ ;;
-+ ld8 r31=[r16],16 // load predicates
-+ ld8 r21=[r17],16 // load b0
-+ ;;
-+ ld8 r19=[r16],16 // load ar.rsc value for "loadrs"
-+ ld8.fill r1=[r17],16 // load r1
-+ ;;
-+ ld8.fill r12=[r16],16
-+ ld8.fill r13=[r17],16
-+(pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18
-+ ;;
-+ ld8 r20=[r16],16 // ar.fpsr
-+ ld8.fill r15=[r17],16
-+ ;;
-+ ld8.fill r14=[r16],16
-+ ld8.fill r2=[r17]
-+(pUStk) mov r17=1
-+ ;;
-+ ld8.fill r3=[r16]
-+(pUStk) st1 [r18]=r17 // restore current->thread.on_ustack
-+ shr.u r18=r19,16 // get byte size of existing "dirty" partition
-+ ;;
-+ mov r16=ar.bsp // get existing backing store pointer
-+ addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0
-+ ;;
-+ ld4 r17=[r17] // r17 = cpu_data->phys_stacked_size_p8
-+(pKStk) br.cond.dpnt skip_rbs_switch
-+
-+ /*
-+ * Restore user backing store.
-+ *
-+ * NOTE: alloc, loadrs, and cover can't be predicated.
-+ */
-+(pNonSys) br.cond.dpnt dont_preserve_current_frame
-+
-+rbs_switch:
-+#ifdef CONFIG_XEN
-+ XEN_HYPER_COVER;
-+#else
-+ cover // add current frame into dirty partition and set cr.ifs
-+#endif
-+ ;;
-+ mov r19=ar.bsp // get new backing store pointer
-+ sub r16=r16,r18 // krbs = old bsp - size of dirty partition
-+ cmp.ne p9,p0=r0,r0 // clear p9 to skip restore of cr.ifs
-+ ;;
-+ sub r19=r19,r16 // calculate total byte size of dirty partition
-+ add r18=64,r18 // don't force in0-in7 into memory...
-+ ;;
-+ shl r19=r19,16 // shift size of dirty partition into loadrs position
-+ ;;
-+dont_preserve_current_frame:
-+ /*
-+ * To prevent leaking bits between the kernel and user-space,
-+ * we must clear the stacked registers in the "invalid" partition here.
-+ * Not pretty, but at least it's fast (3.34 registers/cycle on Itanium,
-+ * 5 registers/cycle on McKinley).
-+ */
-+# define pRecurse p6
-+# define pReturn p7
-+#ifdef CONFIG_ITANIUM
-+# define Nregs 10
-+#else
-+# define Nregs 14
-+#endif
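-+ // In rough C terms the clearing below is (a sketch only):
-+ // void clear(long bytes_left, long depth) {
-+ // long locs[Nregs - 2] = { 0 }; /* fresh, zeroed stacked regs */
-+ // if (bytes_left > Nregs * 8)
-+ // clear(bytes_left - Nregs * 8, depth + 1);
-+ // /* levels with depth != 0 br.ret; level 0 falls through */
-+ // }
-+ // Recursion is used because each step needs a fresh register frame
-+ // and "alloc" cannot be predicated.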
-+ alloc loc0=ar.pfs,2,Nregs-2,2,0
-+ shr.u loc1=r18,9 // RNaTslots <= floor(dirtySize / (64*8))
-+ sub r17=r17,r18 // r17 = (physStackedSize + 8) - dirtySize
-+ ;;
-+ mov ar.rsc=r19 // load ar.rsc to be used for "loadrs"
-+ shladd in0=loc1,3,r17
-+ mov in1=0
-+ ;;
-+ TEXT_ALIGN(32)
-+rse_clear_invalid:
-+#ifdef CONFIG_ITANIUM
-+ // cycle 0
-+ { .mii
-+ alloc loc0=ar.pfs,2,Nregs-2,2,0
-+ cmp.lt pRecurse,p0=Nregs*8,in0 // if more than Nregs regs left to clear, (re)curse
-+ add out0=-Nregs*8,in0
-+}{ .mfb
-+ add out1=1,in1 // increment recursion count
-+ nop.f 0
-+ nop.b 0 // can't do br.call here because of alloc (WAW on CFM)
-+ ;;
-+}{ .mfi // cycle 1
-+ mov loc1=0
-+ nop.f 0
-+ mov loc2=0
-+}{ .mib
-+ mov loc3=0
-+ mov loc4=0
-+(pRecurse) br.call.sptk.many b0=rse_clear_invalid
-+
-+}{ .mfi // cycle 2
-+ mov loc5=0
-+ nop.f 0
-+ cmp.ne pReturn,p0=r0,in1 // if recursion count != 0, we need to do a br.ret
-+}{ .mib
-+ mov loc6=0
-+ mov loc7=0
-+(pReturn) br.ret.sptk.many b0
-+}
-+#else /* !CONFIG_ITANIUM */
-+ alloc loc0=ar.pfs,2,Nregs-2,2,0
-+ cmp.lt pRecurse,p0=Nregs*8,in0 // if more than Nregs regs left to clear, (re)curse
-+ add out0=-Nregs*8,in0
-+ add out1=1,in1 // increment recursion count
-+ mov loc1=0
-+ mov loc2=0
-+ ;;
-+ mov loc3=0
-+ mov loc4=0
-+ mov loc5=0
-+ mov loc6=0
-+ mov loc7=0
-+(pRecurse) br.call.sptk.few b0=rse_clear_invalid
-+ ;;
-+ mov loc8=0
-+ mov loc9=0
-+ cmp.ne pReturn,p0=r0,in1 // if recursion count != 0, we need to do a br.ret
-+ mov loc10=0
-+ mov loc11=0
-+(pReturn) br.ret.sptk.many b0
-+#endif /* !CONFIG_ITANIUM */
-+# undef pRecurse
-+# undef pReturn
-+ ;;
-+ alloc r17=ar.pfs,0,0,0,0 // drop current register frame
-+ ;;
-+ loadrs
-+ ;;
-+skip_rbs_switch:
-+ mov ar.unat=r25 // M2
-+(pKStk) extr.u r22=r22,21,1 // I0 extract current value of psr.pp from r22
-+(pLvSys)mov r19=r0 // A clear r19 for leave_syscall, no-op otherwise
-+ ;;
-+(pUStk) mov ar.bspstore=r23 // M2
-+(pKStk) dep r29=r22,r29,21,1 // I0 update ipsr.pp with psr.pp
-+(pLvSys)mov r16=r0 // A clear r16 for leave_syscall, no-op otherwise
-+ ;;
-+#ifdef CONFIG_XEN
-+ movl r25=XSI_IPSR
-+ ;;
-+ st8 [r25]=r29,XSI_IFS-XSI_IPSR
-+ ;;
-+#else
-+ mov cr.ipsr=r29 // M2
-+#endif
-+ mov ar.pfs=r26 // I0
-+(pLvSys)mov r17=r0 // A clear r17 for leave_syscall, no-op otherwise
-+
-+#ifdef CONFIG_XEN
-+(p9) st8 [r25]=r30
-+ ;;
-+ adds r25=XSI_IIP-XSI_IFS,r25
-+ ;;
-+#else
-+(p9) mov cr.ifs=r30 // M2
-+#endif
-+ mov b0=r21 // I0
-+(pLvSys)mov r18=r0 // A clear r18 for leave_syscall, no-op otherwise
-+
-+ mov ar.fpsr=r20 // M2
-+#ifdef CONFIG_XEN
-+ st8 [r25]=r28
-+#else
-+ mov cr.iip=r28 // M2
-+#endif
-+ nop 0
-+ ;;
-+(pUStk) mov ar.rnat=r24 // M2 must happen with RSE in lazy mode
-+ nop 0
-+(pLvSys)mov r2=r0
-+
-+ mov ar.rsc=r27 // M2
-+ mov pr=r31,-1 // I0
-+#ifdef CONFIG_XEN
-+ ;;
-+ XEN_HYPER_RFI;
-+#else
-+ rfi // B
-+#endif
-+
-+ /*
-+ * On entry:
-+ * r20 = &current->thread_info->pre_count (if CONFIG_PREEMPT)
-+ * r31 = current->thread_info->flags
-+ * On exit:
-+ * p6 = TRUE if work-pending-check needs to be redone
-+ */
-+.work_pending_syscall:
-+ add r2=-8,r2
-+ add r3=-8,r3
-+ ;;
-+ st8 [r2]=r8
-+ st8 [r3]=r10
-+.work_pending:
-+ tbit.nz p6,p0=r31,TIF_SIGDELAYED // signal delayed from MCA/INIT/NMI/PMI context?
-+(p6) br.cond.sptk.few .sigdelayed
-+ ;;
-+ tbit.z p6,p0=r31,TIF_NEED_RESCHED // current_thread_info()->need_resched==0?
-+(p6) br.cond.sptk.few .notify
-+#ifdef CONFIG_PREEMPT
-+(pKStk) dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1
-+ ;;
-+(pKStk) st4 [r20]=r21
-+ ssm psr.i // enable interrupts
-+#endif
-+ br.call.spnt.many rp=schedule
-+.ret9: cmp.eq p6,p0=r0,r0 // p6 <- 1
-+#ifdef CONFIG_XEN
-+ movl r2=XSI_PSR_I
-+ ;;
-+ st4 [r2]=r0
-+#else
-+ rsm psr.i // disable interrupts
-+#endif
-+ ;;
-+#ifdef CONFIG_PREEMPT
-+(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
-+ ;;
-+(pKStk) st4 [r20]=r0 // preempt_count() <- 0
-+#endif
-+(pLvSys)br.cond.sptk.few .work_pending_syscall_end
-+ br.cond.sptk.many .work_processed_kernel // re-check
-+
-+.notify:
-+(pUStk) br.call.spnt.many rp=notify_resume_user
-+.ret10: cmp.ne p6,p0=r0,r0 // p6 <- 0
-+(pLvSys)br.cond.sptk.few .work_pending_syscall_end
-+ br.cond.sptk.many .work_processed_kernel // don't re-check
-+
-+// There is a delayed signal that was detected in MCA/INIT/NMI/PMI context where
-+// it could not be delivered. Deliver it now. The signal might be for us and
-+// may set TIF_SIGPENDING, so redrive ia64_leave_* after processing the delayed
-+// signal.
-+
-+.sigdelayed:
-+ br.call.sptk.many rp=do_sigdelayed
-+ cmp.eq p6,p0=r0,r0 // p6 <- 1, always re-check
-+(pLvSys)br.cond.sptk.few .work_pending_syscall_end
-+ br.cond.sptk.many .work_processed_kernel // re-check
-+
-+.work_pending_syscall_end:
-+ adds r2=PT(R8)+16,r12
-+ adds r3=PT(R10)+16,r12
-+ ;;
-+ ld8 r8=[r2]
-+ ld8 r10=[r3]
-+ br.cond.sptk.many .work_processed_syscall // re-check
-+
-+#ifdef CONFIG_XEN
-+END(xen_leave_kernel)
-+#else
-+END(ia64_leave_kernel)
-+#endif
-diff --git a/arch/ia64/xen/xenhpski.c b/arch/ia64/xen/xenhpski.c
-new file mode 100644
-index 0000000..3bc6cdb
---- /dev/null
-+++ b/arch/ia64/xen/xenhpski.c
-@@ -0,0 +1,19 @@
-+
-+extern unsigned long xen_get_cpuid(int);
-+
-+int
-+running_on_sim(void)
-+{
-+ int i;
-+ long cpuid[6];
-+
-+ for (i = 0; i < 5; ++i)
-+ cpuid[i] = xen_get_cpuid(i);
-+ if ((cpuid[0] & 0xff) != 'H') return 0;
-+ if ((cpuid[3] & 0xff) != 0x4) return 0;
-+ if (((cpuid[3] >> 8) & 0xff) != 0x0) return 0;
-+ if (((cpuid[3] >> 16) & 0xff) != 0x0) return 0;
-+ if (((cpuid[3] >> 24) & 0x7) != 0x7) return 0;
-+ return 1;
-+}
-+
-diff --git a/arch/ia64/xen/xenivt.S b/arch/ia64/xen/xenivt.S
-new file mode 100644
-index 0000000..d53b52a
---- /dev/null
-+++ b/arch/ia64/xen/xenivt.S
-@@ -0,0 +1,2044 @@
-+/*
-+ * arch/ia64/xen/ivt.S
-+ *
-+ * Copyright (C) 2005 Hewlett-Packard Co
-+ * Dan Magenheimer <dan.magenheimer@hp.com>
-+ */
-+/*
-+ * This file defines the interruption vector table used by the CPU.
-+ * It does not include one entry per possible cause of interruption.
-+ *
-+ * The first 20 entries of the table contain 64 bundles each while the
-+ * remaining 48 entries contain only 16 bundles each.
-+ *
-+ * The 64 bundles are used to allow inlining the whole handler for critical
-+ * interruptions like TLB misses.
-+ *
-+ * For each entry, the comment is as follows:
-+ *
-+ * // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
-+ * entry offset ----/ / / / /
-+ * entry number ---------/ / / /
-+ * size of the entry -------------/ / /
-+ * vector name -------------------------------------/ /
-+ * interruptions triggering this vector ----------------------/
-+ *
-+ * The table is 32KB in size and must be aligned on 32KB boundary.
-+ * (The CPU ignores the 15 lower bits of the address)
-+ *
-+ * Table is based upon EAS2.6 (Oct 1999)
-+ */
-+
-+#include <linux/config.h>
-+
-+#include <asm/asmmacro.h>
-+#include <asm/break.h>
-+#include <asm/ia32.h>
-+#include <asm/kregs.h>
-+#include <asm/asm-offsets.h>
-+#include <asm/pgtable.h>
-+#include <asm/processor.h>
-+#include <asm/ptrace.h>
-+#include <asm/system.h>
-+#include <asm/thread_info.h>
-+#include <asm/unistd.h>
-+#include <asm/errno.h>
-+
-+#ifdef CONFIG_XEN
-+#define ia64_ivt xen_ivt
-+#endif
-+
-+#if 1
-+# define PSR_DEFAULT_BITS psr.ac
-+#else
-+# define PSR_DEFAULT_BITS 0
-+#endif
-+
-+#if 0
-+ /*
-+ * This lets you track the last eight faults that occurred on the CPU. Make sure ar.k2 isn't
-+ * needed for something else before enabling this...
-+ */
-+# define DBG_FAULT(i) mov r16=ar.k2;; shl r16=r16,8;; add r16=(i),r16;;mov ar.k2=r16
-+#else
-+# define DBG_FAULT(i)
-+#endif
-+
-+#define MINSTATE_VIRT /* needed by minstate.h */
-+#include "xenminstate.h"
-+
-+#define FAULT(n) \
-+ mov r31=pr; \
-+ mov r19=n;; /* prepare to save predicates */ \
-+ br.sptk.many dispatch_to_fault_handler
-+
-+ .section .text.ivt,"ax"
-+
-+ .align 32768 // align on 32KB boundary
-+ .global ia64_ivt
-+ia64_ivt:
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
-+ENTRY(vhpt_miss)
-+ DBG_FAULT(0)
-+ /*
-+ * The VHPT vector is invoked when the TLB entry for the virtual page table
-+ * is missing. This happens only as a result of a previous
-+ * (the "original") TLB miss, which may either be caused by an instruction
-+ * fetch or a data access (or non-access).
-+ *
-+ * What we do here is normal TLB miss handling for the _original_ miss, followed
-+ * by inserting the TLB entry for the virtual page table page that the VHPT
-+ * walker was attempting to access. The latter gets inserted as long
-+ * as both L1 and L2 have valid mappings for the faulting address.
-+ * The TLB entry for the original miss gets inserted only if
-+ * the L3 entry indicates that the page is present.
-+ *
-+ * do_page_fault gets invoked in the following cases:
-+ * - the faulting virtual address uses unimplemented address bits
-+ * - the faulting virtual address has no L1, L2, or L3 mapping
-+ */
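-+ /*
-+ * Roughly, in C (a sketch; region-5 and hugepage handling omitted,
-+ * index helpers are pseudo-names for the address arithmetic below):
-+ * l1 = pgd[pgd_index(ifa)]; if (!l1) goto page_fault;
-+ * l2 = l1[pmd_index(ifa)]; if (!l2) goto page_fault;
-+ * pte = l2[pte_index(ifa)]; if (!(pte & _PAGE_P)) goto page_fault;
-+ * itc(pte); // plus a second itc for the VHPT page itself
-+ */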
-+#ifdef CONFIG_XEN
-+ movl r16=XSI_IFA
-+ ;;
-+ ld8 r16=[r16]
-+#ifdef CONFIG_HUGETLB_PAGE
-+ movl r18=PAGE_SHIFT
-+ movl r25=XSI_ITIR
-+ ;;
-+ ld8 r25=[r25]
-+#endif
-+ ;;
-+#else
-+ mov r16=cr.ifa // get address that caused the TLB miss
-+#ifdef CONFIG_HUGETLB_PAGE
-+ movl r18=PAGE_SHIFT
-+ mov r25=cr.itir
-+#endif
-+#endif
-+ ;;
-+#ifdef CONFIG_XEN
-+ XEN_HYPER_RSM_PSR_DT;
-+#else
-+ rsm psr.dt // use physical addressing for data
-+#endif
-+ mov r31=pr // save the predicate registers
-+ mov r19=IA64_KR(PT_BASE) // get page table base address
-+ shl r21=r16,3 // shift bit 60 into sign bit
-+ shr.u r17=r16,61 // get the region number into r17
-+ ;;
-+ shr r22=r21,3
-+#ifdef CONFIG_HUGETLB_PAGE
-+ extr.u r26=r25,2,6
-+ ;;
-+ cmp.ne p8,p0=r18,r26
-+ sub r27=r26,r18
-+ ;;
-+(p8) dep r25=r18,r25,2,6
-+(p8) shr r22=r22,r27
-+#endif
-+ ;;
-+ cmp.eq p6,p7=5,r17 // is IFA pointing into region 5?
-+ shr.u r18=r22,PGDIR_SHIFT // get bits 33-63 of the faulting address
-+ ;;
-+(p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place
-+
-+ srlz.d
-+ LOAD_PHYSICAL(p6, r19, swapper_pg_dir) // region 5 is rooted at swapper_pg_dir
-+
-+ .pred.rel "mutex", p6, p7
-+(p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
-+(p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
-+ ;;
-+(p6) dep r17=r18,r19,3,(PAGE_SHIFT-3) // r17=PTA + IFA(33,42)*8
-+(p7) dep r17=r18,r17,3,(PAGE_SHIFT-6) // r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
-+ cmp.eq p7,p6=0,r21 // unused address bits all zeroes?
-+ shr.u r18=r22,PMD_SHIFT // shift L2 index into position
-+ ;;
-+ ld8 r17=[r17] // fetch the L1 entry (may be 0)
-+ ;;
-+(p7) cmp.eq p6,p7=r17,r0 // was L1 entry NULL?
-+ dep r17=r18,r17,3,(PAGE_SHIFT-3) // compute address of L2 page table entry
-+ ;;
-+(p7) ld8 r20=[r17] // fetch the L2 entry (may be 0)
-+ shr.u r19=r22,PAGE_SHIFT // shift L3 index into position
-+ ;;
-+(p7) cmp.eq.or.andcm p6,p7=r20,r0 // was L2 entry NULL?
-+ dep r21=r19,r20,3,(PAGE_SHIFT-3) // compute address of L3 page table entry
-+ ;;
-+#ifdef CONFIG_XEN
-+(p7) ld8 r18=[r21] // read the L3 PTE
-+ movl r19=XSI_ISR
-+ ;;
-+ ld8 r19=[r19]
-+ ;;
-+(p7) tbit.z p6,p7=r18,_PAGE_P_BIT // page present bit cleared?
-+ movl r22=XSI_IHA
-+ ;;
-+ ld8 r22=[r22]
-+ ;;
-+#else
-+(p7) ld8 r18=[r21] // read the L3 PTE
-+ mov r19=cr.isr // cr.isr bit 0 tells us if this is an insn miss
-+ ;;
-+(p7) tbit.z p6,p7=r18,_PAGE_P_BIT // page present bit cleared?
-+ mov r22=cr.iha // get the VHPT address that caused the TLB miss
-+ ;; // avoid RAW on p7
-+#endif
-+(p7) tbit.nz.unc p10,p11=r19,32 // is it an instruction TLB miss?
-+ dep r23=0,r20,0,PAGE_SHIFT // clear low bits to get page address
-+ ;;
-+#ifdef CONFIG_XEN
-+ mov r24=r8
-+ mov r8=r18
-+ ;;
-+(p10) XEN_HYPER_ITC_D
-+ ;;
-+(p11) XEN_HYPER_ITC_I
-+ ;;
-+ mov r8=r24
-+ ;;
-+(p6) br.cond.spnt.many page_fault // handle bad address/page not present (page fault)
-+ ;;
-+ movl r24=XSI_IFA
-+ ;;
-+ st8 [r24]=r22
-+ ;;
-+#else
-+(p10) itc.i r18 // insert the instruction TLB entry
-+(p11) itc.d r18 // insert the data TLB entry
-+(p6) br.cond.spnt.many page_fault // handle bad address/page not present (page fault)
-+ mov cr.ifa=r22
-+#endif
-+
-+#ifdef CONFIG_HUGETLB_PAGE
-+(p8) mov cr.itir=r25 // change to default page-size for VHPT
-+#endif
-+
-+ /*
-+ * Now compute and insert the TLB entry for the virtual page table. We never
-+ * execute in a page table page so there is no need to set the exception deferral
-+ * bit.
-+ */
-+ adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23
-+ ;;
-+#ifdef CONFIG_XEN
-+(p7) mov r25=r8
-+(p7) mov r8=r24
-+ ;;
-+(p7) XEN_HYPER_ITC_D
-+ ;;
-+(p7) mov r8=r25
-+ ;;
-+#else
-+(p7) itc.d r24
-+#endif
-+ ;;
-+#ifdef CONFIG_SMP
-+ /*
-+ * Tell the assembler's dependency-violation checker that the above "itc" instructions
-+ * cannot possibly affect the following loads:
-+ */
-+ dv_serialize_data
-+
-+ /*
-+ * Re-check L2 and L3 pagetable. If they changed, we may have received a ptc.g
-+ * between reading the pagetable and the "itc". If so, flush the entry we
-+ * inserted and retry.
-+ */
-+ ld8 r25=[r21] // read L3 PTE again
-+ ld8 r26=[r17] // read L2 entry again
-+ ;;
-+ cmp.ne p6,p7=r26,r20 // did L2 entry change
-+ mov r27=PAGE_SHIFT<<2
-+ ;;
-+(p6) ptc.l r22,r27 // purge PTE page translation
-+(p7) cmp.ne.or.andcm p6,p7=r25,r18 // did L3 PTE change
-+ ;;
-+(p6) ptc.l r16,r27 // purge translation
-+#endif
-+
-+ mov pr=r31,-1 // restore predicate registers
-+#ifdef CONFIG_XEN
-+ XEN_HYPER_RFI;
-+#else
-+ rfi
-+#endif
-+END(vhpt_miss)
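
The walk above is a standard three-level lookup performed with physical
addresses. Below is a minimal C sketch of the same logic, assuming
placeholder shift constants and a flat view of memory (the real handler
also folds the region number into the pgd index, which is omitted here);
three_level_walk and every constant in it are illustrative names, not
kernel APIs.

    #include <stdint.h>

    /* Placeholder constants standing in for the kernel's definitions. */
    #define PAGE_SHIFT  14                            /* e.g. 16KB pages */
    #define PMD_SHIFT   (PAGE_SHIFT + PAGE_SHIFT - 3)
    #define PGDIR_SHIFT (PMD_SHIFT + PAGE_SHIFT - 3)
    #define PTRS_MASK   ((1UL << (PAGE_SHIFT - 3)) - 1)
    #define PAGE_MASK   (~((1UL << PAGE_SHIFT) - 1))
    #define _PAGE_P     1UL                           /* present bit */

    /* Returns the L3 PTE for addr, or 0 when any level is missing --
     * exactly the cases in which the assembly sets p6 and branches to
     * page_fault. */
    static uint64_t three_level_walk(const uint64_t *pgd_base, uint64_t addr)
    {
        uint64_t pgd = pgd_base[(addr >> PGDIR_SHIFT) & PTRS_MASK];
        if (pgd == 0)
            return 0;                                 /* L1 entry NULL */
        const uint64_t *pmd_base =
            (const uint64_t *)(uintptr_t)(pgd & PAGE_MASK);
        uint64_t pmd = pmd_base[(addr >> PMD_SHIFT) & PTRS_MASK];
        if (pmd == 0)
            return 0;                                 /* L2 entry NULL */
        const uint64_t *pte_base =
            (const uint64_t *)(uintptr_t)(pmd & PAGE_MASK);
        uint64_t pte = pte_base[(addr >> PAGE_SHIFT) & PTRS_MASK];
        if (!(pte & _PAGE_P))
            return 0;                                 /* page not present */
        return pte;                                   /* what itc inserts */
    }
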
-+
-+ .org ia64_ivt+0x400
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
-+ENTRY(itlb_miss)
-+ DBG_FAULT(1)
-+ /*
-+ * The ITLB handler accesses the L3 PTE via the virtually mapped linear
-+ * page table. If a nested TLB miss occurs, we switch into physical
-+ * mode, walk the page table, and then re-execute the L3 PTE read
-+ * and go on normally after that.
-+ */
-+#ifdef CONFIG_XEN
-+ movl r16=XSI_IFA
-+ ;;
-+ ld8 r16=[r16]
-+#else
-+ mov r16=cr.ifa // get virtual address
-+#endif
-+ mov r29=b0 // save b0
-+ mov r31=pr // save predicates
-+.itlb_fault:
-+#ifdef CONFIG_XEN
-+ movl r17=XSI_IHA
-+ ;;
-+ ld8 r17=[r17] // get virtual address of L3 PTE
-+#else
-+ mov r17=cr.iha // get virtual address of L3 PTE
-+#endif
-+ movl r30=1f // load nested fault continuation point
-+ ;;
-+1: ld8 r18=[r17] // read L3 PTE
-+ ;;
-+ mov b0=r29
-+ tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared?
-+(p6) br.cond.spnt page_fault
-+ ;;
-+#ifdef CONFIG_XEN
-+ mov r19=r8
-+ mov r8=r18
-+ ;;
-+ XEN_HYPER_ITC_I
-+ ;;
-+ mov r8=r19
-+#else
-+ itc.i r18
-+#endif
-+ ;;
-+#ifdef CONFIG_SMP
-+ /*
-+ * Tell the assembler's dependency-violation checker that the above "itc" instructions
-+ * cannot possibly affect the following loads:
-+ */
-+ dv_serialize_data
-+
-+ ld8 r19=[r17] // read L3 PTE again and see if same
-+ mov r20=PAGE_SHIFT<<2 // setup page size for purge
-+ ;;
-+ cmp.ne p7,p0=r18,r19
-+ ;;
-+(p7) ptc.l r16,r20
-+#endif
-+ mov pr=r31,-1
-+#ifdef CONFIG_XEN
-+ XEN_HYPER_RFI;
-+#else
-+ rfi
-+#endif
-+END(itlb_miss)
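
Every CONFIG_XEN block in these handlers follows the same substitution:
a privileged control-register read such as "mov r16=cr.ifa" becomes a
plain load from a shared-info page that the hypervisor keeps current.
Here is a hedged C sketch of the pattern; XSI_BASE and the offset are
made-up placeholders, not the real Xen/ia64 ABI values.

    #include <stdint.h>

    #define XSI_BASE 0xf100000000000000UL  /* hypothetical mapping address */
    #define XSI_IFA  (XSI_BASE + 0x48)     /* hypothetical offset          */

    static inline uint64_t xsi_read(uint64_t addr)
    {
        /* the "movl rX=XSI_...;; ld8 rX=[rX]" sequence above */
        return *(volatile uint64_t *)(uintptr_t)addr;
    }

    /* native:    mov r16=cr.ifa
     * paravirt:  movl r16=XSI_IFA ;; ld8 r16=[r16] */
    static uint64_t read_ifa(void) { return xsi_read(XSI_IFA); }
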
-+
-+ .org ia64_ivt+0x0800
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
-+ENTRY(dtlb_miss)
-+ DBG_FAULT(2)
-+ /*
-+ * The DTLB handler accesses the L3 PTE via the virtually mapped linear
-+ * page table. If a nested TLB miss occurs, we switch into physical
-+ * mode, walk the page table, and then re-execute the L3 PTE read
-+ * and go on normally after that.
-+ */
-+#ifdef CONFIG_XEN
-+ movl r16=XSI_IFA
-+ ;;
-+ ld8 r16=[r16]
-+#else
-+ mov r16=cr.ifa // get virtual address
-+#endif
-+ mov r29=b0 // save b0
-+ mov r31=pr // save predicates
-+dtlb_fault:
-+#ifdef CONFIG_XEN
-+ movl r17=XSI_IHA
-+ ;;
-+ ld8 r17=[r17] // get virtual address of L3 PTE
-+#else
-+ mov r17=cr.iha // get virtual address of L3 PTE
-+#endif
-+ movl r30=1f // load nested fault continuation point
-+ ;;
-+1: ld8 r18=[r17] // read L3 PTE
-+ ;;
-+ mov b0=r29
-+ tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared?
-+(p6) br.cond.spnt page_fault
-+ ;;
-+#ifdef CONFIG_XEN
-+ mov r19=r8
-+ mov r8=r18
-+ ;;
-+ XEN_HYPER_ITC_D
-+ ;;
-+ mov r8=r19
-+ ;;
-+#else
-+ itc.d r18
-+#endif
-+ ;;
-+#ifdef CONFIG_SMP
-+ /*
-+ * Tell the assembler's dependency-violation checker that the above "itc" instructions
-+ * cannot possibly affect the following loads:
-+ */
-+ dv_serialize_data
-+
-+ ld8 r19=[r17] // read L3 PTE again and see if same
-+ mov r20=PAGE_SHIFT<<2 // setup page size for purge
-+ ;;
-+ cmp.ne p7,p0=r18,r19
-+ ;;
-+(p7) ptc.l r16,r20
-+#endif
-+ mov pr=r31,-1
-+#ifdef CONFIG_XEN
-+ XEN_HYPER_RFI;
-+#else
-+ rfi
-+#endif
-+END(dtlb_miss)
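
The CONFIG_SMP tail shared by these miss handlers is a small lock-free
protocol: insert the translation, re-read the PTE, and purge the entry
just inserted if a concurrent ptc.g changed the PTE in between. A C
sketch follows, under the assumption of hypothetical tlb_insert and
tlb_purge helpers standing in for itc and ptc.l.

    #include <stdint.h>

    #define PAGE_SHIFT 14  /* placeholder */

    /* Hypothetical stand-ins for the itc and ptc.l instructions. */
    static void tlb_insert(uint64_t pte)                 { (void)pte; }
    static void tlb_purge(uint64_t vaddr, uint64_t size) { (void)vaddr; (void)size; }

    static void insert_and_revalidate(volatile uint64_t *ptep,
                                      uint64_t pte, uint64_t vaddr)
    {
        tlb_insert(pte);                 /* itc.i r18 / itc.d r18          */
        uint64_t again = *ptep;          /* ld8 r19=[r17]; dv_serialize_data
                                          * keeps this load after the itc  */
        if (again != pte)                /* cmp.ne p7,p0=r18,r19           */
            tlb_purge(vaddr, PAGE_SHIFT << 2);  /* (p7) ptc.l r16,r20      */
    }
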
-+
-+ .org ia64_ivt+0x0c00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
-+ENTRY(alt_itlb_miss)
-+ DBG_FAULT(3)
-+#ifdef CONFIG_XEN
-+ movl r31=XSI_IPSR
-+ ;;
-+ ld8 r21=[r31],XSI_IFA-XSI_IPSR // get ipsr, point to ifa
-+ movl r17=PAGE_KERNEL
-+ ;;
-+ movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
-+ ;;
-+ ld8 r16=[r31] // get ifa
-+ mov r31=pr
-+ ;;
-+#else
-+ mov r16=cr.ifa // get address that caused the TLB miss
-+ movl r17=PAGE_KERNEL
-+ mov r21=cr.ipsr
-+ movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
-+ mov r31=pr
-+ ;;
-+#endif
-+#ifdef CONFIG_DISABLE_VHPT
-+ shr.u r22=r16,61 // get the region number into r22
-+ ;;
-+ cmp.gt p8,p0=6,r22 // user mode
-+ ;;
-+#ifndef CONFIG_XEN
-+(p8) thash r17=r16
-+ ;;
-+(p8) mov cr.iha=r17
-+#endif
-+(p8) mov r29=b0 // save b0
-+(p8) br.cond.dptk .itlb_fault
-+#endif
-+ extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl
-+ and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
-+ shr.u r18=r16,57 // move address bit 61 to bit 4
-+ ;;
-+ andcm r18=0x10,r18 // bit 4=~address-bit(61)
-+ cmp.ne p8,p0=r0,r23 // psr.cpl != 0?
-+ or r19=r17,r19 // insert PTE control bits into r19
-+ ;;
-+ or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6
-+(p8) br.cond.spnt page_fault
-+ ;;
-+#ifdef CONFIG_XEN
-+ mov r18=r8
-+ mov r8=r19
-+ ;;
-+ XEN_HYPER_ITC_I
-+ ;;
-+ mov r8=r18
-+ ;;
-+ mov pr=r31,-1
-+ ;;
-+ XEN_HYPER_RFI;
-+#else
-+ itc.i r19 // insert the TLB entry
-+ mov pr=r31,-1
-+ rfi
-+#endif
-+END(alt_itlb_miss)
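
Rather than walking page tables, the alternate miss handlers fabricate
an identity-mapping kernel PTE straight from the faulting address; the
only subtlety is the bit-4 trick that marks region 6 uncacheable. The
computation in C, with PAGE_KERNEL and IA64_MAX_PHYS_BITS used as
placeholder values:

    #include <stdint.h>

    #define IA64_MAX_PHYS_BITS 50       /* placeholder */
    #define PAGE_KERNEL        0x7b1UL  /* placeholder PTE control bits */

    static uint64_t alt_miss_pte(uint64_t ifa)
    {
        uint64_t phys_mask = ((1UL << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL;
        uint64_t pte = (ifa & phys_mask) | PAGE_KERNEL;  /* and + or */
        /* andcm r18=0x10,r18: bit 4 = ~address-bit(61), so region 6
         * (0xc...) maps uncached while region 7 (0xe...) maps cached */
        pte |= 0x10UL & ~(ifa >> 57);
        return pte;
    }
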
-+
-+ .org ia64_ivt+0x1000
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
-+ENTRY(alt_dtlb_miss)
-+ DBG_FAULT(4)
-+#ifdef CONFIG_XEN
-+ movl r31=XSI_IPSR
-+ ;;
-+ ld8 r21=[r31],XSI_ISR-XSI_IPSR // get ipsr, point to isr
-+ movl r17=PAGE_KERNEL
-+ ;;
-+ ld8 r20=[r31],XSI_IFA-XSI_ISR // get isr, point to ifa
-+ movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
-+ ;;
-+ ld8 r16=[r31] // get ifa
-+ mov r31=pr
-+ ;;
-+#else
-+ mov r16=cr.ifa // get address that caused the TLB miss
-+ movl r17=PAGE_KERNEL
-+ mov r20=cr.isr
-+ movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
-+ mov r21=cr.ipsr
-+ mov r31=pr
-+ ;;
-+#endif
-+#ifdef CONFIG_DISABLE_VHPT
-+ shr.u r22=r16,61 // get the region number into r22
-+ ;;
-+ cmp.gt p8,p0=6,r22 // access to region 0-5
-+ ;;
-+#ifndef CONFIG_XEN
-+(p8) thash r17=r16
-+ ;;
-+(p8) mov cr.iha=r17
-+#endif
-+(p8) mov r29=b0 // save b0
-+(p8) br.cond.dptk dtlb_fault
-+#endif
-+ extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl
-+ and r22=IA64_ISR_CODE_MASK,r20 // get the isr.code field
-+ tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on?
-+ shr.u r18=r16,57 // move address bit 61 to bit 4
-+ and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
-+ tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
-+ ;;
-+ andcm r18=0x10,r18 // bit 4=~address-bit(61)
-+ cmp.ne p8,p0=r0,r23
-+(p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field
-+(p8) br.cond.spnt page_fault
-+
-+ dep r21=-1,r21,IA64_PSR_ED_BIT,1
-+ or r19=r19,r17 // insert PTE control bits into r19
-+ ;;
-+ or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6
-+(p6) mov cr.ipsr=r21
-+ ;;
-+#ifdef CONFIG_XEN
-+(p7) mov r18=r8
-+(p7) mov r8=r19
-+ ;;
-+(p7) XEN_HYPER_ITC_D
-+ ;;
-+(p7) mov r8=r18
-+ ;;
-+ mov pr=r31,-1
-+ ;;
-+ XEN_HYPER_RFI;
-+#else
-+(p7) itc.d r19 // insert the TLB entry
-+ mov pr=r31,-1
-+ rfi
-+#endif
-+END(alt_dtlb_miss)
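
alt_dtlb_miss additionally filters speculative accesses: a speculative
load, or a non-access lfetch, should not take a page fault, so the
handler sets ipsr.ed and lets the access quietly produce a NaT instead
of inserting a translation. The predicate computation is sketched below
in C with placeholder bit positions; the real encodings live in the asm
headers.

    #include <stdint.h>
    #include <stdbool.h>

    /* Placeholder encodings. */
    #define ISR_SP_BIT      36
    #define ISR_NA_BIT      37
    #define ISR_CODE_MASK   0xfUL
    #define ISR_CODE_LFETCH 4UL

    /* p6 in the assembly: true when the fault is deferred via ipsr.ed
     * rather than serviced or forwarded to page_fault. */
    static bool defer_with_ed(uint64_t isr)
    {
        bool speculative = (isr >> ISR_SP_BIT) & 1;   /* tbit.nz p6,p7   */
        bool non_access  = (isr >> ISR_NA_BIT) & 1;   /* tbit.nz p9,p0   */
        bool lfetch      = (isr & ISR_CODE_MASK) == ISR_CODE_LFETCH;
        return speculative || (non_access && lfetch); /* cmp.eq.or.andcm */
    }
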
-+
-+ .org ia64_ivt+0x1400
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
-+ENTRY(nested_dtlb_miss)
-+ /*
-+ * In the absence of kernel bugs, we get here when the virtually mapped linear
-+ * page table is accessed non-speculatively (e.g., in the Dirty-bit, Instruction
-+ * Access-bit, or Data Access-bit faults). If the DTLB entry for the virtual page
-+ * table is missing, a nested TLB miss fault is triggered and control is
-+ * transferred to this point. When this happens, we lookup the pte for the
-+ * faulting address by walking the page table in physical mode and return to the
-+ * continuation point passed in register r30 (or call page_fault if the address is
-+ * not mapped).
-+ *
-+ * Input: r16: faulting address
-+ * r29: saved b0
-+ * r30: continuation address
-+ * r31: saved pr
-+ *
-+ * Output: r17: physical address of L3 PTE of faulting address
-+ * r29: saved b0
-+ * r30: continuation address
-+ * r31: saved pr
-+ *
-+ * Clobbered: b0, r18, r19, r21, psr.dt (cleared)
-+ */
-+#ifdef CONFIG_XEN
-+ XEN_HYPER_RSM_PSR_DT;
-+#else
-+ rsm psr.dt // switch to using physical data addressing
-+#endif
-+ mov r19=IA64_KR(PT_BASE) // get the page table base address
-+ shl r21=r16,3 // shift bit 60 into sign bit
-+ ;;
-+ shr.u r17=r16,61 // get the region number into r17
-+ ;;
-+ cmp.eq p6,p7=5,r17 // is faulting address in region 5?
-+ shr.u r18=r16,PGDIR_SHIFT // get bits 33-63 of faulting address
-+ ;;
-+(p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place
-+
-+ srlz.d
-+ LOAD_PHYSICAL(p6, r19, swapper_pg_dir) // region 5 is rooted at swapper_pg_dir
-+
-+ .pred.rel "mutex", p6, p7
-+(p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
-+(p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
-+ ;;
-+(p6) dep r17=r18,r19,3,(PAGE_SHIFT-3) // r17=PTA + IFA(33,42)*8
-+(p7) dep r17=r18,r17,3,(PAGE_SHIFT-6) // r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
-+ cmp.eq p7,p6=0,r21 // unused address bits all zeroes?
-+ shr.u r18=r16,PMD_SHIFT // shift L2 index into position
-+ ;;
-+ ld8 r17=[r17] // fetch the L1 entry (may be 0)
-+ ;;
-+(p7) cmp.eq p6,p7=r17,r0 // was L1 entry NULL?
-+ dep r17=r18,r17,3,(PAGE_SHIFT-3) // compute address of L2 page table entry
-+ ;;
-+(p7) ld8 r17=[r17] // fetch the L2 entry (may be 0)
-+ shr.u r19=r16,PAGE_SHIFT // shift L3 index into position
-+ ;;
-+(p7) cmp.eq.or.andcm p6,p7=r17,r0 // was L2 entry NULL?
-+ dep r17=r19,r17,3,(PAGE_SHIFT-3) // compute address of L3 page table entry
-+(p6) br.cond.spnt page_fault
-+ mov b0=r30
-+ br.sptk.many b0 // return to continuation point
-+END(nested_dtlb_miss)
-+
-+ .org ia64_ivt+0x1800
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
-+ENTRY(ikey_miss)
-+ DBG_FAULT(6)
-+ FAULT(6)
-+END(ikey_miss)
-+
-+ //-----------------------------------------------------------------------------------
-+ // call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address)
-+ENTRY(page_fault)
-+#ifdef CONFIG_XEN
-+ XEN_HYPER_SSM_PSR_DT;
-+#else
-+ ssm psr.dt
-+ ;;
-+ srlz.i
-+#endif
-+ ;;
-+ SAVE_MIN_WITH_COVER
-+ alloc r15=ar.pfs,0,0,3,0
-+#ifdef CONFIG_XEN
-+ movl r3=XSI_ISR
-+ ;;
-+ ld8 out1=[r3],XSI_IFA-XSI_ISR // get vcr.isr, point to ifa
-+ ;;
-+ ld8 out0=[r3] // get vcr.ifa
-+ mov r14=1
-+ ;;
-+ add r3=XSI_PSR_IC-XSI_IFA, r3 // point to vpsr.ic
-+ ;;
-+ st4 [r3]=r14 // vpsr.ic = 1
-+ adds r3=8,r2 // set up second base pointer
-+ ;;
-+#else
-+ mov out0=cr.ifa
-+ mov out1=cr.isr
-+ adds r3=8,r2 // set up second base pointer
-+ ;;
-+ ssm psr.ic | PSR_DEFAULT_BITS
-+ ;;
-+ srlz.i // guarantee that interruption collection is on
-+ ;;
-+#endif
-+#ifdef CONFIG_XEN
-+ br.cond.sptk.many xen_page_fault
-+ ;;
-+done_xen_page_fault:
-+#endif
-+(p15) ssm psr.i // restore psr.i
-+ movl r14=ia64_leave_kernel
-+ ;;
-+ SAVE_REST
-+ mov rp=r14
-+ ;;
-+ adds out2=16,r12 // out2 = pointer to pt_regs
-+ br.call.sptk.many b6=ia64_do_page_fault // ignore return address
-+END(page_fault)
-+
-+ .org ia64_ivt+0x1c00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
-+ENTRY(dkey_miss)
-+ DBG_FAULT(7)
-+ FAULT(7)
-+#ifdef CONFIG_XEN
-+ // Leaving this code inline above results in an IVT section overflow
-+ // There is no particular reason for this code to be here...
-+xen_page_fault:
-+(p15) movl r3=XSI_PSR_I
-+ ;;
-+(p15) st4 [r3]=r14,XSI_PEND-XSI_PSR_I // if (p15) vpsr.i = 1
-+ mov r14=r0
-+ ;;
-+(p15) ld4 r14=[r3] // if (pending_interrupts)
-+ adds r3=8,r2 // re-set up second base pointer
-+ ;;
-+(p15) cmp.ne p15,p0=r14,r0
-+ ;;
-+ br.cond.sptk.many done_xen_page_fault
-+ ;;
-+#endif
-+END(dkey_miss)
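
xen_page_fault above shows how psr.i is virtualized: enabling
interrupts is just a store into the shared page, but the guest must
then poll a pending flag, because the hypervisor may have queued an
event while the virtual flag was clear (p15 stays set so the real
"ssm psr.i" after done_xen_page_fault delivers it). A sketch with an
assumed shared-page layout; the field names are illustrative:

    #include <stdint.h>

    struct xsi_page {        /* hypothetical layout */
        uint32_t psr_i;      /* virtual psr.i       */
        uint32_t pend;       /* pending event flag  */
    };

    static void virtual_ssm_psr_i(volatile struct xsi_page *xsi,
                                  void (*deliver)(void))
    {
        xsi->psr_i = 1;      /* (p15) st4 [r3]=r14 : vpsr.i = 1        */
        if (xsi->pend)       /* events arrived while virtually masked? */
            deliver();       /* the real ssm psr.i / hypervisor entry  */
    }
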
-+
-+ .org ia64_ivt+0x2000
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
-+ENTRY(dirty_bit)
-+ DBG_FAULT(8)
-+ /*
-+ * What we do here is to simply turn on the dirty bit in the PTE. We need to
-+ * update both the page-table and the TLB entry. To efficiently access the PTE,
-+ * we address it through the virtual page table. Most likely, the TLB entry for
-+ * the relevant virtual page table page is still present in the TLB so we can
-+ * normally do this without additional TLB misses. In case the necessary virtual
-+ * page table TLB entry isn't present, we take a nested TLB miss hit where we look
-+ * up the physical address of the L3 PTE and then continue at label 1 below.
-+ */
-+#ifdef CONFIG_XEN
-+ movl r16=XSI_IFA
-+ ;;
-+ ld8 r16=[r16]
-+ ;;
-+#else
-+ mov r16=cr.ifa // get the address that caused the fault
-+#endif
-+ movl r30=1f // load continuation point in case of nested fault
-+ ;;
-+#ifdef CONFIG_XEN
-+#if 1
-+ mov r18=r8;
-+ mov r8=r16;
-+ XEN_HYPER_THASH;;
-+ mov r17=r8;
-+ mov r8=r18;;
-+#else
-+ tak r17=r80 // "privified" thash
-+#endif
-+#else
-+ thash r17=r16 // compute virtual address of L3 PTE
-+#endif
-+ mov r29=b0 // save b0 in case of nested fault
-+ mov r31=pr // save pr
-+#ifdef CONFIG_SMP
-+ mov r28=ar.ccv // save ar.ccv
-+ ;;
-+1: ld8 r18=[r17]
-+ ;; // avoid RAW on r18
-+ mov ar.ccv=r18 // set compare value for cmpxchg
-+ or r25=_PAGE_D|_PAGE_A,r18 // set the dirty and accessed bits
-+ ;;
-+ cmpxchg8.acq r26=[r17],r25,ar.ccv
-+ mov r24=PAGE_SHIFT<<2
-+ ;;
-+ cmp.eq p6,p7=r26,r18
-+ ;;
-+(p6) itc.d r25 // install updated PTE
-+ ;;
-+ /*
-+ * Tell the assembler's dependency-violation checker that the above "itc" instructions
-+ * cannot possibly affect the following loads:
-+ */
-+ dv_serialize_data
-+
-+ ld8 r18=[r17] // read PTE again
-+ ;;
-+ cmp.eq p6,p7=r18,r25 // is it same as the newly installed
-+ ;;
-+(p7) ptc.l r16,r24
-+ mov b0=r29 // restore b0
-+ mov ar.ccv=r28
-+#else
-+ ;;
-+1: ld8 r18=[r17]
-+ ;; // avoid RAW on r18
-+ or r18=_PAGE_D|_PAGE_A,r18 // set the dirty and accessed bits
-+ mov b0=r29 // restore b0
-+ ;;
-+ st8 [r17]=r18 // store back updated PTE
-+ itc.d r18 // install updated PTE
-+#endif
-+ mov pr=r31,-1 // restore pr
-+#ifdef CONFIG_XEN
-+ XEN_HYPER_RFI;
-+#else
-+ rfi
-+#endif
-+END(dirty_bit)
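
On SMP the dirty/accessed update must be a compare-exchange so that a
racing PTE update is not lost; the TLB insert happens only when the
exchange wins, and a final re-read purges the entry if the PTE moved
again. The same logic in C11 atomics; the bit values and the TLB
helpers are placeholders, not kernel APIs:

    #include <stdint.h>
    #include <stdatomic.h>

    #define _PAGE_A 0x20UL  /* placeholder accessed bit */
    #define _PAGE_D 0x40UL  /* placeholder dirty bit    */

    static void tlb_insert(uint64_t pte)  { (void)pte;   } /* itc.d stand-in */
    static void tlb_purge(uint64_t vaddr) { (void)vaddr; } /* ptc.l stand-in */

    static void set_dirty_smp(_Atomic uint64_t *ptep, uint64_t vaddr)
    {
        uint64_t old = atomic_load(ptep);        /* 1: ld8 r18=[r17]      */
        uint64_t upd = old | _PAGE_D | _PAGE_A;  /* or r25=...,r18        */
        if (atomic_compare_exchange_strong(ptep, &old, upd)) /* cmpxchg8.acq */
            tlb_insert(upd);                     /* (p6) itc.d r25        */
        if (atomic_load(ptep) != upd)            /* changed under us?     */
            tlb_purge(vaddr);                    /* (p7) ptc.l r16,r24    */
    }
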
-+
-+ .org ia64_ivt+0x2400
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
-+ENTRY(iaccess_bit)
-+ DBG_FAULT(9)
-+ // Like Entry 8, except for instruction access
-+#ifdef CONFIG_XEN
-+ movl r16=XSI_IFA
-+ ;;
-+ ld8 r16=[r16]
-+ ;;
-+#else
-+ mov r16=cr.ifa // get the address that caused the fault
-+#endif
-+ movl r30=1f // load continuation point in case of nested fault
-+ mov r31=pr // save predicates
-+#ifdef CONFIG_ITANIUM
-+ /*
-+ * Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
-+ */
-+ mov r17=cr.ipsr
-+ ;;
-+ mov r18=cr.iip
-+ tbit.z p6,p0=r17,IA64_PSR_IS_BIT // IA64 instruction set?
-+ ;;
-+(p6) mov r16=r18 // if so, use cr.iip instead of cr.ifa
-+#endif /* CONFIG_ITANIUM */
-+ ;;
-+#ifdef CONFIG_XEN
-+#if 1
-+ mov r18=r8;
-+ mov r8=r16;
-+ XEN_HYPER_THASH;;
-+ mov r17=r8;
-+ mov r8=r18;;
-+#else
-+ tak r17=r80 // "privified" thash
-+#endif
-+#else
-+ thash r17=r16 // compute virtual address of L3 PTE
-+#endif
-+ mov r29=b0 // save b0 in case of nested fault
-+#ifdef CONFIG_SMP
-+ mov r28=ar.ccv // save ar.ccv
-+ ;;
-+1: ld8 r18=[r17]
-+ ;;
-+ mov ar.ccv=r18 // set compare value for cmpxchg
-+ or r25=_PAGE_A,r18 // set the accessed bit
-+ ;;
-+ cmpxchg8.acq r26=[r17],r25,ar.ccv
-+ mov r24=PAGE_SHIFT<<2
-+ ;;
-+ cmp.eq p6,p7=r26,r18
-+ ;;
-+#ifdef CONFIG_XEN
-+ mov r26=r8
-+ mov r8=r25
-+ ;;
-+(p6) XEN_HYPER_ITC_I
-+ ;;
-+ mov r8=r26
-+ ;;
-+#else
-+(p6) itc.i r25 // install updated PTE
-+#endif
-+ ;;
-+ /*
-+ * Tell the assembler's dependency-violation checker that the above "itc" instructions
-+ * cannot possibly affect the following loads:
-+ */
-+ dv_serialize_data
-+
-+ ld8 r18=[r17] // read PTE again
-+ ;;
-+ cmp.eq p6,p7=r18,r25 // is it same as the newly installed
-+ ;;
-+(p7) ptc.l r16,r24
-+ mov b0=r29 // restore b0
-+ mov ar.ccv=r28
-+#else /* !CONFIG_SMP */
-+ ;;
-+1: ld8 r18=[r17]
-+ ;;
-+ or r18=_PAGE_A,r18 // set the accessed bit
-+ mov b0=r29 // restore b0
-+ ;;
-+ st8 [r17]=r18 // store back updated PTE
-+ itc.i r18 // install updated PTE
-+#endif /* !CONFIG_SMP */
-+ mov pr=r31,-1
-+#ifdef CONFIG_XEN
-+ XEN_HYPER_RFI;
-+#else
-+ rfi
-+#endif
-+END(iaccess_bit)
-+
-+ .org ia64_ivt+0x2800
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
-+ENTRY(daccess_bit)
-+ DBG_FAULT(10)
-+ // Like Entry 8, except for data access
-+#ifdef CONFIG_XEN
-+ movl r16=XSI_IFA
-+ ;;
-+ ld8 r16=[r16]
-+ ;;
-+#else
-+ mov r16=cr.ifa // get the address that caused the fault
-+#endif
-+ movl r30=1f // load continuation point in case of nested fault
-+ ;;
-+#ifdef CONFIG_XEN
-+#if 1
-+ mov r18=r8;
-+ mov r8=r16;
-+ XEN_HYPER_THASH;;
-+ mov r17=r8;
-+ mov r8=r18;;
-+#else
-+ tak r17=r80 // "privified" thash
-+#endif
-+#else
-+ thash r17=r16 // compute virtual address of L3 PTE
-+#endif
-+ mov r31=pr
-+ mov r29=b0 // save b0 in case of nested fault
-+#ifdef CONFIG_SMP
-+ mov r28=ar.ccv // save ar.ccv
-+ ;;
-+1: ld8 r18=[r17]
-+ ;; // avoid RAW on r18
-+ mov ar.ccv=r18 // set compare value for cmpxchg
-+ or r25=_PAGE_A,r18 // set the accessed bit
-+ ;;
-+ cmpxchg8.acq r26=[r17],r25,ar.ccv
-+ mov r24=PAGE_SHIFT<<2
-+ ;;
-+ cmp.eq p6,p7=r26,r18
-+ ;;
-+#ifdef CONFIG_XEN
-+ mov r26=r8
-+ mov r8=r25
-+ ;;
-+(p6) XEN_HYPER_ITC_D
-+ ;;
-+ mov r8=r26
-+ ;;
-+#else
-+(p6) itc.d r25 // install updated PTE
-+#endif
-+ /*
-+ * Tell the assembler's dependency-violation checker that the above "itc" instructions
-+ * cannot possibly affect the following loads:
-+ */
-+ dv_serialize_data
-+ ;;
-+ ld8 r18=[r17] // read PTE again
-+ ;;
-+ cmp.eq p6,p7=r18,r25 // is it same as the newly installed
-+ ;;
-+(p7) ptc.l r16,r24
-+ mov ar.ccv=r28
-+#else
-+ ;;
-+1: ld8 r18=[r17]
-+ ;; // avoid RAW on r18
-+ or r18=_PAGE_A,r18 // set the accessed bit
-+ ;;
-+ st8 [r17]=r18 // store back updated PTE
-+ itc.d r18 // install updated PTE
-+#endif
-+ mov b0=r29 // restore b0
-+ mov pr=r31,-1
-+#ifdef CONFIG_XEN
-+ XEN_HYPER_RFI;
-+#else
-+ rfi
-+#endif
-+END(daccess_bit)
-+
-+ .org ia64_ivt+0x2c00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
-+ENTRY(break_fault)
-+ /*
-+ * The streamlined system call entry/exit paths only save/restore the initial part
-+ * of pt_regs. This implies that the callers of system-calls must adhere to the
-+ * normal procedure calling conventions.
-+ *
-+ * Registers to be saved & restored:
-+ * CR registers: cr.ipsr, cr.iip, cr.ifs
-+ * AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
-+ * others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
-+ * Registers to be restored only:
-+ * r8-r11: output value from the system call.
-+ *
-+ * During system call exit, scratch registers (including r15) are modified/cleared
-+ * to prevent leaking bits from kernel to user level.
-+ */
-+ DBG_FAULT(11)
-+ mov r16=IA64_KR(CURRENT) // r16 = current task; 12 cycle read lat.
-+#ifdef CONFIG_XEN
-+ movl r31=XSI_IPSR
-+ ;;
-+ ld8 r29=[r31],XSI_IIP-XSI_IPSR // get ipsr, point to iip
-+ mov r18=__IA64_BREAK_SYSCALL
-+ mov r21=ar.fpsr
-+ ;;
-+ ld8 r28=[r31],XSI_IIM-XSI_IIP // get iip, point to iim
-+ mov r19=b6
-+ mov r25=ar.unat
-+ ;;
-+ ld8 r17=[r31] // get iim
-+ mov r27=ar.rsc
-+ mov r26=ar.pfs
-+ ;;
-+#else
-+ mov r17=cr.iim
-+ mov r18=__IA64_BREAK_SYSCALL
-+ mov r21=ar.fpsr
-+ mov r29=cr.ipsr
-+ mov r19=b6
-+ mov r25=ar.unat
-+ mov r27=ar.rsc
-+ mov r26=ar.pfs
-+ mov r28=cr.iip
-+#endif
-+ mov r31=pr // prepare to save predicates
-+ mov r20=r1
-+ ;;
-+ adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
-+ cmp.eq p0,p7=r18,r17 // is this a system call? (p7 <- false, if so)
-+(p7) br.cond.spnt non_syscall
-+ ;;
-+ ld1 r17=[r16] // load current->thread.on_ustack flag
-+ st1 [r16]=r0 // clear current->thread.on_ustack flag
-+ add r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 // set r1 for MINSTATE_START_SAVE_MIN_VIRT
-+ ;;
-+ invala
-+
-+ /* adjust return address so we skip over the break instruction: */
-+
-+ extr.u r8=r29,41,2 // extract ei field from cr.ipsr
-+ ;;
-+ cmp.eq p6,p7=2,r8 // ipsr.ei==2?
-+ mov r2=r1 // setup r2 for ia64_syscall_setup
-+ ;;
-+(p6) mov r8=0 // clear ei to 0
-+(p6) adds r28=16,r28 // switch cr.iip to next bundle (cr.ipsr.ei wrapped)
-+(p7) adds r8=1,r8 // increment ei to next slot
-+ ;;
-+ cmp.eq pKStk,pUStk=r0,r17 // are we in kernel mode already?
-+ dep r29=r8,r29,41,2 // insert new ei into cr.ipsr
-+ ;;
-+
-+ // switch from user to kernel RBS:
-+ MINSTATE_START_SAVE_MIN_VIRT
-+ br.call.sptk.many b7=ia64_syscall_setup
-+ ;;
-+#ifdef CONFIG_XEN
-+ mov r2=b0; br.call.sptk b0=xen_bsw1;; mov b0=r2;;
-+#else
-+ MINSTATE_END_SAVE_MIN_VIRT // switch to bank 1
-+#endif
-+#ifdef CONFIG_XEN
-+ movl r3=XSI_PSR_IC
-+ mov r16=1
-+ ;;
-+#if 1
-+ st4 [r3]=r16,XSI_PSR_I-XSI_PSR_IC // vpsr.ic = 1
-+ ;;
-+(p15) st4 [r3]=r16,XSI_PEND-XSI_PSR_I // if (p15) vpsr.i = 1
-+ mov r16=r0
-+ ;;
-+(p15) ld4 r16=[r3] // if (pending_interrupts)
-+ ;;
-+ cmp.ne p6,p0=r16,r0
-+ ;;
-+(p6) ssm psr.i // do a real ssm psr.i
-+ ;;
-+#else
-+// st4 [r3]=r16,XSI_PSR_I-XSI_PSR_IC // vpsr.ic = 1
-+ adds r3=XSI_PSR_I-XSI_PSR_IC,r3 // SKIP vpsr.ic = 1
-+ ;;
-+(p15) st4 [r3]=r16,XSI_PEND-XSI_PSR_I // if (p15) vpsr.i = 1
-+ mov r16=r0
-+ ;;
-+(p15) ld4 r16=[r3] // if (pending_interrupts)
-+ ;;
-+ cmp.ne p6,p0=r16,r0
-+ ;;
-+//(p6) ssm psr.i // do a real ssm psr.i
-+//(p6) XEN_HYPER_SSM_I;
-+(p6) break 0x7;
-+ ;;
-+#endif
-+ mov r3=NR_syscalls - 1
-+ ;;
-+#else
-+ ssm psr.ic | PSR_DEFAULT_BITS
-+ ;;
-+ srlz.i // guarantee that interruption collection is on
-+ mov r3=NR_syscalls - 1
-+ ;;
-+(p15) ssm psr.i // restore psr.i
-+#endif
-+ // p10==true means there are more than 8 out registers or r15's NaT is true
-+(p10) br.cond.spnt.many ia64_ret_from_syscall
-+ ;;
-+ movl r16=sys_call_table
-+
-+ adds r15=-1024,r15 // r15 contains the syscall number---subtract 1024
-+ movl r2=ia64_ret_from_syscall
-+ ;;
-+ shladd r20=r15,3,r16 // r20 = sys_call_table + 8*(syscall-1024)
-+ cmp.leu p6,p7=r15,r3 // (syscall > 0 && syscall < 1024 + NR_syscalls) ?
-+ mov rp=r2 // set the real return addr
-+ ;;
-+(p6) ld8 r20=[r20] // load address of syscall entry point
-+(p7) movl r20=sys_ni_syscall
-+
-+ add r2=TI_FLAGS+IA64_TASK_SIZE,r13
-+ ;;
-+ ld4 r2=[r2] // r2 = current_thread_info()->flags
-+ ;;
-+ and r2=_TIF_SYSCALL_TRACEAUDIT,r2 // mask trace or audit
-+ ;;
-+ cmp.eq p8,p0=r2,r0
-+ mov b6=r20
-+ ;;
-+(p8) br.call.sptk.many b6=b6 // ignore this return addr
-+ br.cond.sptk ia64_trace_syscall
-+ // NOT REACHED
-+END(break_fault)
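
The dispatch at the end of break_fault biases the syscall number by
1024, range-checks it with one unsigned compare, and routes anything
out of range to sys_ni_syscall. A runnable sketch with a toy table;
only the names mirror the assembly, the table contents are invented:

    #include <stdio.h>

    #define NR_syscalls 3
    typedef long (*syscall_fn)(void);

    static long sys_ni_syscall(void) { return -38; /* -ENOSYS */ }
    static long sys_a(void)          { return 0; }
    static long sys_b(void)          { return 1; }

    static syscall_fn sys_call_table[NR_syscalls] =
        { sys_a, sys_b, sys_ni_syscall };

    static long dispatch(long r15)
    {
        unsigned long idx = (unsigned long)(r15 - 1024); /* adds r15=-1024,r15   */
        syscall_fn fn = (idx <= NR_syscalls - 1)         /* cmp.leu p6,p7=r15,r3 */
                      ? sys_call_table[idx]              /* (p6) ld8 r20=[r20]   */
                      : sys_ni_syscall;                  /* (p7) movl r20=...    */
        return fn();
    }

    int main(void) { printf("%ld\n", dispatch(1025)); return 0; }
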
-+
-+ .org ia64_ivt+0x3000
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
-+ENTRY(interrupt)
-+ DBG_FAULT(12)
-+ mov r31=pr // prepare to save predicates
-+ ;;
-+ SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3
-+#ifdef CONFIG_XEN
-+ movl r3=XSI_PSR_IC
-+ mov r14=1
-+ ;;
-+ st4 [r3]=r14
-+#else
-+ ssm psr.ic | PSR_DEFAULT_BITS
-+#endif
-+ ;;
-+ adds r3=8,r2 // set up second base pointer for SAVE_REST
-+ srlz.i // ensure everybody knows psr.ic is back on
-+ ;;
-+ SAVE_REST
-+ ;;
-+ alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
-+#ifdef CONFIG_XEN
-+ ;;
-+ br.call.sptk.many rp=xen_get_ivr
-+ ;;
-+ mov out0=r8 // pass cr.ivr as first arg
-+#else
-+ mov out0=cr.ivr // pass cr.ivr as first arg
-+#endif
-+ add out1=16,sp // pass pointer to pt_regs as second arg
-+ ;;
-+ srlz.d // make sure we see the effect of cr.ivr
-+ movl r14=ia64_leave_kernel
-+ ;;
-+ mov rp=r14
-+ br.call.sptk.many b6=ia64_handle_irq
-+END(interrupt)
-+
-+ .org ia64_ivt+0x3400
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x3400 Entry 13 (size 64 bundles) Reserved
-+ DBG_FAULT(13)
-+ FAULT(13)
-+
-+ .org ia64_ivt+0x3800
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x3800 Entry 14 (size 64 bundles) Reserved
-+ DBG_FAULT(14)
-+ FAULT(14)
-+
-+ /*
-+ * There is no particular reason for this code to be here, other than that
-+ * there happens to be space here that would go unused otherwise. If this
-+ * fault ever gets "unreserved", simply move the following code to a more
-+ * suitable spot...
-+ *
-+ * ia64_syscall_setup() is a separate subroutine so that it can
-+ * allocate stacked registers so it can safely demine any
-+ * potential NaT values from the input registers.
-+ *
-+ * On entry:
-+ * - executing on bank 0 or bank 1 register set (doesn't matter)
-+ * - r1: stack pointer
-+ * - r2: current task pointer
-+ * - r3: preserved
-+ * - r11: original contents (saved ar.pfs to be saved)
-+ * - r12: original contents (sp to be saved)
-+ * - r13: original contents (tp to be saved)
-+ * - r15: original contents (syscall # to be saved)
-+ * - r18: saved bsp (after switching to kernel stack)
-+ * - r19: saved b6
-+ * - r20: saved r1 (gp)
-+ * - r21: saved ar.fpsr
-+ * - r22: kernel's register backing store base (krbs_base)
-+ * - r23: saved ar.bspstore
-+ * - r24: saved ar.rnat
-+ * - r25: saved ar.unat
-+ * - r26: saved ar.pfs
-+ * - r27: saved ar.rsc
-+ * - r28: saved cr.iip
-+ * - r29: saved cr.ipsr
-+ * - r31: saved pr
-+ * - b0: original contents (to be saved)
-+ * On exit:
-+ * - executing on bank 1 registers
-+ * - psr.ic enabled, interrupts restored
-+ * - p10: TRUE if syscall is invoked with more than 8 out
-+ * registers or r15's NaT is true
-+ * - r1: kernel's gp
-+ * - r3: preserved (same as on entry)
-+ * - r8: -EINVAL if p10 is true
-+ * - r12: points to kernel stack
-+ * - r13: points to current task
-+ * - p15: TRUE if interrupts need to be re-enabled
-+ * - ar.fpsr: set to kernel settings
-+ */
-+#ifndef CONFIG_XEN
-+GLOBAL_ENTRY(ia64_syscall_setup)
-+#if PT(B6) != 0
-+# error This code assumes that b6 is the first field in pt_regs.
-+#endif
-+ st8 [r1]=r19 // save b6
-+ add r16=PT(CR_IPSR),r1 // initialize first base pointer
-+ add r17=PT(R11),r1 // initialize second base pointer
-+ ;;
-+ alloc r19=ar.pfs,8,0,0,0 // ensure in0-in7 are writable
-+ st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR) // save cr.ipsr
-+ tnat.nz p8,p0=in0
-+
-+ st8.spill [r17]=r11,PT(CR_IIP)-PT(R11) // save r11
-+ tnat.nz p9,p0=in1
-+(pKStk) mov r18=r0 // make sure r18 isn't NaT
-+ ;;
-+
-+ st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS) // save ar.pfs
-+ st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP) // save cr.iip
-+ mov r28=b0 // save b0 (2 cyc)
-+ ;;
-+
-+ st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT) // save ar.unat
-+ dep r19=0,r19,38,26 // clear all bits but 0..37 [I0]
-+(p8) mov in0=-1
-+ ;;
-+
-+ st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS) // store ar.pfs.pfm in cr.ifs
-+ extr.u r11=r19,7,7 // I0 // get sol of ar.pfs
-+ and r8=0x7f,r19 // A // get sof of ar.pfs
-+
-+ st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
-+ tbit.nz p15,p0=r29,IA64_PSR_I_BIT // I0
-+(p9) mov in1=-1
-+ ;;
-+
-+(pUStk) sub r18=r18,r22 // r18=RSE.ndirty*8
-+ tnat.nz p10,p0=in2
-+ add r11=8,r11
-+ ;;
-+(pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16 // skip over ar_rnat field
-+(pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17 // skip over ar_bspstore field
-+ tnat.nz p11,p0=in3
-+ ;;
-+(p10) mov in2=-1
-+ tnat.nz p12,p0=in4 // [I0]
-+(p11) mov in3=-1
-+ ;;
-+(pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT) // save ar.rnat
-+(pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE) // save ar.bspstore
-+ shl r18=r18,16 // compute ar.rsc to be used for "loadrs"
-+ ;;
-+ st8 [r16]=r31,PT(LOADRS)-PT(PR) // save predicates
-+ st8 [r17]=r28,PT(R1)-PT(B0) // save b0
-+ tnat.nz p13,p0=in5 // [I0]
-+ ;;
-+ st8 [r16]=r18,PT(R12)-PT(LOADRS) // save ar.rsc value for "loadrs"
-+ st8.spill [r17]=r20,PT(R13)-PT(R1) // save original r1
-+(p12) mov in4=-1
-+ ;;
-+
-+.mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12) // save r12
-+.mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13) // save r13
-+(p13) mov in5=-1
-+ ;;
-+ st8 [r16]=r21,PT(R8)-PT(AR_FPSR) // save ar.fpsr
-+ tnat.nz p14,p0=in6
-+ cmp.lt p10,p9=r11,r8 // frame size can't be more than local+8
-+ ;;
-+ stf8 [r16]=f1 // ensure pt_regs.r8 != 0 (see handle_syscall_error)
-+(p9) tnat.nz p10,p0=r15
-+ adds r12=-16,r1 // switch to kernel memory stack (with 16 bytes of scratch)
-+
-+ st8.spill [r17]=r15 // save r15
-+ tnat.nz p8,p0=in7
-+ nop.i 0
-+
-+ mov r13=r2 // establish `current'
-+ movl r1=__gp // establish kernel global pointer
-+ ;;
-+(p14) mov in6=-1
-+(p8) mov in7=-1
-+ nop.i 0
-+
-+ cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
-+ movl r17=FPSR_DEFAULT
-+ ;;
-+ mov.m ar.fpsr=r17 // set ar.fpsr to kernel default value
-+(p10) mov r8=-EINVAL
-+ br.ret.sptk.many b7
-+END(ia64_syscall_setup)
-+#endif
-+
-+ .org ia64_ivt+0x3c00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x3c00 Entry 15 (size 64 bundles) Reserved
-+ DBG_FAULT(15)
-+ FAULT(15)
-+
-+ /*
-+ * Squatting in this space ...
-+ *
-+ * This special case dispatcher for illegal operation faults allows preserved
-+ * registers to be modified through a callback function (asm only) that is handed
-+ * back from the fault handler in r8. Up to three arguments can be passed to the
-+ * callback function by returning an aggregate with the callback as its first
-+ * element, followed by the arguments.
-+ */
-+ENTRY(dispatch_illegal_op_fault)
-+ SAVE_MIN_WITH_COVER
-+ ssm psr.ic | PSR_DEFAULT_BITS
-+ ;;
-+ srlz.i // guarantee that interruption collection is on
-+ ;;
-+(p15) ssm psr.i // restore psr.i
-+ adds r3=8,r2 // set up second base pointer for SAVE_REST
-+ ;;
-+ alloc r14=ar.pfs,0,0,1,0 // must be first in insn group
-+ mov out0=ar.ec
-+ ;;
-+ SAVE_REST
-+ ;;
-+ br.call.sptk.many rp=ia64_illegal_op_fault
-+.ret0: ;;
-+ alloc r14=ar.pfs,0,0,3,0 // must be first in insn group
-+ mov out0=r9
-+ mov out1=r10
-+ mov out2=r11
-+ movl r15=ia64_leave_kernel
-+ ;;
-+ mov rp=r15
-+ mov b6=r8
-+ ;;
-+ cmp.ne p6,p0=0,r8
-+(p6) br.call.dpnt.many b6=b6 // call returns to ia64_leave_kernel
-+ br.sptk.many ia64_leave_kernel
-+END(dispatch_illegal_op_fault)
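
In C terms, the convention described above is a small aggregate return:
r8 carries the callback (or 0 for none) and r9-r11 its arguments. A
sketch with illustrative types:

    #include <stdint.h>

    struct illegal_op_return {
        void (*fn)(uint64_t, uint64_t, uint64_t); /* r8: 0 = no callback */
        uint64_t arg0, arg1, arg2;                /* r9, r10, r11        */
    };

    static void run_fixup(struct illegal_op_return ret)
    {
        if (ret.fn)                               /* cmp.ne p6,p0=0,r8   */
            ret.fn(ret.arg0, ret.arg1, ret.arg2); /* (p6) br.call b6=b6  */
    }
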
-+
-+ .org ia64_ivt+0x4000
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x4000 Entry 16 (size 64 bundles) Reserved
-+ DBG_FAULT(16)
-+ FAULT(16)
-+
-+ .org ia64_ivt+0x4400
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x4400 Entry 17 (size 64 bundles) Reserved
-+ DBG_FAULT(17)
-+ FAULT(17)
-+
-+ENTRY(non_syscall)
-+ SAVE_MIN_WITH_COVER
-+
-+ // There is no particular reason for this code to be here, other than that
-+ // there happens to be space here that would go unused otherwise. If this
-+ // fault ever gets "unreserved", simply move the following code to a more
-+ // suitable spot...
-+
-+ alloc r14=ar.pfs,0,0,2,0
-+ mov out0=cr.iim
-+ add out1=16,sp
-+ adds r3=8,r2 // set up second base pointer for SAVE_REST
-+
-+ ssm psr.ic | PSR_DEFAULT_BITS
-+ ;;
-+ srlz.i // guarantee that interruption collection is on
-+ ;;
-+(p15) ssm psr.i // restore psr.i
-+ movl r15=ia64_leave_kernel
-+ ;;
-+ SAVE_REST
-+ mov rp=r15
-+ ;;
-+ br.call.sptk.many b6=ia64_bad_break // avoid WAW on CFM and ignore return addr
-+END(non_syscall)
-+
-+ .org ia64_ivt+0x4800
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x4800 Entry 18 (size 64 bundles) Reserved
-+ DBG_FAULT(18)
-+ FAULT(18)
-+
-+ /*
-+ * There is no particular reason for this code to be here, other than that
-+ * there happens to be space here that would go unused otherwise. If this
-+ * fault ever gets "unreserved", simply move the following code to a more
-+ * suitable spot...
-+ */
-+
-+ENTRY(dispatch_unaligned_handler)
-+ SAVE_MIN_WITH_COVER
-+ ;;
-+ alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
-+ mov out0=cr.ifa
-+ adds out1=16,sp
-+
-+ ssm psr.ic | PSR_DEFAULT_BITS
-+ ;;
-+ srlz.i // guarantee that interruption collection is on
-+ ;;
-+(p15) ssm psr.i // restore psr.i
-+ adds r3=8,r2 // set up second base pointer
-+ ;;
-+ SAVE_REST
-+ movl r14=ia64_leave_kernel
-+ ;;
-+ mov rp=r14
-+ br.sptk.many ia64_prepare_handle_unaligned
-+END(dispatch_unaligned_handler)
-+
-+ .org ia64_ivt+0x4c00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x4c00 Entry 19 (size 64 bundles) Reserved
-+ DBG_FAULT(19)
-+ FAULT(19)
-+
-+ /*
-+ * There is no particular reason for this code to be here, other than that
-+ * there happens to be space here that would go unused otherwise. If this
-+ * fault ever gets "unreserved", simply moved the following code to a more
-+ * suitable spot...
-+ */
-+
-+ENTRY(dispatch_to_fault_handler)
-+ /*
-+ * Input:
-+ * psr.ic: off
-+ * r19: fault vector number (e.g., 24 for General Exception)
-+ * r31: contains saved predicates (pr)
-+ */
-+ SAVE_MIN_WITH_COVER_R19
-+ alloc r14=ar.pfs,0,0,5,0
-+ mov out0=r15
-+#ifdef CONFIG_XEN
-+ movl out1=XSI_ISR
-+ ;;
-+ adds out2=XSI_IFA-XSI_ISR,out1
-+ adds out3=XSI_IIM-XSI_ISR,out1
-+ adds out4=XSI_ITIR-XSI_ISR,out1
-+ ;;
-+ ld8 out1=[out1]
-+ ld8 out2=[out2]
-+ ld8 out3=[out3]
-+ ld8 out4=[out4]
-+ ;;
-+#else
-+ mov out1=cr.isr
-+ mov out2=cr.ifa
-+ mov out3=cr.iim
-+ mov out4=cr.itir
-+ ;;
-+#endif
-+ ssm psr.ic | PSR_DEFAULT_BITS
-+ ;;
-+ srlz.i // guarantee that interruption collection is on
-+ ;;
-+(p15) ssm psr.i // restore psr.i
-+ adds r3=8,r2 // set up second base pointer for SAVE_REST
-+ ;;
-+ SAVE_REST
-+ movl r14=ia64_leave_kernel
-+ ;;
-+ mov rp=r14
-+ br.call.sptk.many b6=ia64_fault
-+END(dispatch_to_fault_handler)
-+
-+//
-+// --- End of long entries, Beginning of short entries
-+//
-+
-+ .org ia64_ivt+0x5000
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
-+ENTRY(page_not_present)
-+ DBG_FAULT(20)
-+ mov r16=cr.ifa
-+ rsm psr.dt
-+ /*
-+ * The Linux page fault handler doesn't expect non-present pages to be in
-+ * the TLB. Flush the existing entry now, so we meet that expectation.
-+ */
-+ mov r17=PAGE_SHIFT<<2
-+ ;;
-+ ptc.l r16,r17
-+ ;;
-+ mov r31=pr
-+ srlz.d
-+ br.sptk.many page_fault
-+END(page_not_present)
-+
-+ .org ia64_ivt+0x5100
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
-+ENTRY(key_permission)
-+ DBG_FAULT(21)
-+ mov r16=cr.ifa
-+ rsm psr.dt
-+ mov r31=pr
-+ ;;
-+ srlz.d
-+ br.sptk.many page_fault
-+END(key_permission)
-+
-+ .org ia64_ivt+0x5200
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
-+ENTRY(iaccess_rights)
-+ DBG_FAULT(22)
-+ mov r16=cr.ifa
-+ rsm psr.dt
-+ mov r31=pr
-+ ;;
-+ srlz.d
-+ br.sptk.many page_fault
-+END(iaccess_rights)
-+
-+ .org ia64_ivt+0x5300
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
-+ENTRY(daccess_rights)
-+ DBG_FAULT(23)
-+#ifdef CONFIG_XEN
-+ movl r16=XSI_IFA
-+ ;;
-+ ld8 r16=[r16]
-+ ;;
-+ XEN_HYPER_RSM_PSR_DT;
-+#else
-+ mov r16=cr.ifa
-+ rsm psr.dt
-+#endif
-+ mov r31=pr
-+ ;;
-+ srlz.d
-+ br.sptk.many page_fault
-+END(daccess_rights)
-+
-+ .org ia64_ivt+0x5400
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
-+ENTRY(general_exception)
-+ DBG_FAULT(24)
-+ mov r16=cr.isr
-+ mov r31=pr
-+ ;;
-+ cmp4.eq p6,p0=0,r16
-+(p6) br.sptk.many dispatch_illegal_op_fault
-+ ;;
-+ mov r19=24 // fault number
-+ br.sptk.many dispatch_to_fault_handler
-+END(general_exception)
-+
-+ .org ia64_ivt+0x5500
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
-+ENTRY(disabled_fp_reg)
-+ DBG_FAULT(25)
-+ rsm psr.dfh // ensure we can access fph
-+ ;;
-+ srlz.d
-+ mov r31=pr
-+ mov r19=25
-+ br.sptk.many dispatch_to_fault_handler
-+END(disabled_fp_reg)
-+
-+ .org ia64_ivt+0x5600
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
-+ENTRY(nat_consumption)
-+ DBG_FAULT(26)
-+ FAULT(26)
-+END(nat_consumption)
-+
-+ .org ia64_ivt+0x5700
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
-+ENTRY(speculation_vector)
-+ DBG_FAULT(27)
-+ /*
-+ * A [f]chk.[as] instruction needs to take the branch to the recovery code but
-+ * this part of the architecture is not implemented in hardware on some CPUs, such
-+ * as Itanium. Thus, in general we need to emulate the behavior. IIM contains
-+ * the relative target (not yet sign extended). So after sign extending it we
-+ * simply add it to IIP. We also need to reset the EI field of the IPSR to zero,
-+ * i.e., the slot to restart into.
-+ *
-+ * cr.iim contains zero_ext(imm21)
-+ */
-+ mov r18=cr.iim
-+ ;;
-+ mov r17=cr.iip
-+ shl r18=r18,43 // put sign bit in position (43=64-21)
-+ ;;
-+
-+ mov r16=cr.ipsr
-+ shr r18=r18,39 // sign extend (39=43-4)
-+ ;;
-+
-+ add r17=r17,r18 // now add the offset
-+ ;;
-+ mov cr.iip=r17
-+ dep r16=0,r16,41,2 // clear EI
-+ ;;
-+
-+ mov cr.ipsr=r16
-+ ;;
-+
-+#ifdef CONFIG_XEN
-+ XEN_HYPER_RFI;
-+#else
-+ rfi
-+#endif
-+END(speculation_vector)
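
The arithmetic above is compact: shifting the 21-bit immediate left by
43 places its sign bit at bit 63, and an arithmetic right shift by 39
both sign-extends it and scales it by 16, the bundle size. The
equivalent C:

    #include <stdint.h>

    static void emulate_chk_branch(uint64_t *iip, uint64_t *ipsr,
                                   uint64_t iim)
    {
        int64_t off = ((int64_t)(iim << 43)) >> 39; /* sign-extend, <<4 */
        *iip += (uint64_t)off;                      /* add r17=r17,r18  */
        *ipsr &= ~(3UL << 41);                      /* dep r16=0,r16,41,2:
                                                     * clear ei so we
                                                     * restart at slot 0 */
    }
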
-+
-+ .org ia64_ivt+0x5800
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x5800 Entry 28 (size 16 bundles) Reserved
-+ DBG_FAULT(28)
-+ FAULT(28)
-+
-+ .org ia64_ivt+0x5900
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
-+ENTRY(debug_vector)
-+ DBG_FAULT(29)
-+ FAULT(29)
-+END(debug_vector)
-+
-+ .org ia64_ivt+0x5a00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
-+ENTRY(unaligned_access)
-+ DBG_FAULT(30)
-+ mov r16=cr.ipsr
-+ mov r31=pr // prepare to save predicates
-+ ;;
-+ br.sptk.many dispatch_unaligned_handler
-+END(unaligned_access)
-+
-+ .org ia64_ivt+0x5b00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
-+ENTRY(unsupported_data_reference)
-+ DBG_FAULT(31)
-+ FAULT(31)
-+END(unsupported_data_reference)
-+
-+ .org ia64_ivt+0x5c00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
-+ENTRY(floating_point_fault)
-+ DBG_FAULT(32)
-+ FAULT(32)
-+END(floating_point_fault)
-+
-+ .org ia64_ivt+0x5d00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
-+ENTRY(floating_point_trap)
-+ DBG_FAULT(33)
-+ FAULT(33)
-+END(floating_point_trap)
-+
-+ .org ia64_ivt+0x5e00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
-+ENTRY(lower_privilege_trap)
-+ DBG_FAULT(34)
-+ FAULT(34)
-+END(lower_privilege_trap)
-+
-+ .org ia64_ivt+0x5f00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
-+ENTRY(taken_branch_trap)
-+ DBG_FAULT(35)
-+ FAULT(35)
-+END(taken_branch_trap)
-+
-+ .org ia64_ivt+0x6000
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
-+ENTRY(single_step_trap)
-+ DBG_FAULT(36)
-+ FAULT(36)
-+END(single_step_trap)
-+
-+ .org ia64_ivt+0x6100
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x6100 Entry 37 (size 16 bundles) Reserved
-+ DBG_FAULT(37)
-+ FAULT(37)
-+
-+ .org ia64_ivt+0x6200
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x6200 Entry 38 (size 16 bundles) Reserved
-+ DBG_FAULT(38)
-+ FAULT(38)
-+
-+ .org ia64_ivt+0x6300
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x6300 Entry 39 (size 16 bundles) Reserved
-+ DBG_FAULT(39)
-+ FAULT(39)
-+
-+ .org ia64_ivt+0x6400
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x6400 Entry 40 (size 16 bundles) Reserved
-+ DBG_FAULT(40)
-+ FAULT(40)
-+
-+ .org ia64_ivt+0x6500
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x6500 Entry 41 (size 16 bundles) Reserved
-+ DBG_FAULT(41)
-+ FAULT(41)
-+
-+ .org ia64_ivt+0x6600
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x6600 Entry 42 (size 16 bundles) Reserved
-+ DBG_FAULT(42)
-+ FAULT(42)
-+
-+ .org ia64_ivt+0x6700
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x6700 Entry 43 (size 16 bundles) Reserved
-+ DBG_FAULT(43)
-+ FAULT(43)
-+
-+ .org ia64_ivt+0x6800
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x6800 Entry 44 (size 16 bundles) Reserved
-+ DBG_FAULT(44)
-+ FAULT(44)
-+
-+ .org ia64_ivt+0x6900
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
-+ENTRY(ia32_exception)
-+ DBG_FAULT(45)
-+ FAULT(45)
-+END(ia32_exception)
-+
-+ .org ia64_ivt+0x6a00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
-+ENTRY(ia32_intercept)
-+ DBG_FAULT(46)
-+#ifdef CONFIG_IA32_SUPPORT
-+ mov r31=pr
-+ mov r16=cr.isr
-+ ;;
-+ extr.u r17=r16,16,8 // get ISR.code
-+ mov r18=ar.eflag
-+ mov r19=cr.iim // old eflag value
-+ ;;
-+ cmp.ne p6,p0=2,r17
-+(p6) br.cond.spnt 1f // not a system flag fault
-+ xor r16=r18,r19
-+ ;;
-+ extr.u r17=r16,18,1 // get the eflags.ac bit
-+ ;;
-+ cmp.eq p6,p0=0,r17
-+(p6) br.cond.spnt 1f // eflags.ac bit didn't change
-+ ;;
-+ mov pr=r31,-1 // restore predicate registers
-+#ifdef CONFIG_XEN
-+ XEN_HYPER_RFI;
-+#else
-+ rfi
-+#endif
-+
-+1:
-+#endif // CONFIG_IA32_SUPPORT
-+ FAULT(46)
-+END(ia32_intercept)
-+
-+ .org ia64_ivt+0x6b00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
-+ENTRY(ia32_interrupt)
-+ DBG_FAULT(47)
-+#ifdef CONFIG_IA32_SUPPORT
-+ mov r31=pr
-+ br.sptk.many dispatch_to_ia32_handler
-+#else
-+ FAULT(47)
-+#endif
-+END(ia32_interrupt)
-+
-+ .org ia64_ivt+0x6c00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x6c00 Entry 48 (size 16 bundles) Reserved
-+ DBG_FAULT(48)
-+ FAULT(48)
-+
-+ .org ia64_ivt+0x6d00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x6d00 Entry 49 (size 16 bundles) Reserved
-+ DBG_FAULT(49)
-+ FAULT(49)
-+
-+ .org ia64_ivt+0x6e00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x6e00 Entry 50 (size 16 bundles) Reserved
-+ DBG_FAULT(50)
-+ FAULT(50)
-+
-+ .org ia64_ivt+0x6f00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x6f00 Entry 51 (size 16 bundles) Reserved
-+ DBG_FAULT(51)
-+ FAULT(51)
-+
-+ .org ia64_ivt+0x7000
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x7000 Entry 52 (size 16 bundles) Reserved
-+ DBG_FAULT(52)
-+ FAULT(52)
-+
-+ .org ia64_ivt+0x7100
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x7100 Entry 53 (size 16 bundles) Reserved
-+ DBG_FAULT(53)
-+ FAULT(53)
-+
-+ .org ia64_ivt+0x7200
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x7200 Entry 54 (size 16 bundles) Reserved
-+ DBG_FAULT(54)
-+ FAULT(54)
-+
-+ .org ia64_ivt+0x7300
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x7300 Entry 55 (size 16 bundles) Reserved
-+ DBG_FAULT(55)
-+ FAULT(55)
-+
-+ .org ia64_ivt+0x7400
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x7400 Entry 56 (size 16 bundles) Reserved
-+ DBG_FAULT(56)
-+ FAULT(56)
-+
-+ .org ia64_ivt+0x7500
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x7500 Entry 57 (size 16 bundles) Reserved
-+ DBG_FAULT(57)
-+ FAULT(57)
-+
-+ .org ia64_ivt+0x7600
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x7600 Entry 58 (size 16 bundles) Reserved
-+ DBG_FAULT(58)
-+ FAULT(58)
-+
-+ .org ia64_ivt+0x7700
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x7700 Entry 59 (size 16 bundles) Reserved
-+ DBG_FAULT(59)
-+ FAULT(59)
-+
-+ .org ia64_ivt+0x7800
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x7800 Entry 60 (size 16 bundles) Reserved
-+ DBG_FAULT(60)
-+ FAULT(60)
-+
-+ .org ia64_ivt+0x7900
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x7900 Entry 61 (size 16 bundles) Reserved
-+ DBG_FAULT(61)
-+ FAULT(61)
-+
-+ .org ia64_ivt+0x7a00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x7a00 Entry 62 (size 16 bundles) Reserved
-+ DBG_FAULT(62)
-+ FAULT(62)
-+
-+ .org ia64_ivt+0x7b00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x7b00 Entry 63 (size 16 bundles) Reserved
-+ DBG_FAULT(63)
-+ FAULT(63)
-+
-+ .org ia64_ivt+0x7c00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x7c00 Entry 64 (size 16 bundles) Reserved
-+ DBG_FAULT(64)
-+ FAULT(64)
-+
-+ .org ia64_ivt+0x7d00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x7d00 Entry 65 (size 16 bundles) Reserved
-+ DBG_FAULT(65)
-+ FAULT(65)
-+
-+ .org ia64_ivt+0x7e00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x7e00 Entry 66 (size 16 bundles) Reserved
-+ DBG_FAULT(66)
-+ FAULT(66)
-+
-+#ifdef CONFIG_XEN
-+ /*
-+ * There is no particular reason for this code to be here, other than that
-+ * there happens to be space here that would go unused otherwise. If this
-+ * fault ever gets "unreserved", simply move the following code to a more
-+ * suitable spot...
-+ */
-+
-+GLOBAL_ENTRY(xen_bsw1)
-+ /* FIXME: THIS CODE IS NOT NaT SAFE! */
-+ movl r30=XSI_BANKNUM;
-+ mov r31=1;;
-+ st4 [r30]=r31;
-+ movl r30=XSI_BANK1_R16;
-+ movl r31=XSI_BANK1_R16+8;;
-+ ld8 r16=[r30],16; ld8 r17=[r31],16;;
-+ ld8 r18=[r30],16; ld8 r19=[r31],16;;
-+ ld8 r20=[r30],16; ld8 r21=[r31],16;;
-+ ld8 r22=[r30],16; ld8 r23=[r31],16;;
-+ ld8 r24=[r30],16; ld8 r25=[r31],16;;
-+ ld8 r26=[r30],16; ld8 r27=[r31],16;;
-+ ld8 r28=[r30],16; ld8 r29=[r31],16;;
-+ ld8 r30=[r30]; ld8 r31=[r31];;
-+ br.ret.sptk.many b0
-+#endif
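
xen_bsw1 emulates the privileged "bsw.1" bank switch: it flips the
virtual bank number and reloads r16-r31 from a save area the hypervisor
maintains (with the NaT caveat noted in the FIXME). A sketch with a
hypothetical shared-page layout:

    #include <stdint.h>

    struct xsi_banks {           /* hypothetical shared-page fields */
        uint32_t banknum;        /* XSI_BANKNUM                     */
        uint64_t bank1_r16[16];  /* XSI_BANK1_R16 .. r31            */
    };

    static void xen_bsw1_c(volatile struct xsi_banks *xsi,
                           uint64_t regs[16])
    {
        xsi->banknum = 1;                 /* st4 [r30]=r31          */
        for (int i = 0; i < 16; i++)
            regs[i] = xsi->bank1_r16[i];  /* the paired ld8 loads   */
    }
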
-+
-+ .org ia64_ivt+0x7f00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x7f00 Entry 67 (size 16 bundles) Reserved
-+ DBG_FAULT(67)
-+ FAULT(67)
-+
-+#ifdef CONFIG_IA32_SUPPORT
-+
-+ /*
-+ * There is no particular reason for this code to be here, other than that
-+ * there happens to be space here that would go unused otherwise. If this
-+ * fault ever gets "unreserved", simply move the following code to a more
-+ * suitable spot...
-+ */
-+
-+ // IA32 interrupt entry point
-+
-+ENTRY(dispatch_to_ia32_handler)
-+ SAVE_MIN
-+ ;;
-+ mov r14=cr.isr
-+ ssm psr.ic | PSR_DEFAULT_BITS
-+ ;;
-+ srlz.i // guarantee that interruption collection is on
-+ ;;
-+(p15) ssm psr.i
-+ adds r3=8,r2 // Base pointer for SAVE_REST
-+ ;;
-+ SAVE_REST
-+ ;;
-+ mov r15=0x80
-+ shr r14=r14,16 // Get interrupt number
-+ ;;
-+ cmp.ne p6,p0=r14,r15
-+(p6) br.call.dpnt.many b6=non_ia32_syscall
-+
-+ adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp // 16 byte hole per SW conventions
-+ adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp
-+ ;;
-+ cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
-+ ld8 r8=[r14] // get r8
-+ ;;
-+ st8 [r15]=r8 // save original EAX in r1 (IA32 procs don't use the GP)
-+ ;;
-+ alloc r15=ar.pfs,0,0,6,0 // must be first in an insn group
-+ ;;
-+ ld4 r8=[r14],8 // r8 == eax (syscall number)
-+ mov r15=IA32_NR_syscalls
-+ ;;
-+ cmp.ltu.unc p6,p7=r8,r15
-+ ld4 out1=[r14],8 // r9 == ecx
-+ ;;
-+ ld4 out2=[r14],8 // r10 == edx
-+ ;;
-+ ld4 out0=[r14] // r11 == ebx
-+ adds r14=(IA64_PT_REGS_R13_OFFSET) + 16,sp
-+ ;;
-+ ld4 out5=[r14],PT(R14)-PT(R13) // r13 == ebp
-+ ;;
-+ ld4 out3=[r14],PT(R15)-PT(R14) // r14 == esi
-+ adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
-+ ;;
-+ ld4 out4=[r14] // r15 == edi
-+ movl r16=ia32_syscall_table
-+ ;;
-+(p6) shladd r16=r8,3,r16 // force ni_syscall if not valid syscall number
-+ ld4 r2=[r2] // r2 = current_thread_info()->flags
-+ ;;
-+ ld8 r16=[r16]
-+ and r2=_TIF_SYSCALL_TRACEAUDIT,r2 // mask trace or audit
-+ ;;
-+ mov b6=r16
-+ movl r15=ia32_ret_from_syscall
-+ cmp.eq p8,p0=r2,r0
-+ ;;
-+ mov rp=r15
-+(p8) br.call.sptk.many b6=b6
-+ br.cond.sptk ia32_trace_syscall
-+
-+non_ia32_syscall:
-+ alloc r15=ar.pfs,0,0,2,0
-+ mov out0=r14 // interrupt #
-+ add out1=16,sp // pointer to pt_regs
-+ ;; // avoid WAW on CFM
-+ br.call.sptk.many rp=ia32_bad_interrupt
-+.ret1: movl r15=ia64_leave_kernel
-+ ;;
-+ mov rp=r15
-+ br.ret.sptk.many rp
-+END(dispatch_to_ia32_handler)
-+
-+#endif /* CONFIG_IA32_SUPPORT */
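
The marshalling in dispatch_to_ia32_handler maps the i386 syscall
convention onto the ia64 out registers: eax selects the handler, and
ebx, ecx, edx, esi, edi, ebp become out0-out5. Out-of-range numbers
leave the table index at 0, which holds ni_syscall per the "force
ni_syscall" comment above. A sketch with a placeholder table:

    #include <stdint.h>

    #define IA32_NR_syscalls 2  /* placeholder table size */

    typedef long (*ia32_fn)(uint32_t ebx, uint32_t ecx, uint32_t edx,
                            uint32_t esi, uint32_t edi, uint32_t ebp);

    static long ni_syscall(uint32_t a, uint32_t b, uint32_t c,
                           uint32_t d, uint32_t e, uint32_t f)
    {
        (void)a; (void)b; (void)c; (void)d; (void)e; (void)f;
        return -38;  /* -ENOSYS */
    }

    static ia32_fn ia32_syscall_table[IA32_NR_syscalls] =
        { ni_syscall, ni_syscall };

    static long ia32_dispatch(uint32_t eax, uint32_t ebx, uint32_t ecx,
                              uint32_t edx, uint32_t esi, uint32_t edi,
                              uint32_t ebp)
    {
        /* cmp.ltu.unc p6,p7=r8,r15; invalid numbers keep index 0 */
        ia32_fn fn = ia32_syscall_table[eax < IA32_NR_syscalls ? eax : 0];
        return fn(ebx, ecx, edx, esi, edi, ebp);  /* out0..out5 */
    }
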
-diff --git a/arch/ia64/xen/xenminstate.h b/arch/ia64/xen/xenminstate.h
-new file mode 100644
-index 0000000..c6df57e
---- /dev/null
-+++ b/arch/ia64/xen/xenminstate.h
-@@ -0,0 +1,367 @@
-+#include <linux/config.h>
-+
-+#include <asm/cache.h>
-+
-+#ifdef CONFIG_XEN
-+#include "../kernel/entry.h"
-+#else
-+#include "entry.h"
-+#endif
-+
-+/*
-+ * For ivt.s we want to access the stack virtually so we don't have to disable translation
-+ * on interrupts.
-+ *
-+ * On entry:
-+ * r1: pointer to current task (ar.k6)
-+ */
-+#define MINSTATE_START_SAVE_MIN_VIRT \
-+(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
-+ ;; \
-+(pUStk) mov.m r24=ar.rnat; \
-+(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \
-+(pKStk) mov r1=sp; /* get sp */ \
-+ ;; \
-+(pUStk) lfetch.fault.excl.nt1 [r22]; \
-+(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
-+(pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
-+ ;; \
-+(pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
-+(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
-+ ;; \
-+(pUStk) mov r18=ar.bsp; \
-+(pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \
-+
-+#define MINSTATE_END_SAVE_MIN_VIRT \
-+ bsw.1; /* switch back to bank 1 (must be last in insn group) */ \
-+ ;;
-+
-+/*
-+ * For mca_asm.S we want to access the stack physically since the state is saved before we
-+ * go virtual and don't want to destroy the iip or ipsr.
-+ */
-+#define MINSTATE_START_SAVE_MIN_PHYS \
-+(pKStk) mov r3=IA64_KR(PER_CPU_DATA);; \
-+(pKStk) addl r3=THIS_CPU(ia64_mca_data),r3;; \
-+(pKStk) ld8 r3 = [r3];; \
-+(pKStk) addl r3=IA64_MCA_CPU_INIT_STACK_OFFSET,r3;; \
-+(pKStk) addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r3; \
-+(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
-+(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of register backing store */ \
-+ ;; \
-+(pUStk) mov r24=ar.rnat; \
-+(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
-+(pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
-+(pUStk) dep r22=-1,r22,61,3; /* compute kernel virtual addr of RBS */ \
-+ ;; \
-+(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
-+(pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
-+ ;; \
-+(pUStk) mov r18=ar.bsp; \
-+(pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \
-+
-+#define MINSTATE_END_SAVE_MIN_PHYS \
-+ dep r12=-1,r12,61,3; /* make sp a kernel virtual address */ \
-+ ;;
-+
-+#ifdef MINSTATE_VIRT
-+# define MINSTATE_GET_CURRENT(reg) mov reg=IA64_KR(CURRENT)
-+# define MINSTATE_START_SAVE_MIN MINSTATE_START_SAVE_MIN_VIRT
-+# define MINSTATE_END_SAVE_MIN MINSTATE_END_SAVE_MIN_VIRT
-+#endif
-+
-+#ifdef MINSTATE_PHYS
-+# define MINSTATE_GET_CURRENT(reg) mov reg=IA64_KR(CURRENT);; tpa reg=reg
-+# define MINSTATE_START_SAVE_MIN MINSTATE_START_SAVE_MIN_PHYS
-+# define MINSTATE_END_SAVE_MIN MINSTATE_END_SAVE_MIN_PHYS
-+#endif
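
Both SAVE_MIN flavors end up computing the ar.rsc value used for the
eventual "loadrs": the size of the dirty RSE region (ar.bsp minus the
kernel backing-store base) shifted into the loadrs field at bits 16-29.
The arithmetic, sketched in C under that simplified reading; the helper
name is illustrative:

    #include <stdint.h>

    static uint64_t rsc_for_loadrs(uint64_t bsp, uint64_t krbs_base)
    {
        uint64_t ndirty_bytes = bsp - krbs_base;  /* sub r18=r18,r22 */
        return ndirty_bytes << 16;                /* shl r18=r18,16  */
    }
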
-+
-+/*
-+ * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
-+ * the minimum state necessary that allows us to turn psr.ic back
-+ * on.
-+ *
-+ * Assumed state upon entry:
-+ * psr.ic: off
-+ * r31: contains saved predicates (pr)
-+ *
-+ * Upon exit, the state is as follows:
-+ * psr.ic: off
-+ * r2 = points to &pt_regs.r16
-+ * r8 = contents of ar.ccv
-+ * r9 = contents of ar.csd
-+ * r10 = contents of ar.ssd
-+ * r11 = FPSR_DEFAULT
-+ * r12 = kernel sp (kernel virtual address)
-+ * r13 = points to current task_struct (kernel virtual address)
-+ * p15 = TRUE if psr.i is set in cr.ipsr
-+ * predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
-+ * preserved
-+ * CONFIG_XEN note: p6/p7 are not preserved
-+ *
-+ * Note that psr.ic is NOT turned on by this macro. This is so that
-+ * we can pass interruption state as arguments to a handler.
-+ */
-+#ifdef CONFIG_XEN
-+#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \
-+ MINSTATE_GET_CURRENT(r16); /* M (or M;;I) */ \
-+ mov r27=ar.rsc; /* M */ \
-+ mov r20=r1; /* A */ \
-+ mov r25=ar.unat; /* M */ \
-+ /* mov r29=cr.ipsr; /* M */ \
-+ movl r29=XSI_IPSR;; \
-+ ld8 r29=[r29];; \
-+ mov r26=ar.pfs; /* I */ \
-+ /* mov r28=cr.iip; /* M */ \
-+ movl r28=XSI_IIP;; \
-+ ld8 r28=[r28];; \
-+ mov r21=ar.fpsr; /* M */ \
-+ COVER; /* B;; (or nothing) */ \
-+ ;; \
-+ adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \
-+ ;; \
-+ ld1 r17=[r16]; /* load current->thread.on_ustack flag */ \
-+ st1 [r16]=r0; /* clear current->thread.on_ustack flag */ \
-+ adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 \
-+ /* switch from user to kernel RBS: */ \
-+ ;; \
-+ invala; /* M */ \
-+ /* SAVE_IFS; /* see xen special handling below */ \
-+ cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? */ \
-+ ;; \
-+ MINSTATE_START_SAVE_MIN \
-+ adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \
-+ adds r16=PT(CR_IPSR),r1; \
-+ ;; \
-+ lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
-+ st8 [r16]=r29; /* save cr.ipsr */ \
-+ ;; \
-+ lfetch.fault.excl.nt1 [r17]; \
-+ tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \
-+ mov r29=b0 \
-+ ;; \
-+ adds r16=PT(R8),r1; /* initialize first base pointer */ \
-+ adds r17=PT(R9),r1; /* initialize second base pointer */ \
-+(pKStk) mov r18=r0; /* make sure r18 isn't NaT */ \
-+ ;; \
-+.mem.offset 0,0; st8.spill [r16]=r8,16; \
-+.mem.offset 8,0; st8.spill [r17]=r9,16; \
-+ ;; \
-+.mem.offset 0,0; st8.spill [r16]=r10,24; \
-+.mem.offset 8,0; st8.spill [r17]=r11,24; \
-+ ;; \
-+ /* xen special handling for possibly lazy cover */ \
-+ movl r8=XSI_INCOMPL_REGFR; \
-+ ;; \
-+ ld4 r30=[r8]; \
-+ ;; \
-+ cmp.eq p6,p7=r30,r0; \
-+ ;; /* not sure if this stop bit is necessary */ \
-+(p6) adds r8=XSI_PRECOVER_IFS-XSI_INCOMPL_REGFR,r8; \
-+(p7) adds r8=XSI_IFS-XSI_INCOMPL_REGFR,r8; \
-+ ;; \
-+ ld8 r30=[r8]; \
-+ ;; \
-+ st8 [r16]=r28,16; /* save cr.iip */ \
-+ st8 [r17]=r30,16; /* save cr.ifs */ \
-+(pUStk) sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \
-+ mov r8=ar.ccv; \
-+ mov r9=ar.csd; \
-+ mov r10=ar.ssd; \
-+ movl r11=FPSR_DEFAULT; /* L-unit */ \
-+ ;; \
-+ st8 [r16]=r25,16; /* save ar.unat */ \
-+ st8 [r17]=r26,16; /* save ar.pfs */ \
-+ shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \
-+ ;; \
-+ st8 [r16]=r27,16; /* save ar.rsc */ \
-+(pUStk) st8 [r17]=r24,16; /* save ar.rnat */ \
-+(pKStk) adds r17=16,r17; /* skip over ar_rnat field */ \
-+ ;; /* avoid RAW on r16 & r17 */ \
-+(pUStk) st8 [r16]=r23,16; /* save ar.bspstore */ \
-+ st8 [r17]=r31,16; /* save predicates */ \
-+(pKStk) adds r16=16,r16; /* skip over ar_bspstore field */ \
-+ ;; \
-+ st8 [r16]=r29,16; /* save b0 */ \
-+ st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \
-+ cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \
-+ ;; \
-+.mem.offset 0,0; st8.spill [r16]=r20,16; /* save original r1 */ \
-+.mem.offset 8,0; st8.spill [r17]=r12,16; \
-+ adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \
-+ ;; \
-+.mem.offset 0,0; st8.spill [r16]=r13,16; \
-+.mem.offset 8,0; st8.spill [r17]=r21,16; /* save ar.fpsr */ \
-+ mov r13=IA64_KR(CURRENT); /* establish `current' */ \
-+ ;; \
-+.mem.offset 0,0; st8.spill [r16]=r15,16; \
-+.mem.offset 8,0; st8.spill [r17]=r14,16; \
-+ ;; \
-+.mem.offset 0,0; st8.spill [r16]=r2,16; \
-+.mem.offset 8,0; st8.spill [r17]=r3,16; \
-+ ;; \
-+ EXTRA; \
-+ mov r2=b0; br.call.sptk b0=xen_bsw1;; mov b0=r2; \
-+ adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
-+ ;; \
-+ movl r1=__gp; /* establish kernel global pointer */ \
-+ ;; \
-+ /* MINSTATE_END_SAVE_MIN */
-+#else
-+#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \
-+ MINSTATE_GET_CURRENT(r16); /* M (or M;;I) */ \
-+ mov r27=ar.rsc; /* M */ \
-+ mov r20=r1; /* A */ \
-+ mov r25=ar.unat; /* M */ \
-+ mov r29=cr.ipsr; /* M */ \
-+ mov r26=ar.pfs; /* I */ \
-+ mov r28=cr.iip; /* M */ \
-+ mov r21=ar.fpsr; /* M */ \
-+ COVER; /* B;; (or nothing) */ \
-+ ;; \
-+ adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \
-+ ;; \
-+ ld1 r17=[r16]; /* load current->thread.on_ustack flag */ \
-+ st1 [r16]=r0; /* clear current->thread.on_ustack flag */ \
-+ adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 \
-+ /* switch from user to kernel RBS: */ \
-+ ;; \
-+ invala; /* M */ \
-+ SAVE_IFS; \
-+ cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? */ \
-+ ;; \
-+ MINSTATE_START_SAVE_MIN \
-+ adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \
-+ adds r16=PT(CR_IPSR),r1; \
-+ ;; \
-+ lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
-+ st8 [r16]=r29; /* save cr.ipsr */ \
-+ ;; \
-+ lfetch.fault.excl.nt1 [r17]; \
-+ tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \
-+ mov r29=b0 \
-+ ;; \
-+ adds r16=PT(R8),r1; /* initialize first base pointer */ \
-+ adds r17=PT(R9),r1; /* initialize second base pointer */ \
-+(pKStk) mov r18=r0; /* make sure r18 isn't NaT */ \
-+ ;; \
-+.mem.offset 0,0; st8.spill [r16]=r8,16; \
-+.mem.offset 8,0; st8.spill [r17]=r9,16; \
-+ ;; \
-+.mem.offset 0,0; st8.spill [r16]=r10,24; \
-+.mem.offset 8,0; st8.spill [r17]=r11,24; \
-+ ;; \
-+ st8 [r16]=r28,16; /* save cr.iip */ \
-+ st8 [r17]=r30,16; /* save cr.ifs */ \
-+(pUStk) sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \
-+ mov r8=ar.ccv; \
-+ mov r9=ar.csd; \
-+ mov r10=ar.ssd; \
-+ movl r11=FPSR_DEFAULT; /* L-unit */ \
-+ ;; \
-+ st8 [r16]=r25,16; /* save ar.unat */ \
-+ st8 [r17]=r26,16; /* save ar.pfs */ \
-+ shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \
-+ ;; \
-+ st8 [r16]=r27,16; /* save ar.rsc */ \
-+(pUStk) st8 [r17]=r24,16; /* save ar.rnat */ \
-+(pKStk) adds r17=16,r17; /* skip over ar_rnat field */ \
-+ ;; /* avoid RAW on r16 & r17 */ \
-+(pUStk) st8 [r16]=r23,16; /* save ar.bspstore */ \
-+ st8 [r17]=r31,16; /* save predicates */ \
-+(pKStk) adds r16=16,r16; /* skip over ar_bspstore field */ \
-+ ;; \
-+ st8 [r16]=r29,16; /* save b0 */ \
-+ st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \
-+ cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \
-+ ;; \
-+.mem.offset 0,0; st8.spill [r16]=r20,16; /* save original r1 */ \
-+.mem.offset 8,0; st8.spill [r17]=r12,16; \
-+ adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \
-+ ;; \
-+.mem.offset 0,0; st8.spill [r16]=r13,16; \
-+.mem.offset 8,0; st8.spill [r17]=r21,16; /* save ar.fpsr */ \
-+ mov r13=IA64_KR(CURRENT); /* establish `current' */ \
-+ ;; \
-+.mem.offset 0,0; st8.spill [r16]=r15,16; \
-+.mem.offset 8,0; st8.spill [r17]=r14,16; \
-+ ;; \
-+.mem.offset 0,0; st8.spill [r16]=r2,16; \
-+.mem.offset 8,0; st8.spill [r17]=r3,16; \
-+ adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
-+ ;; \
-+ EXTRA; \
-+ movl r1=__gp; /* establish kernel global pointer */ \
-+ ;; \
-+ MINSTATE_END_SAVE_MIN
-+#endif
-+
-+/*
-+ * SAVE_REST saves the remainder of pt_regs (with psr.ic on).
-+ *
-+ * Assumed state upon entry:
-+ * psr.ic: on
-+ * r2: points to &pt_regs.r16
-+ * r3: points to &pt_regs.r17
-+ * r8: contents of ar.ccv
-+ * r9: contents of ar.csd
-+ * r10: contents of ar.ssd
-+ * r11: FPSR_DEFAULT
-+ *
-+ * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
-+ */
-+#define SAVE_REST \
-+.mem.offset 0,0; st8.spill [r2]=r16,16; \
-+.mem.offset 8,0; st8.spill [r3]=r17,16; \
-+ ;; \
-+.mem.offset 0,0; st8.spill [r2]=r18,16; \
-+.mem.offset 8,0; st8.spill [r3]=r19,16; \
-+ ;; \
-+.mem.offset 0,0; st8.spill [r2]=r20,16; \
-+.mem.offset 8,0; st8.spill [r3]=r21,16; \
-+ mov r18=b6; \
-+ ;; \
-+.mem.offset 0,0; st8.spill [r2]=r22,16; \
-+.mem.offset 8,0; st8.spill [r3]=r23,16; \
-+ mov r19=b7; \
-+ ;; \
-+.mem.offset 0,0; st8.spill [r2]=r24,16; \
-+.mem.offset 8,0; st8.spill [r3]=r25,16; \
-+ ;; \
-+.mem.offset 0,0; st8.spill [r2]=r26,16; \
-+.mem.offset 8,0; st8.spill [r3]=r27,16; \
-+ ;; \
-+.mem.offset 0,0; st8.spill [r2]=r28,16; \
-+.mem.offset 8,0; st8.spill [r3]=r29,16; \
-+ ;; \
-+.mem.offset 0,0; st8.spill [r2]=r30,16; \
-+.mem.offset 8,0; st8.spill [r3]=r31,32; \
-+ ;; \
-+ mov ar.fpsr=r11; /* M-unit */ \
-+ st8 [r2]=r8,8; /* ar.ccv */ \
-+ adds r24=PT(B6)-PT(F7),r3; \
-+ ;; \
-+ stf.spill [r2]=f6,32; \
-+ stf.spill [r3]=f7,32; \
-+ ;; \
-+ stf.spill [r2]=f8,32; \
-+ stf.spill [r3]=f9,32; \
-+ ;; \
-+ stf.spill [r2]=f10; \
-+ stf.spill [r3]=f11; \
-+ adds r25=PT(B7)-PT(F11),r3; \
-+ ;; \
-+ st8 [r24]=r18,16; /* b6 */ \
-+ st8 [r25]=r19,16; /* b7 */ \
-+ ;; \
-+ st8 [r24]=r9; /* ar.csd */ \
-+ st8 [r25]=r10; /* ar.ssd */ \
-+ ;;
-+
-+#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(cover, mov r30=cr.ifs,)
-+#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19)
-+#ifdef CONFIG_XEN
-+#define SAVE_MIN break 0;; /* FIXME: non-cover version only for ia32 support? */
-+#else
-+#define SAVE_MIN DO_SAVE_MIN( , mov r30=r0, )
-+#endif
-diff --git a/arch/ia64/xen/xenpal.S b/arch/ia64/xen/xenpal.S
-new file mode 100644
-index 0000000..2fd45e2
---- /dev/null
-+++ b/arch/ia64/xen/xenpal.S
-@@ -0,0 +1,73 @@
-+/*
-+ * ia64/xen/xenpal.S
-+ *
-+ * Alternate PAL routines for Xen. Heavily leveraged from
-+ * ia64/kernel/pal.S
-+ *
-+ * Copyright (C) 2005 Hewlett-Packard Co
-+ * Dan Magenheimer <dan.magenheimer@hp.com>
-+ */
-+
-+#include <asm/asmmacro.h>
-+#include <asm/processor.h>
-+
-+GLOBAL_ENTRY(xen_pal_call_static)
-+ .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
-+ alloc loc1 = ar.pfs,5,5,0,0
-+#ifdef CONFIG_XEN
-+ movl r22=running_on_xen;;
-+ ld4 r22=[r22];;
-+ cmp.eq p7,p0=r22,r0
-+(p7) br.cond.spnt.many __ia64_pal_call_static;;
-+#endif
-+ movl loc2 = pal_entry_point
-+1: {
-+ mov r28 = in0
-+ mov r29 = in1
-+ mov r8 = ip
-+ }
-+ ;;
-+ ld8 loc2 = [loc2] // loc2 <- entry point
-+ tbit.nz p6,p7 = in4, 0
-+ adds r8 = 1f-1b,r8
-+ mov loc4=ar.rsc // save RSE configuration
-+ ;;
-+ mov ar.rsc=0 // put RSE in enforced lazy, LE mode
-+ mov loc3 = psr
-+ mov loc0 = rp
-+ .body
-+ mov r30 = in2
-+
-+#ifdef CONFIG_XEN
-+ // this is low priority for paravirtualization, but is called
-+ // from the idle loop so confuses privop counting
-+ movl r31=XSI_PSR_IC
-+ ;;
-+(p6) st8 [r31]=r0
-+ ;;
-+(p7) adds r31=XSI_PSR_I-XSI_PSR_IC,r31
-+ ;;
-+(p7) st4 [r31]=r0
-+ ;;
-+ mov r31 = in3
-+ mov b7 = loc2
-+ ;;
-+#else
-+(p6) rsm psr.i | psr.ic
-+ mov r31 = in3
-+ mov b7 = loc2
-+
-+(p7) rsm psr.i
-+ ;;
-+(p6) srlz.i
-+#endif
-+ mov rp = r8
-+ br.cond.sptk.many b7
-+1: mov psr.l = loc3
-+ mov ar.rsc = loc4 // restore RSE configuration
-+ mov ar.pfs = loc1
-+ mov rp = loc0
-+ ;;
-+	srlz.d				// serialize restoration of psr.l
-+ br.ret.sptk.many b0
-+END(xen_pal_call_static)
-diff --git a/arch/ia64/xen/xensetup.S b/arch/ia64/xen/xensetup.S
-new file mode 100644
-index 0000000..c611974
---- /dev/null
-+++ b/arch/ia64/xen/xensetup.S
-@@ -0,0 +1,35 @@
-+/*
-+ * Support routines for Xen
-+ *
-+ * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer@hp.com>
-+ */
-+
-+#include <linux/config.h>
-+#include <asm/processor.h>
-+#include <asm/asmmacro.h>
-+
-+ .data
-+ .align 8
-+ .globl running_on_xen
-+running_on_xen:
-+ data4 0
-+
-+#define isBP p3 // are we the Bootstrap Processor?
-+
-+ .text
-+GLOBAL_ENTRY(early_xen_setup)
-+ mov r8=cr.dcr
-+(isBP) movl r9=running_on_xen;;
-+ extr.u r8=r8,63,1;;
-+ cmp.ne p7,p0=r8,r0;;
-+(isBP) st4 [r9]=r8
-+(p7) movl r10=xen_ivt;;
-+(p7) mov cr.iva=r10
-+ br.ret.sptk.many rp;;
-+END(early_xen_setup)
-+
-+GLOBAL_ENTRY(is_running_on_xen)
-+ movl r9=running_on_xen;;
-+ ld4 r8=[r9]
-+ br.ret.sptk.many rp;;
-+END(is_running_on_xen)
-diff --git a/arch/um/kernel/physmem.c b/arch/um/kernel/physmem.c
-index 544665e..dece10d 100644
---- a/arch/um/kernel/physmem.c
-+++ b/arch/um/kernel/physmem.c
-@@ -225,7 +225,7 @@ EXPORT_SYMBOL(physmem_forget_descriptor)
- EXPORT_SYMBOL(physmem_remove_mapping);
- EXPORT_SYMBOL(physmem_subst_mapping);
-
--void arch_free_page(struct page *page, int order)
-+int arch_free_page(struct page *page, int order)
- {
- void *virt;
- int i;
-@@ -234,6 +234,8 @@ void arch_free_page(struct page *page, i
- virt = __va(page_to_phys(page + i));
- physmem_remove_mapping(virt);
- }
-+
-+ return 0;
- }
-
- int is_remapped(void *virt)
-diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
-index e18eb79..75e35a5 100644
---- a/arch/x86_64/Kconfig
-+++ b/arch/x86_64/Kconfig
-@@ -119,6 +119,22 @@ config GENERIC_CPU
-
- endchoice
-
-+config X86_64_XEN
-+ bool "Enable Xen compatible kernel"
-+ select SWIOTLB
-+ help
-+	  This option will compile a kernel compatible with the Xen hypervisor
-+
-+config X86_NO_TSS
-+ bool
-+ depends on X86_64_XEN
-+ default y
-+
-+config X86_NO_IDT
-+ bool
-+ depends on X86_64_XEN
-+ default y
-+
- #
- # Define implied options from the CPU selection here
- #
-@@ -134,6 +150,7 @@ config X86_L1_CACHE_SHIFT
-
- config X86_TSC
- bool
-+ depends on !X86_64_XEN
- default y
-
- config X86_GOOD_APIC
-@@ -176,7 +193,7 @@ config X86_CPUID
-
- config X86_HT
- bool
-- depends on SMP && !MK8
-+ depends on SMP && !MK8 && !X86_64_XEN
- default y
-
- config MATH_EMULATION
-@@ -190,14 +207,22 @@ config EISA
-
- config X86_IO_APIC
- bool
-+ depends !XEN_UNPRIVILEGED_GUEST
- default y
-
-+config X86_XEN_GENAPIC
-+ bool
-+ depends X86_64_XEN
-+ default XEN_PRIVILEGED_GUEST || SMP
-+
- config X86_LOCAL_APIC
- bool
-+ depends !XEN_UNPRIVILEGED_GUEST
- default y
-
- config MTRR
- bool "MTRR (Memory Type Range Register) support"
-+ depends on !X86_64_XEN
- ---help---
- On Intel P6 family processors (Pentium Pro, Pentium II and later)
- the Memory Type Range Registers (MTRRs) may be used to control
-@@ -238,7 +263,7 @@ config SMP
-
- config SCHED_SMT
- bool "SMT (Hyperthreading) scheduler support"
-- depends on SMP
-+ depends on SMP && !X86_64_XEN
- default n
- help
- SMT scheduler support improves the CPU scheduler's decision making
-@@ -250,7 +275,7 @@ source "kernel/Kconfig.preempt"
-
- config NUMA
- bool "Non Uniform Memory Access (NUMA) Support"
-- depends on SMP
-+ depends on SMP && !X86_64_XEN
- help
- Enable NUMA (Non Uniform Memory Access) support. The kernel
- will try to allocate memory used by a CPU on the local memory
-@@ -325,6 +350,7 @@ config NR_CPUS
- int "Maximum number of CPUs (2-256)"
- range 2 256
- depends on SMP
-+ default "16" if X86_64_XEN
- default "8"
- help
- This allows you to specify the maximum number of CPUs which this
-@@ -345,6 +371,7 @@ config HOTPLUG_CPU
-
- config HPET_TIMER
- bool
-+ depends on !X86_64_XEN
- default y
- help
- Use the IA-PC HPET (High Precision Event Timer) to manage
-@@ -362,7 +389,7 @@ config GART_IOMMU
- bool "K8 GART IOMMU support"
- default y
- select SWIOTLB
-- depends on PCI
-+ depends on PCI && !X86_64_XEN
- help
- Support the IOMMU. Needed to run systems with more than 3GB of memory
- properly with 32-bit PCI devices that do not support DAC (Double Address
-@@ -380,6 +407,7 @@ config SWIOTLB
-
- config X86_MCE
- bool "Machine check support" if EMBEDDED
-+ depends on !X86_64_XEN
- default y
- help
- Include a machine check error handler to report hardware errors.
-@@ -405,7 +433,7 @@ config X86_MCE_AMD
-
- config KEXEC
- bool "kexec system call (EXPERIMENTAL)"
-- depends on EXPERIMENTAL
-+ depends on EXPERIMENTAL && !X86_64_XEN
- help
- kexec is a system call that implements the ability to shutdown your
- current kernel, and to start another kernel. It is like a reboot
-@@ -488,8 +516,11 @@ config GENERIC_PENDING_IRQ
- default y
-
- menu "Power management options"
-+ depends on !XEN_UNPRIVILEGED_GUEST
-
-+if !X86_64_XEN
- source kernel/power/Kconfig
-+endif
-
- source "drivers/acpi/Kconfig"
-
-@@ -512,6 +543,21 @@ config PCI_MMCONFIG
- bool "Support mmconfig PCI config space access"
- depends on PCI && ACPI
-
-+config XEN_PCIDEV_FRONTEND
-+ bool "Xen PCI Frontend"
-+ depends on PCI && X86_64_XEN
-+ default y
-+ help
-+ The PCI device frontend driver allows the kernel to import arbitrary
-+ PCI devices from a PCI backend to support PCI driver domains.
-+
-+config XEN_PCIDEV_FE_DEBUG
-+ bool "Xen PCI Frontend Debugging"
-+ depends on XEN_PCIDEV_FRONTEND
-+ default n
-+ help
-+ Enables some debug statements within the PCI Frontend.
-+
- config UNORDERED_IO
- bool "Unordered IO mapping access"
- depends on EXPERIMENTAL
-@@ -522,6 +568,7 @@ config UNORDERED_IO
- from i386. Requires that the driver writer used memory barriers
- properly.
-
-+if !X86_64_XEN
- source "drivers/pci/pcie/Kconfig"
-
- source "drivers/pci/Kconfig"
-@@ -529,6 +576,7 @@ source "drivers/pci/Kconfig"
- source "drivers/pcmcia/Kconfig"
-
- source "drivers/pci/hotplug/Kconfig"
-+endif
-
- endmenu
-
-@@ -592,4 +640,6 @@ source "security/Kconfig"
-
- source "crypto/Kconfig"
-
-+source "drivers/xen/Kconfig"
-+
- source "lib/Kconfig"
-diff --git a/arch/x86_64/Makefile b/arch/x86_64/Makefile
-index d7fd464..05a611c 100644
---- a/arch/x86_64/Makefile
-+++ b/arch/x86_64/Makefile
-@@ -70,6 +70,20 @@ boot := arch/x86_64/boot
- .PHONY: bzImage bzlilo install archmrproper \
- fdimage fdimage144 fdimage288 archclean
-
-+ifdef CONFIG_XEN
-+head-y := arch/x86_64/kernel/head-xen.o arch/x86_64/kernel/head64-xen.o arch/x86_64/kernel/init_task.o
-+LDFLAGS_vmlinux := -e _start
-+boot := arch/i386/boot-xen
-+.PHONY: vmlinuz
-+#Default target when executing "make"
-+all: vmlinuz
-+
-+vmlinuz: vmlinux
-+ $(Q)$(MAKE) $(build)=$(boot) $@
-+
-+install:
-+ $(Q)$(MAKE) $(build)=$(boot) XENGUEST=$(XENGUEST) $@
-+else
- #Default target when executing "make"
- all: bzImage
-
-@@ -90,6 +104,7 @@ fdimage fdimage144 fdimage288: vmlinux
-
- install:
- $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@
-+endif
-
- archclean:
- $(Q)$(MAKE) $(clean)=$(boot)
-diff --git a/arch/x86_64/ia32/Makefile b/arch/x86_64/ia32/Makefile
-index 929e6b0..6dcd7cf 100644
---- a/arch/x86_64/ia32/Makefile
-+++ b/arch/x86_64/ia32/Makefile
-@@ -23,9 +23,25 @@ quiet_cmd_syscall = SYSCALL $@
- -Wl,-soname=linux-gate.so.1 -o $@ \
- -Wl,-T,$(filter-out FORCE,$^)
-
-+$(obj)/vsyscall-int80.so \
- $(obj)/vsyscall-sysenter.so $(obj)/vsyscall-syscall.so: \
- $(obj)/vsyscall-%.so: $(src)/vsyscall.lds $(obj)/vsyscall-%.o FORCE
- $(call if_changed,syscall)
-
--AFLAGS_vsyscall-sysenter.o = -m32
--AFLAGS_vsyscall-syscall.o = -m32
-+AFLAGS_vsyscall-sysenter.o = -m32 -Iarch/i386/kernel
-+AFLAGS_vsyscall-syscall.o = -m32 -Iarch/i386/kernel
-+
-+ifdef CONFIG_XEN
-+AFLAGS_vsyscall-int80.o = -m32 -Iarch/i386/kernel
-+CFLAGS_syscall32-xen.o += -DUSE_INT80
-+AFLAGS_syscall32_syscall-xen.o += -DUSE_INT80
-+
-+$(obj)/syscall32_syscall-xen.o: \
-+ $(foreach F,int80 sysenter syscall,$(obj)/vsyscall-$F.so)
-+
-+targets := $(foreach F,int80 sysenter syscall,vsyscall-$F.o vsyscall-$F.so)
-+
-+include $(srctree)/scripts/Makefile.xen
-+
-+obj-y := $(call cherrypickxen, $(obj-y))
-+endif
-diff --git a/arch/x86_64/ia32/ia32entry-xen.S b/arch/x86_64/ia32/ia32entry-xen.S
-new file mode 100644
-index 0000000..73129e2
---- /dev/null
-+++ b/arch/x86_64/ia32/ia32entry-xen.S
-@@ -0,0 +1,721 @@
-+/*
-+ * Compatibility mode system call entry point for x86-64.
-+ *
-+ * Copyright 2000-2002 Andi Kleen, SuSE Labs.
-+ */
-+
-+#include <asm/dwarf2.h>
-+#include <asm/calling.h>
-+#include <asm/asm-offsets.h>
-+#include <asm/current.h>
-+#include <asm/errno.h>
-+#include <asm/ia32_unistd.h>
-+#include <asm/thread_info.h>
-+#include <asm/segment.h>
-+#include <asm/vsyscall32.h>
-+#include <linux/linkage.h>
-+
-+#define __XEN_X86_64 1
-+
-+ .macro IA32_ARG_FIXUP noebp=0
-+ movl %edi,%r8d
-+ .if \noebp
-+ .else
-+ movl %ebp,%r9d
-+ .endif
-+ xchg %ecx,%esi
-+ movl %ebx,%edi
-+ movl %edx,%edx /* zero extension */
-+ .endm
-+
-+ /* clobbers %eax */
-+ .macro CLEAR_RREGS
-+ xorl %eax,%eax
-+ movq %rax,R11(%rsp)
-+ movq %rax,R10(%rsp)
-+ movq %rax,R9(%rsp)
-+ movq %rax,R8(%rsp)
-+ .endm
-+
-+#if defined (__XEN_X86_64)
-+#include "../kernel/xen_entry.S"
-+
-+#define __swapgs
-+#define __cli
-+#define __sti
-+#else
-+/*
-+ * Use the native instructions
-+ */
-+#define __swapgs swapgs
-+#define __cli cli
-+#define __sti sti
-+#endif
-+
-+ .macro CFI_STARTPROC32 simple
-+ CFI_STARTPROC \simple
-+ CFI_UNDEFINED r8
-+ CFI_UNDEFINED r9
-+ CFI_UNDEFINED r10
-+ CFI_UNDEFINED r11
-+ CFI_UNDEFINED r12
-+ CFI_UNDEFINED r13
-+ CFI_UNDEFINED r14
-+ CFI_UNDEFINED r15
-+ .endm
-+
-+/*
-+ * 32bit SYSENTER instruction entry.
-+ *
-+ * Arguments:
-+ * %eax System call number.
-+ * %ebx Arg1
-+ * %ecx Arg2
-+ * %edx Arg3
-+ * %esi Arg4
-+ * %edi Arg5
-+ * %ebp user stack
-+ * 0(%ebp) Arg6
-+ *
-+ * Interrupts off.
-+ *
-+ * This is purely a fast path. For anything complicated we use the int 0x80
-+ * path below. Set up a complete hardware stack frame to share code
-+ * with the int 0x80 path.
-+ */
-+ENTRY(ia32_sysenter_target)
-+ CFI_STARTPROC32 simple
-+ CFI_DEF_CFA rsp,0
-+ CFI_REGISTER rsp,rbp
-+ __swapgs
-+ movq %gs:pda_kernelstack, %rsp
-+ addq $(PDA_STACKOFFSET),%rsp
-+ XEN_UNBLOCK_EVENTS(%r11)
-+ __sti
-+ movl %ebp,%ebp /* zero extension */
-+ pushq $__USER32_DS
-+ CFI_ADJUST_CFA_OFFSET 8
-+ /*CFI_REL_OFFSET ss,0*/
-+ pushq %rbp
-+ CFI_ADJUST_CFA_OFFSET 8
-+ CFI_REL_OFFSET rsp,0
-+ pushfq
-+ CFI_ADJUST_CFA_OFFSET 8
-+ /*CFI_REL_OFFSET rflags,0*/
-+ movl $VSYSCALL32_SYSEXIT, %r10d
-+ CFI_REGISTER rip,r10
-+ pushq $__USER32_CS
-+ CFI_ADJUST_CFA_OFFSET 8
-+ /*CFI_REL_OFFSET cs,0*/
-+ movl %eax, %eax
-+ pushq %r10
-+ CFI_ADJUST_CFA_OFFSET 8
-+ CFI_REL_OFFSET rip,0
-+ pushq %rax
-+ CFI_ADJUST_CFA_OFFSET 8
-+ cld
-+ SAVE_ARGS 0,0,1
-+ /* no need to do an access_ok check here because rbp has been
-+ 32bit zero extended */
-+1: movl (%rbp),%r9d
-+ .section __ex_table,"a"
-+ .quad 1b,ia32_badarg
-+ .previous
-+ GET_THREAD_INFO(%r10)
-+ orl $TS_COMPAT,threadinfo_status(%r10)
-+ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
-+ CFI_REMEMBER_STATE
-+ jnz sysenter_tracesys
-+sysenter_do_call:
-+ cmpl $(IA32_NR_syscalls),%eax
-+ jae ia32_badsys
-+ IA32_ARG_FIXUP 1
-+ call *ia32_sys_call_table(,%rax,8)
-+ movq %rax,RAX-ARGOFFSET(%rsp)
-+ GET_THREAD_INFO(%r10)
-+ XEN_BLOCK_EVENTS(%r11)
-+ __cli
-+ testl $_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
-+ jnz int_ret_from_sys_call
-+ andl $~TS_COMPAT,threadinfo_status(%r10)
-+	/* clear IF, so that popfq doesn't enable interrupts early */
-+ andl $~0x200,EFLAGS-R11(%rsp)
-+ RESTORE_ARGS 1,24,1,1,1,1
-+ popfq
-+ CFI_ADJUST_CFA_OFFSET -8
-+ /*CFI_RESTORE rflags*/
-+ popq %rcx /* User %esp */
-+ CFI_ADJUST_CFA_OFFSET -8
-+ CFI_REGISTER rsp,rcx
-+ movl $VSYSCALL32_SYSEXIT,%edx /* User %eip */
-+ CFI_REGISTER rip,rdx
-+ __swapgs
-+ XEN_UNBLOCK_EVENTS(%r11)
-+ __sti /* sti only takes effect after the next instruction */
-+ /* sysexit */
-+ .byte 0xf, 0x35 /* TBD */
-+
-+sysenter_tracesys:
-+ CFI_RESTORE_STATE
-+ SAVE_REST
-+ CLEAR_RREGS
-+ movq $-ENOSYS,RAX(%rsp) /* really needed? */
-+ movq %rsp,%rdi /* &pt_regs -> arg1 */
-+ call syscall_trace_enter
-+ LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
-+ RESTORE_REST
-+ movl %ebp, %ebp
-+ /* no need to do an access_ok check here because rbp has been
-+ 32bit zero extended */
-+1: movl (%rbp),%r9d
-+ .section __ex_table,"a"
-+ .quad 1b,ia32_badarg
-+ .previous
-+ jmp sysenter_do_call
-+ CFI_ENDPROC
-+
-+/*
-+ * 32bit SYSCALL instruction entry.
-+ *
-+ * Arguments:
-+ * %eax System call number.
-+ * %ebx Arg1
-+ * %ecx return EIP
-+ * %edx Arg3
-+ * %esi Arg4
-+ * %edi Arg5
-+ * %ebp Arg2 [note: not saved in the stack frame, should not be touched]
-+ * %esp user stack
-+ * 0(%esp) Arg6
-+ *
-+ * Interrupts off.
-+ *
-+ * This is purely a fast path. For anything complicated we use the int 0x80
-+ * path below. Set up a complete hardware stack frame to share code
-+ * with the int 0x80 path.
-+ */
-+ENTRY(ia32_cstar_target)
-+ CFI_STARTPROC32 simple
-+ CFI_DEF_CFA rsp,0
-+ CFI_REGISTER rip,rcx
-+ /*CFI_REGISTER rflags,r11*/
-+ __swapgs
-+ movl %esp,%r8d
-+ CFI_REGISTER rsp,r8
-+ movq %gs:pda_kernelstack,%rsp
-+ XEN_UNBLOCK_EVENTS(%r11)
-+ __sti
-+ SAVE_ARGS 8,1,1
-+ movl %eax,%eax /* zero extension */
-+ movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
-+ movq %rcx,RIP-ARGOFFSET(%rsp)
-+ CFI_REL_OFFSET rip,RIP-ARGOFFSET
-+ movq %rbp,RCX-ARGOFFSET(%rsp) /* this lies slightly to ptrace */
-+ movl %ebp,%ecx
-+ movq $__USER32_CS,CS-ARGOFFSET(%rsp)
-+ movq $__USER32_DS,SS-ARGOFFSET(%rsp)
-+ movq %r11,EFLAGS-ARGOFFSET(%rsp)
-+ /*CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
-+ movq %r8,RSP-ARGOFFSET(%rsp)
-+ CFI_REL_OFFSET rsp,RSP-ARGOFFSET
-+ /* no need to do an access_ok check here because r8 has been
-+ 32bit zero extended */
-+ /* hardware stack frame is complete now */
-+1: movl (%r8),%r9d
-+ .section __ex_table,"a"
-+ .quad 1b,ia32_badarg
-+ .previous
-+ GET_THREAD_INFO(%r10)
-+ orl $TS_COMPAT,threadinfo_status(%r10)
-+ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
-+ CFI_REMEMBER_STATE
-+ jnz cstar_tracesys
-+cstar_do_call:
-+ cmpl $IA32_NR_syscalls,%eax
-+ jae ia32_badsys
-+ IA32_ARG_FIXUP 1
-+ call *ia32_sys_call_table(,%rax,8)
-+ movq %rax,RAX-ARGOFFSET(%rsp)
-+ GET_THREAD_INFO(%r10)
-+ XEN_BLOCK_EVENTS(%r11)
-+ __cli
-+ testl $_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
-+ jnz int_ret_from_sys_call
-+ andl $~TS_COMPAT,threadinfo_status(%r10)
-+ RESTORE_ARGS 1,-ARG_SKIP,1,1,1
-+ movl RIP-ARGOFFSET(%rsp),%ecx
-+ CFI_REGISTER rip,rcx
-+ movl EFLAGS-ARGOFFSET(%rsp),%r11d
-+ /*CFI_REGISTER rflags,r11*/
-+ movl RSP-ARGOFFSET(%rsp),%esp
-+ CFI_RESTORE rsp
-+ __swapgs
-+ sysretl /* TBD */
-+
-+cstar_tracesys:
-+ CFI_RESTORE_STATE
-+ SAVE_REST
-+ CLEAR_RREGS
-+ movq $-ENOSYS,RAX(%rsp) /* really needed? */
-+ movq %rsp,%rdi /* &pt_regs -> arg1 */
-+ call syscall_trace_enter
-+ LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
-+ RESTORE_REST
-+ movl RSP-ARGOFFSET(%rsp), %r8d
-+ /* no need to do an access_ok check here because r8 has been
-+ 32bit zero extended */
-+1: movl (%r8),%r9d
-+ .section __ex_table,"a"
-+ .quad 1b,ia32_badarg
-+ .previous
-+ jmp cstar_do_call
-+
-+ia32_badarg:
-+ movq $-EFAULT,%rax
-+ jmp ia32_sysret
-+ CFI_ENDPROC
-+
-+/*
-+ * Emulated IA32 system calls via int 0x80.
-+ *
-+ * Arguments:
-+ * %eax System call number.
-+ * %ebx Arg1
-+ * %ecx Arg2
-+ * %edx Arg3
-+ * %esi Arg4
-+ * %edi Arg5
-+ * %ebp Arg6 [note: not saved in the stack frame, should not be touched]
-+ *
-+ * Notes:
-+ * Uses the same stack frame as the x86-64 version.
-+ * All registers except %eax must be saved (but ptrace may violate that)
-+ * Arguments are zero extended. For system calls that want sign extension and
-+ * take long arguments a wrapper is needed. Most calls can just be called
-+ * directly.
-+ * Assumes it is only called from user space and entered with interrupts off.
-+ */
-+
-+ENTRY(ia32_syscall)
-+ CFI_STARTPROC simple
-+ CFI_DEF_CFA rsp,SS+8-RIP
-+ /*CFI_REL_OFFSET ss,SS-RIP*/
-+ CFI_REL_OFFSET rsp,RSP-RIP
-+ /*CFI_REL_OFFSET rflags,EFLAGS-RIP*/
-+ /*CFI_REL_OFFSET cs,CS-RIP*/
-+ CFI_REL_OFFSET rip,RIP-RIP
-+ __swapgs
-+ XEN_UNBLOCK_EVENTS(%r11)
-+ __sti
-+ movq (%rsp),%rcx
-+ movq 8(%rsp),%r11
-+ addq $0x10,%rsp /* skip rcx and r11 */
-+ movl %eax,%eax
-+ pushq %rax
-+ CFI_ADJUST_CFA_OFFSET 8
-+ cld
-+/* 1: jmp 1b */
-+ /* note the registers are not zero extended to the sf.
-+ this could be a problem. */
-+ SAVE_ARGS 0,0,1
-+ GET_THREAD_INFO(%r10)
-+ orl $TS_COMPAT,threadinfo_status(%r10)
-+ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
-+ jnz ia32_tracesys
-+ia32_do_syscall:
-+ cmpl $(IA32_NR_syscalls),%eax
-+ jae ia32_badsys
-+ IA32_ARG_FIXUP
-+ call *ia32_sys_call_table(,%rax,8) # xxx: rip relative
-+ia32_sysret:
-+ movq %rax,RAX-ARGOFFSET(%rsp)
-+ jmp int_ret_from_sys_call
-+
-+ia32_tracesys:
-+ SAVE_REST
-+ movq $-ENOSYS,RAX(%rsp) /* really needed? */
-+ movq %rsp,%rdi /* &pt_regs -> arg1 */
-+ call syscall_trace_enter
-+ LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
-+ RESTORE_REST
-+ jmp ia32_do_syscall
-+
-+ia32_badsys:
-+ movq $0,ORIG_RAX-ARGOFFSET(%rsp)
-+ movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
-+ jmp int_ret_from_sys_call
-+
-+ni_syscall:
-+ movq %rax,%rdi
-+ jmp sys32_ni_syscall
-+
-+quiet_ni_syscall:
-+ movq $-ENOSYS,%rax
-+ ret
-+ CFI_ENDPROC
-+
-+ .macro PTREGSCALL label, func, arg
-+ .globl \label
-+\label:
-+ leaq \func(%rip),%rax
-+ leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
-+ jmp ia32_ptregs_common
-+ .endm
-+
-+ CFI_STARTPROC32
-+
-+ PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn, %rdi
-+ PTREGSCALL stub32_sigreturn, sys32_sigreturn, %rdi
-+ PTREGSCALL stub32_sigaltstack, sys32_sigaltstack, %rdx
-+ PTREGSCALL stub32_sigsuspend, sys32_sigsuspend, %rcx
-+ PTREGSCALL stub32_execve, sys32_execve, %rcx
-+ PTREGSCALL stub32_fork, sys_fork, %rdi
-+ PTREGSCALL stub32_clone, sys32_clone, %rdx
-+ PTREGSCALL stub32_vfork, sys_vfork, %rdi
-+ PTREGSCALL stub32_iopl, sys_iopl, %rsi
-+ PTREGSCALL stub32_rt_sigsuspend, sys_rt_sigsuspend, %rdx
-+
-+ENTRY(ia32_ptregs_common)
-+ popq %r11
-+ CFI_ENDPROC
-+ CFI_STARTPROC32 simple
-+ CFI_DEF_CFA rsp,SS+8-ARGOFFSET
-+ CFI_REL_OFFSET rax,RAX-ARGOFFSET
-+ CFI_REL_OFFSET rcx,RCX-ARGOFFSET
-+ CFI_REL_OFFSET rdx,RDX-ARGOFFSET
-+ CFI_REL_OFFSET rsi,RSI-ARGOFFSET
-+ CFI_REL_OFFSET rdi,RDI-ARGOFFSET
-+ CFI_REL_OFFSET rip,RIP-ARGOFFSET
-+/* CFI_REL_OFFSET cs,CS-ARGOFFSET*/
-+/* CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
-+ CFI_REL_OFFSET rsp,RSP-ARGOFFSET
-+/* CFI_REL_OFFSET ss,SS-ARGOFFSET*/
-+ SAVE_REST
-+ call *%rax
-+ RESTORE_REST
-+ jmp ia32_sysret /* misbalances the return cache */
-+ CFI_ENDPROC
-+
-+ .section .rodata,"a"
-+ .align 8
-+ .globl ia32_sys_call_table
-+ia32_sys_call_table:
-+ .quad sys_restart_syscall
-+ .quad sys_exit
-+ .quad stub32_fork
-+ .quad sys_read
-+ .quad sys_write
-+ .quad compat_sys_open /* 5 */
-+ .quad sys_close
-+ .quad sys32_waitpid
-+ .quad sys_creat
-+ .quad sys_link
-+ .quad sys_unlink /* 10 */
-+ .quad stub32_execve
-+ .quad sys_chdir
-+ .quad compat_sys_time
-+ .quad sys_mknod
-+ .quad sys_chmod /* 15 */
-+ .quad sys_lchown16
-+ .quad quiet_ni_syscall /* old break syscall holder */
-+ .quad sys_stat
-+ .quad sys32_lseek
-+ .quad sys_getpid /* 20 */
-+ .quad compat_sys_mount /* mount */
-+ .quad sys_oldumount /* old_umount */
-+ .quad sys_setuid16
-+ .quad sys_getuid16
-+ .quad compat_sys_stime /* stime */ /* 25 */
-+ .quad sys32_ptrace /* ptrace */
-+ .quad sys_alarm
-+ .quad sys_fstat /* (old)fstat */
-+ .quad sys_pause
-+ .quad compat_sys_utime /* 30 */
-+ .quad quiet_ni_syscall /* old stty syscall holder */
-+ .quad quiet_ni_syscall /* old gtty syscall holder */
-+ .quad sys_access
-+ .quad sys_nice
-+ .quad quiet_ni_syscall /* 35 */ /* old ftime syscall holder */
-+ .quad sys_sync
-+ .quad sys32_kill
-+ .quad sys_rename
-+ .quad sys_mkdir
-+ .quad sys_rmdir /* 40 */
-+ .quad sys_dup
-+ .quad sys32_pipe
-+ .quad compat_sys_times
-+ .quad quiet_ni_syscall /* old prof syscall holder */
-+ .quad sys_brk /* 45 */
-+ .quad sys_setgid16
-+ .quad sys_getgid16
-+ .quad sys_signal
-+ .quad sys_geteuid16
-+ .quad sys_getegid16 /* 50 */
-+ .quad sys_acct
-+ .quad sys_umount /* new_umount */
-+ .quad quiet_ni_syscall /* old lock syscall holder */
-+ .quad compat_sys_ioctl
-+ .quad compat_sys_fcntl64 /* 55 */
-+ .quad quiet_ni_syscall /* old mpx syscall holder */
-+ .quad sys_setpgid
-+ .quad quiet_ni_syscall /* old ulimit syscall holder */
-+ .quad sys32_olduname
-+ .quad sys_umask /* 60 */
-+ .quad sys_chroot
-+ .quad sys32_ustat
-+ .quad sys_dup2
-+ .quad sys_getppid
-+ .quad sys_getpgrp /* 65 */
-+ .quad sys_setsid
-+ .quad sys32_sigaction
-+ .quad sys_sgetmask
-+ .quad sys_ssetmask
-+ .quad sys_setreuid16 /* 70 */
-+ .quad sys_setregid16
-+ .quad stub32_sigsuspend
-+ .quad compat_sys_sigpending
-+ .quad sys_sethostname
-+ .quad compat_sys_setrlimit /* 75 */
-+ .quad compat_sys_old_getrlimit /* old_getrlimit */
-+ .quad compat_sys_getrusage
-+ .quad sys32_gettimeofday
-+ .quad sys32_settimeofday
-+ .quad sys_getgroups16 /* 80 */
-+ .quad sys_setgroups16
-+ .quad sys32_old_select
-+ .quad sys_symlink
-+ .quad sys_lstat
-+ .quad sys_readlink /* 85 */
-+#ifdef CONFIG_IA32_AOUT
-+ .quad sys_uselib
-+#else
-+ .quad quiet_ni_syscall
-+#endif
-+ .quad sys_swapon
-+ .quad sys_reboot
-+ .quad compat_sys_old_readdir
-+ .quad sys32_mmap /* 90 */
-+ .quad sys_munmap
-+ .quad sys_truncate
-+ .quad sys_ftruncate
-+ .quad sys_fchmod
-+ .quad sys_fchown16 /* 95 */
-+ .quad sys_getpriority
-+ .quad sys_setpriority
-+ .quad quiet_ni_syscall /* old profil syscall holder */
-+ .quad compat_sys_statfs
-+ .quad compat_sys_fstatfs /* 100 */
-+ .quad sys_ioperm
-+ .quad compat_sys_socketcall
-+ .quad sys_syslog
-+ .quad compat_sys_setitimer
-+ .quad compat_sys_getitimer /* 105 */
-+ .quad compat_sys_newstat
-+ .quad compat_sys_newlstat
-+ .quad compat_sys_newfstat
-+ .quad sys32_uname
-+ .quad stub32_iopl /* 110 */
-+ .quad sys_vhangup
-+ .quad quiet_ni_syscall /* old "idle" system call */
-+ .quad sys32_vm86_warning /* vm86old */
-+ .quad compat_sys_wait4
-+ .quad sys_swapoff /* 115 */
-+ .quad sys32_sysinfo
-+ .quad sys32_ipc
-+ .quad sys_fsync
-+ .quad stub32_sigreturn
-+ .quad stub32_clone /* 120 */
-+ .quad sys_setdomainname
-+ .quad sys_uname
-+ .quad sys_modify_ldt
-+ .quad sys32_adjtimex
-+ .quad sys32_mprotect /* 125 */
-+ .quad compat_sys_sigprocmask
-+ .quad quiet_ni_syscall /* create_module */
-+ .quad sys_init_module
-+ .quad sys_delete_module
-+ .quad quiet_ni_syscall /* 130 get_kernel_syms */
-+ .quad sys_quotactl
-+ .quad sys_getpgid
-+ .quad sys_fchdir
-+ .quad quiet_ni_syscall /* bdflush */
-+ .quad sys_sysfs /* 135 */
-+ .quad sys_personality
-+ .quad quiet_ni_syscall /* for afs_syscall */
-+ .quad sys_setfsuid16
-+ .quad sys_setfsgid16
-+ .quad sys_llseek /* 140 */
-+ .quad compat_sys_getdents
-+ .quad compat_sys_select
-+ .quad sys_flock
-+ .quad sys_msync
-+ .quad compat_sys_readv /* 145 */
-+ .quad compat_sys_writev
-+ .quad sys_getsid
-+ .quad sys_fdatasync
-+ .quad sys32_sysctl /* sysctl */
-+ .quad sys_mlock /* 150 */
-+ .quad sys_munlock
-+ .quad sys_mlockall
-+ .quad sys_munlockall
-+ .quad sys_sched_setparam
-+ .quad sys_sched_getparam /* 155 */
-+ .quad sys_sched_setscheduler
-+ .quad sys_sched_getscheduler
-+ .quad sys_sched_yield
-+ .quad sys_sched_get_priority_max
-+ .quad sys_sched_get_priority_min /* 160 */
-+ .quad sys_sched_rr_get_interval
-+ .quad compat_sys_nanosleep
-+ .quad sys_mremap
-+ .quad sys_setresuid16
-+ .quad sys_getresuid16 /* 165 */
-+ .quad sys32_vm86_warning /* vm86 */
-+ .quad quiet_ni_syscall /* query_module */
-+ .quad sys_poll
-+ .quad compat_sys_nfsservctl
-+ .quad sys_setresgid16 /* 170 */
-+ .quad sys_getresgid16
-+ .quad sys_prctl
-+ .quad stub32_rt_sigreturn
-+ .quad sys32_rt_sigaction
-+ .quad sys32_rt_sigprocmask /* 175 */
-+ .quad sys32_rt_sigpending
-+ .quad compat_sys_rt_sigtimedwait
-+ .quad sys32_rt_sigqueueinfo
-+ .quad stub32_rt_sigsuspend
-+ .quad sys32_pread /* 180 */
-+ .quad sys32_pwrite
-+ .quad sys_chown16
-+ .quad sys_getcwd
-+ .quad sys_capget
-+ .quad sys_capset
-+ .quad stub32_sigaltstack
-+ .quad sys32_sendfile
-+ .quad quiet_ni_syscall /* streams1 */
-+ .quad quiet_ni_syscall /* streams2 */
-+ .quad stub32_vfork /* 190 */
-+ .quad compat_sys_getrlimit
-+ .quad sys32_mmap2
-+ .quad sys32_truncate64
-+ .quad sys32_ftruncate64
-+ .quad sys32_stat64 /* 195 */
-+ .quad sys32_lstat64
-+ .quad sys32_fstat64
-+ .quad sys_lchown
-+ .quad sys_getuid
-+ .quad sys_getgid /* 200 */
-+ .quad sys_geteuid
-+ .quad sys_getegid
-+ .quad sys_setreuid
-+ .quad sys_setregid
-+ .quad sys_getgroups /* 205 */
-+ .quad sys_setgroups
-+ .quad sys_fchown
-+ .quad sys_setresuid
-+ .quad sys_getresuid
-+ .quad sys_setresgid /* 210 */
-+ .quad sys_getresgid
-+ .quad sys_chown
-+ .quad sys_setuid
-+ .quad sys_setgid
-+ .quad sys_setfsuid /* 215 */
-+ .quad sys_setfsgid
-+ .quad sys_pivot_root
-+ .quad sys_mincore
-+ .quad sys_madvise
-+ .quad compat_sys_getdents64 /* 220 getdents64 */
-+ .quad compat_sys_fcntl64
-+ .quad quiet_ni_syscall /* tux */
-+ .quad quiet_ni_syscall /* security */
-+ .quad sys_gettid
-+ .quad sys_readahead /* 225 */
-+ .quad sys_setxattr
-+ .quad sys_lsetxattr
-+ .quad sys_fsetxattr
-+ .quad sys_getxattr
-+ .quad sys_lgetxattr /* 230 */
-+ .quad sys_fgetxattr
-+ .quad sys_listxattr
-+ .quad sys_llistxattr
-+ .quad sys_flistxattr
-+ .quad sys_removexattr /* 235 */
-+ .quad sys_lremovexattr
-+ .quad sys_fremovexattr
-+ .quad sys_tkill
-+ .quad sys_sendfile64
-+ .quad compat_sys_futex /* 240 */
-+ .quad compat_sys_sched_setaffinity
-+ .quad compat_sys_sched_getaffinity
-+ .quad sys32_set_thread_area
-+ .quad sys32_get_thread_area
-+ .quad compat_sys_io_setup /* 245 */
-+ .quad sys_io_destroy
-+ .quad compat_sys_io_getevents
-+ .quad compat_sys_io_submit
-+ .quad sys_io_cancel
-+ .quad sys_fadvise64 /* 250 */
-+ .quad quiet_ni_syscall /* free_huge_pages */
-+ .quad sys_exit_group
-+ .quad sys32_lookup_dcookie
-+ .quad sys_epoll_create
-+ .quad sys_epoll_ctl /* 255 */
-+ .quad sys_epoll_wait
-+ .quad sys_remap_file_pages
-+ .quad sys_set_tid_address
-+ .quad compat_sys_timer_create
-+ .quad compat_sys_timer_settime /* 260 */
-+ .quad compat_sys_timer_gettime
-+ .quad sys_timer_getoverrun
-+ .quad sys_timer_delete
-+ .quad compat_sys_clock_settime
-+ .quad compat_sys_clock_gettime /* 265 */
-+ .quad compat_sys_clock_getres
-+ .quad compat_sys_clock_nanosleep
-+ .quad compat_sys_statfs64
-+ .quad compat_sys_fstatfs64
-+ .quad sys_tgkill /* 270 */
-+ .quad compat_sys_utimes
-+ .quad sys32_fadvise64_64
-+ .quad quiet_ni_syscall /* sys_vserver */
-+ .quad sys_mbind
-+ .quad compat_sys_get_mempolicy /* 275 */
-+ .quad sys_set_mempolicy
-+ .quad compat_sys_mq_open
-+ .quad sys_mq_unlink
-+ .quad compat_sys_mq_timedsend
-+ .quad compat_sys_mq_timedreceive /* 280 */
-+ .quad compat_sys_mq_notify
-+ .quad compat_sys_mq_getsetattr
-+ .quad compat_sys_kexec_load /* reserved for kexec */
-+ .quad compat_sys_waitid
-+ .quad quiet_ni_syscall /* 285: sys_altroot */
-+ .quad sys_add_key
-+ .quad sys_request_key
-+ .quad sys_keyctl
-+ .quad sys_ioprio_set
-+ .quad sys_ioprio_get /* 290 */
-+ .quad sys_inotify_init
-+ .quad sys_inotify_add_watch
-+ .quad sys_inotify_rm_watch
-+ .quad sys_migrate_pages
-+ .quad compat_sys_openat /* 295 */
-+ .quad sys_mkdirat
-+ .quad sys_mknodat
-+ .quad sys_fchownat
-+ .quad compat_sys_futimesat
-+ .quad sys32_fstatat /* 300 */
-+ .quad sys_unlinkat
-+ .quad sys_renameat
-+ .quad sys_linkat
-+ .quad sys_symlinkat
-+ .quad sys_readlinkat /* 305 */
-+ .quad sys_fchmodat
-+ .quad sys_faccessat
-+ .quad sys_ni_syscall /* pselect6 for now */
-+ .quad sys_ni_syscall /* ppoll for now */
-+ .quad sys_unshare /* 310 */
-+ia32_syscall_end:
-+ .rept IA32_NR_syscalls-(ia32_syscall_end-ia32_sys_call_table)/8
-+ .quad ni_syscall
-+ .endr
-diff --git a/arch/x86_64/ia32/syscall32-xen.c b/arch/x86_64/ia32/syscall32-xen.c
-new file mode 100644
-index 0000000..e99a6f7
---- /dev/null
-+++ b/arch/x86_64/ia32/syscall32-xen.c
-@@ -0,0 +1,128 @@
-+/* Copyright 2002,2003 Andi Kleen, SuSE Labs */
-+
-+/* vsyscall handling for 32bit processes. Map a stub page into it
-+ on demand because 32bit cannot reach the kernel's fixmaps */
-+
-+#include <linux/mm.h>
-+#include <linux/string.h>
-+#include <linux/kernel.h>
-+#include <linux/gfp.h>
-+#include <linux/init.h>
-+#include <linux/stringify.h>
-+#include <linux/security.h>
-+#include <asm/proto.h>
-+#include <asm/tlbflush.h>
-+#include <asm/ia32_unistd.h>
-+
-+#ifdef USE_INT80
-+extern unsigned char syscall32_int80[], syscall32_int80_end[];
-+#endif
-+extern unsigned char syscall32_syscall[], syscall32_syscall_end[];
-+extern unsigned char syscall32_sysenter[], syscall32_sysenter_end[];
-+extern int sysctl_vsyscall32;
-+
-+char *syscall32_page;
-+#ifndef USE_INT80
-+static int use_sysenter = -1;
-+#endif
-+
-+static struct page *
-+syscall32_nopage(struct vm_area_struct *vma, unsigned long adr, int *type)
-+{
-+ struct page *p = virt_to_page(adr - vma->vm_start + syscall32_page);
-+ get_page(p);
-+ return p;
-+}
-+
-+/* Prevent VMA merging */
-+static void syscall32_vma_close(struct vm_area_struct *vma)
-+{
-+}
-+
-+static struct vm_operations_struct syscall32_vm_ops = {
-+ .close = syscall32_vma_close,
-+ .nopage = syscall32_nopage,
-+};
-+
-+struct linux_binprm;
-+
-+/* Setup a VMA at program startup for the vsyscall page */
-+int syscall32_setup_pages(struct linux_binprm *bprm, int exstack)
-+{
-+ int npages = (VSYSCALL32_END - VSYSCALL32_BASE) >> PAGE_SHIFT;
-+ struct vm_area_struct *vma;
-+ struct mm_struct *mm = current->mm;
-+ int ret;
-+
-+ vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
-+ if (!vma)
-+ return -ENOMEM;
-+
-+ memset(vma, 0, sizeof(struct vm_area_struct));
-+ /* Could randomize here */
-+ vma->vm_start = VSYSCALL32_BASE;
-+ vma->vm_end = VSYSCALL32_END;
-+ /* MAYWRITE to allow gdb to COW and set breakpoints */
-+ vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
-+ vma->vm_flags |= mm->def_flags;
-+ vma->vm_page_prot = protection_map[vma->vm_flags & 7];
-+ vma->vm_ops = &syscall32_vm_ops;
-+ vma->vm_mm = mm;
-+
-+ down_write(&mm->mmap_sem);
-+ if ((ret = insert_vm_struct(mm, vma))) {
-+ up_write(&mm->mmap_sem);
-+ kmem_cache_free(vm_area_cachep, vma);
-+ return ret;
-+ }
-+ mm->total_vm += npages;
-+ up_write(&mm->mmap_sem);
-+ return 0;
-+}
-+
-+static int __init init_syscall32(void)
-+{
-+ syscall32_page = (void *)get_zeroed_page(GFP_KERNEL);
-+ if (!syscall32_page)
-+ panic("Cannot allocate syscall32 page");
-+
-+#ifdef USE_INT80
-+ /*
-+ * At this point we use int 0x80.
-+ */
-+ memcpy(syscall32_page, syscall32_int80,
-+ syscall32_int80_end - syscall32_int80);
-+#else
-+ if (use_sysenter > 0) {
-+ memcpy(syscall32_page, syscall32_sysenter,
-+ syscall32_sysenter_end - syscall32_sysenter);
-+ } else {
-+ memcpy(syscall32_page, syscall32_syscall,
-+ syscall32_syscall_end - syscall32_syscall);
-+ }
-+#endif
-+ return 0;
-+}
-+
-+/*
-+ * This must be done early in case we have an initrd containing 32-bit
-+ * binaries (e.g., hotplug). This could be pushed upstream to arch/x86_64.
-+ */
-+core_initcall(init_syscall32);
-+
-+/* May not be __init: called during resume */
-+void syscall32_cpu_init(void)
-+{
-+#ifndef USE_INT80
-+ if (use_sysenter < 0)
-+ use_sysenter = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL);
-+
-+ /* Load these always in case some future AMD CPU supports
-+ SYSENTER from compat mode too. */
-+ checking_wrmsrl(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
-+ checking_wrmsrl(MSR_IA32_SYSENTER_ESP, 0ULL);
-+ checking_wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
-+
-+ wrmsrl(MSR_CSTAR, ia32_cstar_target);
-+#endif
-+}
-diff --git a/arch/x86_64/ia32/syscall32_syscall-xen.S b/arch/x86_64/ia32/syscall32_syscall-xen.S
-new file mode 100644
-index 0000000..749b198
---- /dev/null
-+++ b/arch/x86_64/ia32/syscall32_syscall-xen.S
-@@ -0,0 +1,28 @@
-+/* 32bit VDSOs mapped into user space. */
-+
-+ .section ".init.data","aw"
-+
-+#ifdef USE_INT80
-+
-+ .globl syscall32_int80
-+ .globl syscall32_int80_end
-+
-+syscall32_int80:
-+ .incbin "arch/x86_64/ia32/vsyscall-int80.so"
-+syscall32_int80_end:
-+
-+#endif
-+
-+ .globl syscall32_syscall
-+ .globl syscall32_syscall_end
-+
-+syscall32_syscall:
-+ .incbin "arch/x86_64/ia32/vsyscall-syscall.so"
-+syscall32_syscall_end:
-+
-+ .globl syscall32_sysenter
-+ .globl syscall32_sysenter_end
-+
-+syscall32_sysenter:
-+ .incbin "arch/x86_64/ia32/vsyscall-sysenter.so"
-+syscall32_sysenter_end:
-diff --git a/arch/x86_64/ia32/vsyscall-int80.S b/arch/x86_64/ia32/vsyscall-int80.S
-new file mode 100644
-index 0000000..00e4148
---- /dev/null
-+++ b/arch/x86_64/ia32/vsyscall-int80.S
-@@ -0,0 +1,58 @@
-+/*
-+ * Code for the vsyscall page. This version uses the old int $0x80 method.
-+ *
-+ * NOTE:
-+ * 1) __kernel_vsyscall _must_ be first in this page.
-+ * 2) there are alignment constraints on this stub, see vsyscall-sigreturn.S
-+ * for details.
-+ */
-+#include <asm/ia32_unistd.h>
-+#include <asm/asm-offsets.h>
-+
-+ .code32
-+ .text
-+ .section .text.vsyscall,"ax"
-+ .globl __kernel_vsyscall
-+ .type __kernel_vsyscall,@function
-+__kernel_vsyscall:
-+.LSTART_vsyscall:
-+ int $0x80
-+ ret
-+.LEND_vsyscall:
-+ .size __kernel_vsyscall,.-.LSTART_vsyscall
-+ .previous
-+
-+ .section .eh_frame,"a",@progbits
-+.LSTARTFRAME:
-+ .long .LENDCIE-.LSTARTCIE
-+.LSTARTCIE:
-+ .long 0 /* CIE ID */
-+ .byte 1 /* Version number */
-+ .string "zR" /* NUL-terminated augmentation string */
-+ .uleb128 1 /* Code alignment factor */
-+ .sleb128 -4 /* Data alignment factor */
-+ .byte 8 /* Return address register column */
-+ .uleb128 1 /* Augmentation value length */
-+ .byte 0x1b /* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
-+ .byte 0x0c /* DW_CFA_def_cfa */
-+ .uleb128 4
-+ .uleb128 4
-+ .byte 0x88 /* DW_CFA_offset, column 0x8 */
-+ .uleb128 1
-+ .align 4
-+.LENDCIE:
-+
-+ .long .LENDFDE1-.LSTARTFDE1 /* Length FDE */
-+.LSTARTFDE1:
-+ .long .LSTARTFDE1-.LSTARTFRAME /* CIE pointer */
-+ .long .LSTART_vsyscall-. /* PC-relative start address */
-+ .long .LEND_vsyscall-.LSTART_vsyscall
-+ .uleb128 0 /* Augmentation length */
-+ .align 4
-+.LENDFDE1:
-+
-+/*
-+ * Get the common code for the sigreturn entry points.
-+ */
-+#define SYSCALL_ENTER_KERNEL int $0x80
-+#include "vsyscall-sigreturn.S"
-diff --git a/arch/x86_64/ia32/vsyscall-sigreturn.S b/arch/x86_64/ia32/vsyscall-sigreturn.S
-index d90321f..3947f45 100644
---- a/arch/x86_64/ia32/vsyscall-sigreturn.S
-+++ b/arch/x86_64/ia32/vsyscall-sigreturn.S
-@@ -120,5 +120,5 @@ __kernel_rt_sigreturn:
- .align 4
- .LENDFDE3:
-
--#include "../../i386/kernel/vsyscall-note.S"
-+#include <vsyscall-note.S>
-
-diff --git a/arch/x86_64/kernel/Makefile b/arch/x86_64/kernel/Makefile
-index a098a11..e904c36 100644
---- a/arch/x86_64/kernel/Makefile
-+++ b/arch/x86_64/kernel/Makefile
-@@ -20,11 +20,13 @@ obj-$(CONFIG_MICROCODE) += microcode.o
- obj-$(CONFIG_X86_CPUID) += cpuid.o
- obj-$(CONFIG_SMP) += smp.o smpboot.o trampoline.o
- obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
-+obj-$(CONFIG_X86_XEN_GENAPIC) += genapic.o genapic_xen.o
- obj-$(CONFIG_X86_IO_APIC) += io_apic.o mpparse.o \
- genapic.o genapic_cluster.o genapic_flat.o
- obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
- obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
--obj-$(CONFIG_PM) += suspend.o
-+obj-$(CONFIG_SOFTWARE_SUSPEND) += suspend.o
-+obj-$(CONFIG_ACPI_SLEEP) += suspend.o
- obj-$(CONFIG_SOFTWARE_SUSPEND) += suspend_asm.o
- obj-$(CONFIG_CPU_FREQ) += cpufreq/
- obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
-@@ -51,3 +53,17 @@ i8237-y += ../../i386/kernel/i8237.o
- msr-$(subst m,y,$(CONFIG_X86_MSR)) += ../../i386/kernel/msr.o
- dmi_scan-y += ../../i386/kernel/dmi_scan.o
-
-+ifdef CONFIG_XEN
-+time-y += ../../i386/kernel/time-xen.o
-+pci-dma-y += ../../i386/kernel/pci-dma-xen.o
-+microcode-$(subst m,y,$(CONFIG_MICROCODE)) := ../../i386/kernel/microcode-xen.o
-+quirks-y := ../../i386/kernel/quirks-xen.o
-+
-+n-obj-xen := i8259.o reboot.o i8237.o smpboot.o trampoline.o genapic_cluster.o genapic_flat.o
-+
-+include $(srctree)/scripts/Makefile.xen
-+
-+obj-y := $(call filterxen, $(obj-y), $(n-obj-xen))
-+obj-y := $(call cherrypickxen, $(obj-y))
-+extra-y := $(call cherrypickxen, $(extra-y))
-+endif
-diff --git a/arch/x86_64/kernel/acpi/Makefile b/arch/x86_64/kernel/acpi/Makefile
-index 4fe9707..aa84f6e 100644
---- a/arch/x86_64/kernel/acpi/Makefile
-+++ b/arch/x86_64/kernel/acpi/Makefile
-@@ -6,3 +6,4 @@ ifneq ($(CONFIG_ACPI_PROCESSOR),)
- obj-y += processor.o
- endif
-
-+boot-$(CONFIG_XEN) := ../../../i386/kernel/acpi/boot-xen.o
-diff --git a/arch/x86_64/kernel/apic-xen.c b/arch/x86_64/kernel/apic-xen.c
-new file mode 100644
-index 0000000..f27b0e3
---- /dev/null
-+++ b/arch/x86_64/kernel/apic-xen.c
-@@ -0,0 +1,200 @@
-+/*
-+ * Local APIC handling, local APIC timers
-+ *
-+ * (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
-+ *
-+ * Fixes
-+ * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
-+ * thanks to Eric Gilmore
-+ * and Rolf G. Tews
-+ * for testing these extensively.
-+ * Maciej W. Rozycki : Various updates and fixes.
-+ * Mikael Pettersson : Power Management for UP-APIC.
-+ * Pavel Machek and
-+ * Mikael Pettersson : PM converted to driver model.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/init.h>
-+
-+#include <linux/mm.h>
-+#include <linux/delay.h>
-+#include <linux/bootmem.h>
-+#include <linux/smp_lock.h>
-+#include <linux/interrupt.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/sysdev.h>
-+#include <linux/module.h>
-+
-+#include <asm/atomic.h>
-+#include <asm/smp.h>
-+#include <asm/mtrr.h>
-+#include <asm/mpspec.h>
-+#include <asm/desc.h>
-+#include <asm/arch_hooks.h>
-+#include <asm/hpet.h>
-+#include <asm/idle.h>
-+
-+int apic_verbosity;
-+
-+/*
-+ * 'what should we do if we get a hw irq event on an illegal vector'.
-+ * each architecture has to answer this themselves.
-+ */
-+void ack_bad_irq(unsigned int irq)
-+{
-+ printk("unexpected IRQ trap at vector %02x\n", irq);
-+ /*
-+ * Currently unexpected vectors happen only on SMP and APIC.
-+ * We _must_ ack these because every local APIC has only N
-+ * irq slots per priority level, and a 'hanging, unacked' IRQ
-+ * holds up an irq slot - in excessive cases (when multiple
-+ * unexpected vectors occur) that might lock up the APIC
-+ * completely.
-+ * But don't ack when the APIC is disabled. -AK
-+ */
-+ if (!disable_apic)
-+ ack_APIC_irq();
-+}
-+
-+int setup_profiling_timer(unsigned int multiplier)
-+{
-+ return -EINVAL;
-+}
-+
-+void smp_local_timer_interrupt(struct pt_regs *regs)
-+{
-+ profile_tick(CPU_PROFILING, regs);
-+#ifndef CONFIG_XEN
-+#ifdef CONFIG_SMP
-+ update_process_times(user_mode(regs));
-+#endif
-+#endif
-+ /*
-+ * We take the 'long' return path, and there every subsystem
-+ * grabs the appropriate locks (kernel lock/ irq lock).
-+ *
-+ * we might want to decouple profiling from the 'long path',
-+ * and do the profiling totally in assembly.
-+ *
-+ * Currently this isn't too much of an issue (performance wise),
-+ * we can take more than 100K local irqs per second on a 100 MHz P5.
-+ */
-+}
-+
-+/*
-+ * Local APIC timer interrupt. This is the most natural way for doing
-+ * local interrupts, but local timer interrupts can be emulated by
-+ * broadcast interrupts too. [in case the hw doesn't support APIC timers]
-+ *
-+ * [ if a single-CPU system runs an SMP kernel then we call the local
-+ * interrupt as well. Thus we cannot inline the local irq ... ]
-+ */
-+void smp_apic_timer_interrupt(struct pt_regs *regs)
-+{
-+ /*
-+ * the NMI deadlock-detector uses this.
-+ */
-+ add_pda(apic_timer_irqs, 1);
-+
-+ /*
-+ * NOTE! We'd better ACK the irq immediately,
-+ * because timer handling can be slow.
-+ */
-+ ack_APIC_irq();
-+ /*
-+ * update_process_times() expects us to have done irq_enter().
-+	 * Besides, if we don't, timer interrupts ignore the global
-+	 * interrupt lock, which is the WrongThing (tm) to do.
-+ */
-+ exit_idle();
-+ irq_enter();
-+ smp_local_timer_interrupt(regs);
-+ irq_exit();
-+}
-+
-+int __initdata unsync_tsc_on_multicluster;
-+
-+/*
-+ * This interrupt should _never_ happen with our APIC/SMP architecture
-+ */
-+asmlinkage void smp_spurious_interrupt(void)
-+{
-+ unsigned int v;
-+ exit_idle();
-+ irq_enter();
-+ /*
-+ * Check if this really is a spurious interrupt and ACK it
-+ * if it is a vectored one. Just in case...
-+ * Spurious interrupts should not be ACKed.
-+ */
-+ v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
-+ if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
-+ ack_APIC_irq();
-+
-+#if 0
-+ static unsigned long last_warning;
-+ static unsigned long skipped;
-+
-+ /* see sw-dev-man vol 3, chapter 7.4.13.5 */
-+ if (time_before(last_warning+30*HZ,jiffies)) {
-+ printk(KERN_INFO "spurious APIC interrupt on CPU#%d, %ld skipped.\n",
-+ smp_processor_id(), skipped);
-+ last_warning = jiffies;
-+ skipped = 0;
-+ } else {
-+ skipped++;
-+ }
-+#endif
-+ irq_exit();
-+}
-+
-+/*
-+ * This interrupt should never happen with our APIC/SMP architecture
-+ */
-+
-+asmlinkage void smp_error_interrupt(void)
-+{
-+ unsigned int v, v1;
-+
-+ exit_idle();
-+ irq_enter();
-+ /* First tickle the hardware, only then report what went on. -- REW */
-+ v = apic_read(APIC_ESR);
-+ apic_write(APIC_ESR, 0);
-+ v1 = apic_read(APIC_ESR);
-+ ack_APIC_irq();
-+ atomic_inc(&irq_err_count);
-+
-+ /* Here is what the APIC error bits mean:
-+ 0: Send CS error
-+ 1: Receive CS error
-+ 2: Send accept error
-+ 3: Receive accept error
-+ 4: Reserved
-+ 5: Send illegal vector
-+ 6: Received illegal vector
-+ 7: Illegal register address
-+ */
-+ printk (KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
-+ smp_processor_id(), v , v1);
-+ irq_exit();
-+}
-+
-+int disable_apic;
-+
-+/*
-+ * This initializes the IO-APIC and APIC hardware if this is
-+ * a UP kernel.
-+ */
-+int __init APIC_init_uniprocessor (void)
-+{
-+#ifdef CONFIG_X86_IO_APIC
-+ if (smp_found_config)
-+ if (!skip_ioapic_setup && nr_ioapics)
-+ setup_IO_APIC();
-+#endif
-+
-+ return 0;
-+}
-diff --git a/arch/x86_64/kernel/asm-offsets.c b/arch/x86_64/kernel/asm-offsets.c
-index 38834bb..06cf058 100644
---- a/arch/x86_64/kernel/asm-offsets.c
-+++ b/arch/x86_64/kernel/asm-offsets.c
-@@ -66,7 +66,9 @@ int main(void)
- DEFINE(pbe_address, offsetof(struct pbe, address));
- DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
- DEFINE(pbe_next, offsetof(struct pbe, next));
-+#ifndef CONFIG_X86_NO_TSS
- BLANK();
- DEFINE(TSS_ist, offsetof(struct tss_struct, ist));
-+#endif
- return 0;
- }
-diff --git a/arch/x86_64/kernel/e820-xen.c b/arch/x86_64/kernel/e820-xen.c
-new file mode 100644
-index 0000000..ac5ae87
---- /dev/null
-+++ b/arch/x86_64/kernel/e820-xen.c
-@@ -0,0 +1,744 @@
-+/*
-+ * Handle the memory map.
-+ * The functions here do the job until bootmem takes over.
-+ * $Id: e820.c,v 1.4 2002/09/19 19:25:32 ak Exp $
-+ *
-+ * Getting sanitize_e820_map() in sync with i386 version by applying change:
-+ * - Provisions for empty E820 memory regions (reported by certain BIOSes).
-+ * Alex Achenbach <xela@slit.de>, December 2002.
-+ * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
-+ *
-+ */
-+#include <linux/config.h>
-+#include <linux/kernel.h>
-+#include <linux/types.h>
-+#include <linux/init.h>
-+#include <linux/bootmem.h>
-+#include <linux/ioport.h>
-+#include <linux/string.h>
-+#include <linux/kexec.h>
-+#include <linux/module.h>
-+
-+#include <asm/page.h>
-+#include <asm/e820.h>
-+#include <asm/proto.h>
-+#include <asm/bootsetup.h>
-+#include <asm/sections.h>
-+#include <xen/interface/memory.h>
-+
-+unsigned long pci_mem_start = 0xaeedbabe;
-+
-+/*
-+ * PFN of last memory page.
-+ */
-+unsigned long end_pfn;
-+EXPORT_SYMBOL(end_pfn);
-+unsigned long end_user_pfn = MAXMEM>>PAGE_SHIFT;
-+unsigned long end_pfn_map;
-+
-+/*
-+ * Add a memory region to the kernel e820 map.
-+ */
-+void __init add_memory_region(unsigned long start, unsigned long size, int type)
-+{
-+ int x = e820.nr_map;
-+
-+ if (x == E820MAX) {
-+ printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
-+ return;
-+ }
-+
-+ e820.map[x].addr = start;
-+ e820.map[x].size = size;
-+ e820.map[x].type = type;
-+ e820.nr_map++;
-+}
-+
-+#ifndef CONFIG_XEN
-+
-+/*
-+ * end_pfn only includes RAM, while end_pfn_map includes all e820 entries.
-+ * The direct mapping extends to end_pfn_map, so that we can directly access
-+ * apertures, ACPI and other tables without having to play with fixmaps.
-+ */
-+
-+/*
-+ * Last pfn which the user wants to use.
-+ */
-+
-+extern struct resource code_resource, data_resource;
-+
-+/* Check for some hardcoded bad areas that early boot is not allowed to touch */
-+static inline int bad_addr(unsigned long *addrp, unsigned long size)
-+{
-+ unsigned long addr = *addrp, last = addr + size;
-+
-+ /* various gunk below that needed for SMP startup */
-+ if (addr < 0x8000) {
-+ *addrp = 0x8000;
-+ return 1;
-+ }
-+
-+ /* direct mapping tables of the kernel */
-+ if (last >= table_start<<PAGE_SHIFT && addr < table_end<<PAGE_SHIFT) {
-+ *addrp = table_end << PAGE_SHIFT;
-+ return 1;
-+ }
-+
-+ /* initrd */
-+#ifdef CONFIG_BLK_DEV_INITRD
-+ if (LOADER_TYPE && INITRD_START && last >= INITRD_START &&
-+ addr < INITRD_START+INITRD_SIZE) {
-+ *addrp = INITRD_START + INITRD_SIZE;
-+ return 1;
-+ }
-+#endif
-+ /* kernel code + 640k memory hole (later should not be needed, but
-+ be paranoid for now) */
-+ if (last >= 640*1024 && addr < __pa_symbol(&_end)) {
-+ *addrp = __pa_symbol(&_end);
-+ return 1;
-+ }
-+ /* XXX ramdisk image here? */
-+ return 0;
-+}
-+
-+int __init e820_mapped(unsigned long start, unsigned long end, unsigned type)
-+{
-+ int i;
-+ for (i = 0; i < e820.nr_map; i++) {
-+ struct e820entry *ei = &e820.map[i];
-+ if (type && ei->type != type)
-+ continue;
-+ if (ei->addr >= end || ei->addr + ei->size <= start)
-+ continue;
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+/*
-+ * Find a free area in a specific range.
-+ */
-+unsigned long __init find_e820_area(unsigned long start, unsigned long end, unsigned size)
-+{
-+ int i;
-+ for (i = 0; i < e820.nr_map; i++) {
-+ struct e820entry *ei = &e820.map[i];
-+ unsigned long addr = ei->addr, last;
-+ if (ei->type != E820_RAM)
-+ continue;
-+ if (addr < start)
-+ addr = start;
-+ if (addr > ei->addr + ei->size)
-+ continue;
-+ while (bad_addr(&addr, size) && addr+size < ei->addr + ei->size)
-+ ;
-+ last = addr + size;
-+ if (last > ei->addr + ei->size)
-+ continue;
-+ if (last > end)
-+ continue;
-+ return addr;
-+ }
-+ return -1UL;
-+}
-+
-+/*
-+ * Free bootmem based on the e820 table for a node.
-+ */
-+void __init e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end)
-+{
-+ int i;
-+ for (i = 0; i < e820.nr_map; i++) {
-+ struct e820entry *ei = &e820.map[i];
-+ unsigned long last, addr;
-+
-+ if (ei->type != E820_RAM ||
-+ ei->addr+ei->size <= start ||
-+ ei->addr >= end)
-+ continue;
-+
-+ addr = round_up(ei->addr, PAGE_SIZE);
-+ if (addr < start)
-+ addr = start;
-+
-+ last = round_down(ei->addr + ei->size, PAGE_SIZE);
-+ if (last >= end)
-+ last = end;
-+
-+ if (last > addr && last-addr >= PAGE_SIZE)
-+ free_bootmem_node(pgdat, addr, last-addr);
-+ }
-+}
-+
-+/*
-+ * Find the highest page frame number we have available
-+ */
-+unsigned long __init e820_end_of_ram(void)
-+{
-+ int i;
-+ unsigned long end_pfn = 0;
-+
-+ for (i = 0; i < e820.nr_map; i++) {
-+ struct e820entry *ei = &e820.map[i];
-+ unsigned long start, end;
-+
-+ start = round_up(ei->addr, PAGE_SIZE);
-+ end = round_down(ei->addr + ei->size, PAGE_SIZE);
-+ if (start >= end)
-+ continue;
-+ if (ei->type == E820_RAM) {
-+ if (end > end_pfn<<PAGE_SHIFT)
-+ end_pfn = end>>PAGE_SHIFT;
-+ } else {
-+ if (end > end_pfn_map<<PAGE_SHIFT)
-+ end_pfn_map = end>>PAGE_SHIFT;
-+ }
-+ }
-+
-+ if (end_pfn > end_pfn_map)
-+ end_pfn_map = end_pfn;
-+ if (end_pfn_map > MAXMEM>>PAGE_SHIFT)
-+ end_pfn_map = MAXMEM>>PAGE_SHIFT;
-+ if (end_pfn > end_user_pfn)
-+ end_pfn = end_user_pfn;
-+ if (end_pfn > end_pfn_map)
-+ end_pfn = end_pfn_map;
-+
-+ return end_pfn;
-+}
-+
-+/*
-+ * Compute how much memory is missing in a range.
-+ * Unlike the other functions in this file the arguments are in page numbers.
-+ */
-+unsigned long __init
-+e820_hole_size(unsigned long start_pfn, unsigned long end_pfn)
-+{
-+ unsigned long ram = 0;
-+ unsigned long start = start_pfn << PAGE_SHIFT;
-+ unsigned long end = end_pfn << PAGE_SHIFT;
-+ int i;
-+ for (i = 0; i < e820.nr_map; i++) {
-+ struct e820entry *ei = &e820.map[i];
-+ unsigned long last, addr;
-+
-+ if (ei->type != E820_RAM ||
-+ ei->addr+ei->size <= start ||
-+ ei->addr >= end)
-+ continue;
-+
-+ addr = round_up(ei->addr, PAGE_SIZE);
-+ if (addr < start)
-+ addr = start;
-+
-+ last = round_down(ei->addr + ei->size, PAGE_SIZE);
-+ if (last >= end)
-+ last = end;
-+
-+ if (last > addr)
-+ ram += last - addr;
-+ }
-+ return ((end - start) - ram) >> PAGE_SHIFT;
-+}
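-+
-+/*
-+ * Worked example (hypothetical map, 4KB pages): if the only RAM below
-+ * 1MB is 0-640KB, then e820_hole_size(0, 0x100) sees ram = 640KB and
-+ * returns (1MB - 640KB) >> PAGE_SHIFT = 96 missing pages.
-+ */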
-+
-+/*
-+ * Mark e820 reserved areas as busy for the resource manager.
-+ */
-+void __init e820_reserve_resources(void)
-+{
-+ int i;
-+ for (i = 0; i < e820.nr_map; i++) {
-+ struct resource *res;
-+ res = alloc_bootmem_low(sizeof(struct resource));
-+ switch (e820.map[i].type) {
-+ case E820_RAM: res->name = "System RAM"; break;
-+ case E820_ACPI: res->name = "ACPI Tables"; break;
-+ case E820_NVS: res->name = "ACPI Non-volatile Storage"; break;
-+ default: res->name = "reserved";
-+ }
-+ res->start = e820.map[i].addr;
-+ res->end = res->start + e820.map[i].size - 1;
-+ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-+ request_resource(&iomem_resource, res);
-+ if (e820.map[i].type == E820_RAM) {
-+ /*
-+ * We don't know which RAM region contains kernel data,
-+ * so we try it repeatedly and let the resource manager
-+ * test it.
-+ */
-+ request_resource(res, &code_resource);
-+ request_resource(res, &data_resource);
-+#ifdef CONFIG_KEXEC
-+ request_resource(res, &crashk_res);
-+#endif
-+ }
-+ }
-+}
-+#endif /* CONFIG_XEN */
-+
-+void __init e820_print_map(char *who)
-+{
-+ int i;
-+
-+ for (i = 0; i < e820.nr_map; i++) {
-+ printk(" %s: %016Lx - %016Lx ", who,
-+ (unsigned long long) e820.map[i].addr,
-+ (unsigned long long) (e820.map[i].addr + e820.map[i].size));
-+ switch (e820.map[i].type) {
-+ case E820_RAM: printk("(usable)\n");
-+ break;
-+ case E820_RESERVED:
-+ printk("(reserved)\n");
-+ break;
-+ case E820_ACPI:
-+ printk("(ACPI data)\n");
-+ break;
-+ case E820_NVS:
-+ printk("(ACPI NVS)\n");
-+ break;
-+ default: printk("type %u\n", e820.map[i].type);
-+ break;
-+ }
-+ }
-+}
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * Sanitize the BIOS e820 map.
-+ *
-+ * Some e820 responses include overlapping entries. The following
-+ * replaces the original e820 map with a new one, removing overlaps.
-+ *
-+ */
-+static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
-+{
-+ struct change_member {
-+ struct e820entry *pbios; /* pointer to original bios entry */
-+ unsigned long long addr; /* address for this change point */
-+ };
-+ static struct change_member change_point_list[2*E820MAX] __initdata;
-+ static struct change_member *change_point[2*E820MAX] __initdata;
-+ static struct e820entry *overlap_list[E820MAX] __initdata;
-+ static struct e820entry new_bios[E820MAX] __initdata;
-+ struct change_member *change_tmp;
-+ unsigned long current_type, last_type;
-+ unsigned long long last_addr;
-+ int chgidx, still_changing;
-+ int overlap_entries;
-+ int new_bios_entry;
-+ int old_nr, new_nr, chg_nr;
-+ int i;
-+
-+ /*
-+ Visually we're performing the following (1,2,3,4 = memory types)...
-+
-+ Sample memory map (w/overlaps):
-+ ____22__________________
-+ ______________________4_
-+ ____1111________________
-+ _44_____________________
-+ 11111111________________
-+ ____________________33__
-+ ___________44___________
-+ __________33333_________
-+ ______________22________
-+ ___________________2222_
-+ _________111111111______
-+ _____________________11_
-+ _________________4______
-+
-+ Sanitized equivalent (no overlap):
-+ 1_______________________
-+ _44_____________________
-+ ___1____________________
-+ ____22__________________
-+ ______11________________
-+ _________1______________
-+ __________3_____________
-+ ___________44___________
-+ _____________33_________
-+ _______________2________
-+ ________________1_______
-+ _________________4______
-+ ___________________2____
-+ ____________________33__
-+ ______________________4_
-+ */
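-+
-+	/*
-+	 * A concrete (made-up) case: overlapping inputs
-+	 *   [0, 640K) type 1 (RAM)  and  [512K, 1M) type 2 (reserved)
-+	 * come out as [0, 512K) type 1 followed by [512K, 1M) type 2,
-+	 * since the larger type value wins wherever entries overlap.
-+	 */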
-+
-+ /* if there's only one memory region, don't bother */
-+ if (*pnr_map < 2)
-+ return -1;
-+
-+ old_nr = *pnr_map;
-+
-+ /* bail out if we find any unreasonable addresses in bios map */
-+ for (i=0; i<old_nr; i++)
-+ if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
-+ return -1;
-+
-+ /* create pointers for initial change-point information (for sorting) */
-+ for (i=0; i < 2*old_nr; i++)
-+ change_point[i] = &change_point_list[i];
-+
-+ /* record all known change-points (starting and ending addresses),
-+ omitting those that are for empty memory regions */
-+ chgidx = 0;
-+ for (i=0; i < old_nr; i++) {
-+ if (biosmap[i].size != 0) {
-+ change_point[chgidx]->addr = biosmap[i].addr;
-+ change_point[chgidx++]->pbios = &biosmap[i];
-+ change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
-+ change_point[chgidx++]->pbios = &biosmap[i];
-+ }
-+ }
-+ chg_nr = chgidx;
-+
-+ /* sort change-point list by memory addresses (low -> high) */
-+ still_changing = 1;
-+ while (still_changing) {
-+ still_changing = 0;
-+ for (i=1; i < chg_nr; i++) {
-+ /* if <current_addr> > <last_addr>, swap */
-+ /* or, if current=<start_addr> & last=<end_addr>, swap */
-+ if ((change_point[i]->addr < change_point[i-1]->addr) ||
-+ ((change_point[i]->addr == change_point[i-1]->addr) &&
-+ (change_point[i]->addr == change_point[i]->pbios->addr) &&
-+ (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
-+ )
-+ {
-+ change_tmp = change_point[i];
-+ change_point[i] = change_point[i-1];
-+ change_point[i-1] = change_tmp;
-+ still_changing=1;
-+ }
-+ }
-+ }
-+
-+ /* create a new bios memory map, removing overlaps */
-+ overlap_entries=0; /* number of entries in the overlap table */
-+ new_bios_entry=0; /* index for creating new bios map entries */
-+ last_type = 0; /* start with undefined memory type */
-+ last_addr = 0; /* start with 0 as last starting address */
-+	/* loop through change-points, determining effect on the new bios map */
-+ for (chgidx=0; chgidx < chg_nr; chgidx++)
-+ {
-+ /* keep track of all overlapping bios entries */
-+ if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
-+ {
-+ /* add map entry to overlap list (> 1 entry implies an overlap) */
-+ overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
-+ }
-+ else
-+ {
-+ /* remove entry from list (order independent, so swap with last) */
-+ for (i=0; i<overlap_entries; i++)
-+ {
-+ if (overlap_list[i] == change_point[chgidx]->pbios)
-+ overlap_list[i] = overlap_list[overlap_entries-1];
-+ }
-+ overlap_entries--;
-+ }
-+ /* if there are overlapping entries, decide which "type" to use */
-+ /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
-+ current_type = 0;
-+ for (i=0; i<overlap_entries; i++)
-+ if (overlap_list[i]->type > current_type)
-+ current_type = overlap_list[i]->type;
-+ /* continue building up new bios map based on this information */
-+ if (current_type != last_type) {
-+ if (last_type != 0) {
-+ new_bios[new_bios_entry].size =
-+ change_point[chgidx]->addr - last_addr;
-+ /* move forward only if the new size was non-zero */
-+ if (new_bios[new_bios_entry].size != 0)
-+ if (++new_bios_entry >= E820MAX)
-+ break; /* no more space left for new bios entries */
-+ }
-+ if (current_type != 0) {
-+ new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
-+ new_bios[new_bios_entry].type = current_type;
-+ last_addr=change_point[chgidx]->addr;
-+ }
-+ last_type = current_type;
-+ }
-+ }
-+ new_nr = new_bios_entry; /* retain count for new bios entries */
-+
-+ /* copy new bios mapping into original location */
-+ memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
-+ *pnr_map = new_nr;
-+
-+ return 0;
-+}
-+
-+/*
-+ * Copy the BIOS e820 map into a safe place.
-+ *
-+ * Sanity-check it while we're at it.
-+ *
-+ * If we're lucky and live on a modern system, the setup code
-+ * will have given us a memory map that we can use to properly
-+ * set up memory. If we aren't, we'll fake a memory map.
-+ *
-+ * We check to see that the memory map contains at least 2 elements
-+ * before we'll use it, because the detection code in setup.S may
-+ * not be perfect and nearly every PC known to man has two memory
-+ * regions: one from 0 to 640k, and one from 1mb up. (The IBM
-+ * thinkpad 560x, for example, does not cooperate with the memory
-+ * detection code.)
-+ */
-+static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
-+{
-+ /* Only one memory region (or negative)? Ignore it */
-+ if (nr_map < 2)
-+ return -1;
-+
-+ do {
-+ unsigned long start = biosmap->addr;
-+ unsigned long size = biosmap->size;
-+ unsigned long end = start + size;
-+ unsigned long type = biosmap->type;
-+
-+ /* Overflow in 64 bits? Ignore the memory map. */
-+ if (start > end)
-+ return -1;
-+
-+ /*
-+ * Some BIOSes claim RAM in the 640k - 1M region.
-+ * Not right. Fix it up.
-+ *
-+ * This should be removed on Hammer which is supposed to not
-+ * have non e820 covered ISA mappings there, but I had some strange
-+ * problems so it stays for now. -AK
-+ */
-+ if (type == E820_RAM) {
-+ if (start < 0x100000ULL && end > 0xA0000ULL) {
-+ if (start < 0xA0000ULL)
-+ add_memory_region(start, 0xA0000ULL-start, type);
-+ if (end <= 0x100000ULL)
-+ continue;
-+ start = 0x100000ULL;
-+ size = end - start;
-+ }
-+ }
-+
-+ add_memory_region(start, size, type);
-+ } while (biosmap++,--nr_map);
-+ return 0;
-+}
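-+
-+/*
-+ * Worked example (hypothetical BIOS entry): a RAM entry covering
-+ * 0x90000-0x200000 is split by the loop above into RAM at
-+ * 0x90000-0xa0000 plus RAM at 0x100000-0x200000, dropping the
-+ * 640K-1M hole in between.
-+ */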
-+
-+void __init setup_memory_region(void)
-+{
-+ char *who = "BIOS-e820";
-+
-+ /*
-+ * Try to copy the BIOS-supplied E820-map.
-+ *
-+ * Otherwise fake a memory map; one section from 0k->640k,
-+ * the next section from 1mb->appropriate_mem_k
-+ */
-+ sanitize_e820_map(E820_MAP, &E820_MAP_NR);
-+ if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) {
-+ unsigned long mem_size;
-+
-+ /* compare results from other methods and take the greater */
-+ if (ALT_MEM_K < EXT_MEM_K) {
-+ mem_size = EXT_MEM_K;
-+ who = "BIOS-88";
-+ } else {
-+ mem_size = ALT_MEM_K;
-+ who = "BIOS-e801";
-+ }
-+
-+ e820.nr_map = 0;
-+ add_memory_region(0, LOWMEMSIZE(), E820_RAM);
-+ add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
-+ }
-+ printk(KERN_INFO "BIOS-provided physical RAM map:\n");
-+ e820_print_map(who);
-+}
-+
-+#else /* CONFIG_XEN */
-+
-+extern unsigned long xen_override_max_pfn;
-+extern union xen_start_info_union xen_start_info_union;
-+
-+unsigned long __init e820_end_of_ram(void)
-+{
-+ unsigned long max_end_pfn;
-+
-+ if (xen_override_max_pfn == 0) {
-+ max_end_pfn = xen_start_info->nr_pages;
-+ /* Default 8MB slack (to balance backend allocations). */
-+ max_end_pfn += 8 << (20 - PAGE_SHIFT);
-+ } else if (xen_override_max_pfn > xen_start_info->nr_pages) {
-+ max_end_pfn = xen_override_max_pfn;
-+ } else {
-+ max_end_pfn = xen_start_info->nr_pages;
-+ }
-+
-+ return max_end_pfn;
-+}
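-+
-+/*
-+ * Note on the slack above: with 4KB pages, 8 << (20 - PAGE_SHIFT) is
-+ * 8 << 8 = 2048 pfns, i.e. 8MB of headroom beyond nr_pages.
-+ */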
-+
-+unsigned long __init
-+e820_hole_size(unsigned long start_pfn, unsigned long end_pfn)
-+{
-+ return 0;
-+}
-+
-+void __init e820_reserve_resources(void)
-+{
-+ dom0_op_t op;
-+ struct dom0_memory_map_entry *map;
-+ unsigned long gapstart, gapsize, round, last;
-+ int i, found = 0;
-+
-+ if (!(xen_start_info->flags & SIF_INITDOMAIN))
-+ return;
-+
-+ map = alloc_bootmem_low_pages(PAGE_SIZE);
-+ op.cmd = DOM0_PHYSICAL_MEMORY_MAP;
-+ op.u.physical_memory_map.memory_map = map;
-+ op.u.physical_memory_map.max_map_entries =
-+ PAGE_SIZE / sizeof(struct dom0_memory_map_entry);
-+ BUG_ON(HYPERVISOR_dom0_op(&op));
-+
-+ last = 0x100000000ULL;
-+ gapstart = 0x10000000;
-+ gapsize = 0x400000;
-+
-+ for (i = op.u.physical_memory_map.nr_map_entries - 1; i >= 0; i--) {
-+ struct resource *res;
-+
-+ if ((last > map[i].end) && ((last - map[i].end) > gapsize)) {
-+ gapsize = last - map[i].end;
-+ gapstart = map[i].end;
-+ found = 1;
-+ }
-+ if (map[i].start < last)
-+ last = map[i].start;
-+
-+ if (map[i].end > 0x100000000ULL)
-+ continue;
-+ res = alloc_bootmem_low(sizeof(struct resource));
-+ res->name = map[i].is_ram ? "System RAM" : "reserved";
-+ res->start = map[i].start;
-+ res->end = map[i].end - 1;
-+ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-+ request_resource(&iomem_resource, res);
-+ }
-+
-+ free_bootmem(__pa(map), PAGE_SIZE);
-+
-+ if (!found) {
-+ gapstart = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
-+ gapstart = (gapstart << PAGE_SHIFT) + 1024*1024;
-+ printk(KERN_ERR "PCI: Warning: Cannot find a gap in the 32bit address range\n"
-+ KERN_ERR "PCI: Unassigned devices with 32bit resource registers may break!\n");
-+ }
-+
-+ /*
-+ * See how much we want to round up: start off with
-+ * rounding to the next 1MB area.
-+ */
-+ round = 0x100000;
-+ while ((gapsize >> 4) > round)
-+ round += round;
-+ /* Fun with two's complement */
-+ pci_mem_start = (gapstart + round) & -round;
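-+	/*
-+	 * (gapstart + round) & -round above rounds gapstart up to the
-+	 * next multiple of round; e.g. with a hypothetical gapstart of
-+	 * 0x12345678 and round of 0x100000 this yields 0x12400000.
-+	 */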
-+
-+ printk(KERN_INFO "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
-+ pci_mem_start, gapstart, gapsize);
-+}
-+
-+#endif
-+
-+void __init parse_memopt(char *p, char **from)
-+{
-+ end_user_pfn = memparse(p, from);
-+ end_user_pfn >>= PAGE_SHIFT;
-+ xen_override_max_pfn = (unsigned long) end_user_pfn;
-+}
-+
-+void __init parse_memmapopt(char *p, char **from)
-+{
-+ unsigned long long start_at, mem_size;
-+
-+ mem_size = memparse(p, from);
-+ p = *from;
-+ if (*p == '@') {
-+ start_at = memparse(p+1, from);
-+ add_memory_region(start_at, mem_size, E820_RAM);
-+ } else if (*p == '#') {
-+ start_at = memparse(p+1, from);
-+ add_memory_region(start_at, mem_size, E820_ACPI);
-+ } else if (*p == '$') {
-+ start_at = memparse(p+1, from);
-+ add_memory_region(start_at, mem_size, E820_RESERVED);
-+ } else {
-+ end_user_pfn = (mem_size >> PAGE_SHIFT);
-+ }
-+ p = *from;
-+}
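-+
-+/*
-+ * Usage sketch (hypothetical command lines): "memmap=64M@16M" adds a
-+ * 64MB E820_RAM region at 16MB, "memmap=64K#0x1000000" marks ACPI
-+ * data, "memmap=1M$0x2000000" marks a reserved region, and a bare
-+ * "memmap=512M" just caps end_user_pfn.
-+ */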
-+
-+/*
-+ * Search for the biggest gap in the low 32 bits of the e820
-+ * memory space, and hand it to PCI for assigning MMIO resources
-+ * to hotplug or otherwise unconfigured devices.
-+ * Hopefully the BIOS left enough space.
-+ */
-+__init void e820_setup_gap(void)
-+{
-+#ifndef CONFIG_XEN
-+ unsigned long gapstart, gapsize;
-+ unsigned long last;
-+ int i;
-+ int found = 0;
-+
-+ last = 0x100000000ull;
-+ gapstart = 0x10000000;
-+ gapsize = 0x400000;
-+ i = e820.nr_map;
-+ while (--i >= 0) {
-+ unsigned long long start = e820.map[i].addr;
-+ unsigned long long end = start + e820.map[i].size;
-+
-+ /*
-+ * Since "last" is at most 4GB, we know we'll
-+ * fit in 32 bits if this condition is true
-+ */
-+ if (last > end) {
-+ unsigned long gap = last - end;
-+
-+ if (gap > gapsize) {
-+ gapsize = gap;
-+ gapstart = end;
-+ found = 1;
-+ }
-+ }
-+ if (start < last)
-+ last = start;
-+ }
-+
-+ if (!found) {
-+ gapstart = (end_pfn << PAGE_SHIFT) + 1024*1024;
-+ printk(KERN_ERR "PCI: Warning: Cannot find a gap in the 32bit address range\n"
-+ KERN_ERR "PCI: Unassigned devices with 32bit resource registers may break!\n");
-+ }
-+
-+ /*
-+ * Start allocating dynamic PCI memory a bit into the gap,
-+ * aligned up to the nearest megabyte.
-+ *
-+ * Question: should we try to pad it up a bit (do something
-+ * like " + (gapsize >> 3)" in there too?). We now have the
-+ * technology.
-+ */
-+ pci_mem_start = (gapstart + 0xfffff) & ~0xfffff;
-+
-+ printk(KERN_INFO "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
-+ pci_mem_start, gapstart, gapsize);
-+#endif
-+}
-diff --git a/arch/x86_64/kernel/early_printk-xen.c b/arch/x86_64/kernel/early_printk-xen.c
-new file mode 100644
-index 0000000..c1eb6b5
---- /dev/null
-+++ b/arch/x86_64/kernel/early_printk-xen.c
-@@ -0,0 +1,306 @@
-+#include <linux/config.h>
-+#include <linux/console.h>
-+#include <linux/kernel.h>
-+#include <linux/init.h>
-+#include <linux/string.h>
-+#include <linux/tty.h>
-+#include <asm/io.h>
-+#include <asm/processor.h>
-+#include <asm/fcntl.h>
-+
-+/* Simple VGA output */
-+
-+#ifdef __i386__
-+#include <asm/setup.h>
-+#define VGABASE (__ISA_IO_base + 0xb8000)
-+#else
-+#include <asm/bootsetup.h>
-+#define VGABASE ((void __iomem *)0xffffffff800b8000UL)
-+#endif
-+
-+#define MAX_YPOS max_ypos
-+#define MAX_XPOS max_xpos
-+
-+static int max_ypos = 25, max_xpos = 80;
-+
-+#ifndef CONFIG_XEN
-+static int current_ypos = 1, current_xpos = 0;
-+
-+static void early_vga_write(struct console *con, const char *str, unsigned n)
-+{
-+ char c;
-+ int i, k, j;
-+
-+ while ((c = *str++) != '\0' && n-- > 0) {
-+ if (current_ypos >= MAX_YPOS) {
-+ /* scroll 1 line up */
-+ for (k = 1, j = 0; k < MAX_YPOS; k++, j++) {
-+ for (i = 0; i < MAX_XPOS; i++) {
-+ writew(readw(VGABASE + 2*(MAX_XPOS*k + i)),
-+ VGABASE + 2*(MAX_XPOS*j + i));
-+ }
-+ }
-+ for (i = 0; i < MAX_XPOS; i++)
-+ writew(0x720, VGABASE + 2*(MAX_XPOS*j + i));
-+ current_ypos = MAX_YPOS-1;
-+ }
-+ if (c == '\n') {
-+ current_xpos = 0;
-+ current_ypos++;
-+ } else if (c != '\r') {
-+ writew(((0x7 << 8) | (unsigned short) c),
-+ VGABASE + 2*(MAX_XPOS*current_ypos +
-+ current_xpos++));
-+ if (current_xpos >= MAX_XPOS) {
-+ current_xpos = 0;
-+ current_ypos++;
-+ }
-+ }
-+ }
-+}
-+
-+static struct console early_vga_console = {
-+ .name = "earlyvga",
-+ .write = early_vga_write,
-+ .flags = CON_PRINTBUFFER,
-+ .index = -1,
-+};
-+
-+/* Serial functions loosely based on a similar package from Klaus P. Gerlicher */
-+
-+static int early_serial_base = 0x3f8; /* ttyS0 */
-+
-+#define XMTRDY 0x20
-+
-+#define DLAB 0x80
-+
-+#define TXR 0 /* Transmit register (WRITE) */
-+#define RXR 0 /* Receive register (READ) */
-+#define IER 1 /* Interrupt Enable */
-+#define IIR 2 /* Interrupt ID */
-+#define FCR 2 /* FIFO control */
-+#define LCR 3 /* Line control */
-+#define MCR 4 /* Modem control */
-+#define LSR 5 /* Line Status */
-+#define MSR 6 /* Modem Status */
-+#define DLL 0 /* Divisor Latch Low */
-+#define DLH 1 /* Divisor latch High */
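-+
-+/*
-+ * Background note: with DLAB set in LCR, offsets 0/1 address the
-+ * divisor latch (DLL/DLH) instead of TXR/IER; the divisor is
-+ * 115200/baud on a standard PC UART, e.g. 12 for 9600 baud.
-+ */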
-+
-+static int early_serial_putc(unsigned char ch)
-+{
-+ unsigned timeout = 0xffff;
-+ while ((inb(early_serial_base + LSR) & XMTRDY) == 0 && --timeout)
-+ cpu_relax();
-+ outb(ch, early_serial_base + TXR);
-+ return timeout ? 0 : -1;
-+}
-+
-+static void early_serial_write(struct console *con, const char *s, unsigned n)
-+{
-+ while (*s && n-- > 0) {
-+ early_serial_putc(*s);
-+ if (*s == '\n')
-+ early_serial_putc('\r');
-+ s++;
-+ }
-+}
-+
-+#define DEFAULT_BAUD 9600
-+
-+static __init void early_serial_init(char *s)
-+{
-+ unsigned char c;
-+ unsigned divisor;
-+ unsigned baud = DEFAULT_BAUD;
-+ char *e;
-+
-+ if (*s == ',')
-+ ++s;
-+
-+ if (*s) {
-+ unsigned port;
-+ if (!strncmp(s,"0x",2)) {
-+ early_serial_base = simple_strtoul(s, &e, 16);
-+ } else {
-+ static int bases[] = { 0x3f8, 0x2f8 };
-+
-+ if (!strncmp(s,"ttyS",4))
-+ s += 4;
-+ port = simple_strtoul(s, &e, 10);
-+ if (port > 1 || s == e)
-+ port = 0;
-+ early_serial_base = bases[port];
-+ }
-+ s += strcspn(s, ",");
-+ if (*s == ',')
-+ s++;
-+ }
-+
-+ outb(0x3, early_serial_base + LCR); /* 8n1 */
-+ outb(0, early_serial_base + IER); /* no interrupt */
-+ outb(0, early_serial_base + FCR); /* no fifo */
-+ outb(0x3, early_serial_base + MCR); /* DTR + RTS */
-+
-+ if (*s) {
-+ baud = simple_strtoul(s, &e, 0);
-+ if (baud == 0 || s == e)
-+ baud = DEFAULT_BAUD;
-+ }
-+
-+ divisor = 115200 / baud;
-+ c = inb(early_serial_base + LCR);
-+ outb(c | DLAB, early_serial_base + LCR);
-+ outb(divisor & 0xff, early_serial_base + DLL);
-+ outb((divisor >> 8) & 0xff, early_serial_base + DLH);
-+ outb(c & ~DLAB, early_serial_base + LCR);
-+}
-+
-+#else /* CONFIG_XEN */
-+
-+#undef SCREEN_INFO
-+#define SCREEN_INFO screen_info
-+extern struct screen_info screen_info;
-+
-+static void
-+early_serial_write(struct console *con, const char *s, unsigned count)
-+{
-+ int n;
-+
-+ while (count > 0) {
-+ n = HYPERVISOR_console_io(CONSOLEIO_write, count, (char *)s);
-+ if (n <= 0)
-+ break;
-+ count -= n;
-+ s += n;
-+ }
-+}
-+
-+static __init void early_serial_init(char *s)
-+{
-+}
-+
-+/*
-+ * No early VGA console on Xen, as we do not have convenient ISA-space
-+ * mappings. Someone should fix this for domain 0. For now, use fake serial.
-+ */
-+#define early_vga_console early_serial_console
-+
-+#endif
-+
-+static struct console early_serial_console = {
-+ .name = "earlyser",
-+ .write = early_serial_write,
-+ .flags = CON_PRINTBUFFER,
-+ .index = -1,
-+};
-+
-+/* Console interface to a host file on AMD's SimNow! */
-+
-+static int simnow_fd;
-+
-+enum {
-+ MAGIC1 = 0xBACCD00A,
-+ MAGIC2 = 0xCA110000,
-+ XOPEN = 5,
-+ XWRITE = 4,
-+};
-+
-+static noinline long simnow(long cmd, long a, long b, long c)
-+{
-+ long ret;
-+ asm volatile("cpuid" :
-+ "=a" (ret) :
-+ "b" (a), "c" (b), "d" (c), "0" (MAGIC1), "D" (cmd + MAGIC2));
-+ return ret;
-+}
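-+
-+/*
-+ * How the backdoor above works: SimNow! watches for CPUID executed
-+ * with EAX = MAGIC1 and EDI = MAGIC2 + command, taking arguments in
-+ * RBX/RCX/RDX -- e.g. simnow(XWRITE, fd, buf, len) writes a buffer
-+ * to the host file opened by simnow_init().
-+ */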
-+
-+void __init simnow_init(char *str)
-+{
-+ char *fn = "klog";
-+ if (*str == '=')
-+ fn = ++str;
-+ /* error ignored */
-+ simnow_fd = simnow(XOPEN, (unsigned long)fn, O_WRONLY|O_APPEND|O_CREAT, 0644);
-+}
-+
-+static void simnow_write(struct console *con, const char *s, unsigned n)
-+{
-+ simnow(XWRITE, simnow_fd, (unsigned long)s, n);
-+}
-+
-+static struct console simnow_console = {
-+ .name = "simnow",
-+ .write = simnow_write,
-+ .flags = CON_PRINTBUFFER,
-+ .index = -1,
-+};
-+
-+/* Direct interface for emergencies */
-+struct console *early_console = &early_vga_console;
-+static int early_console_initialized = 0;
-+
-+void early_printk(const char *fmt, ...)
-+{
-+ char buf[512];
-+ int n;
-+ va_list ap;
-+
-+ va_start(ap,fmt);
-+ n = vscnprintf(buf,512,fmt,ap);
-+ early_console->write(early_console,buf,n);
-+ va_end(ap);
-+}
-+
-+static int __initdata keep_early;
-+
-+int __init setup_early_printk(char *opt)
-+{
-+ char *space;
-+ char buf[256];
-+
-+ if (early_console_initialized)
-+ return -1;
-+
-+ strlcpy(buf,opt,sizeof(buf));
-+ space = strchr(buf, ' ');
-+ if (space)
-+ *space = 0;
-+
-+ if (strstr(buf,"keep"))
-+ keep_early = 1;
-+
-+ if (!strncmp(buf, "serial", 6)) {
-+ early_serial_init(buf + 6);
-+ early_console = &early_serial_console;
-+ } else if (!strncmp(buf, "ttyS", 4)) {
-+ early_serial_init(buf);
-+ early_console = &early_serial_console;
-+ } else if (!strncmp(buf, "vga", 3)
-+ && SCREEN_INFO.orig_video_isVGA == 1) {
-+ max_xpos = SCREEN_INFO.orig_video_cols;
-+ max_ypos = SCREEN_INFO.orig_video_lines;
-+ early_console = &early_vga_console;
-+ } else if (!strncmp(buf, "simnow", 6)) {
-+ simnow_init(buf + 6);
-+ early_console = &simnow_console;
-+ keep_early = 1;
-+ }
-+ early_console_initialized = 1;
-+ register_console(early_console);
-+ return 0;
-+}
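-+
-+/*
-+ * Typical boot-line uses (illustrative): "earlyprintk=serial,ttyS0,9600",
-+ * "earlyprintk=vga", or "earlyprintk=ttyS1,115200,keep" to keep the
-+ * console registered after the real console comes up.
-+ */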
-+
-+void __init disable_early_printk(void)
-+{
-+ if (!early_console_initialized || !early_console)
-+ return;
-+ if (!keep_early) {
-+ printk("disabling early console\n");
-+ unregister_console(early_console);
-+ early_console_initialized = 0;
-+ } else {
-+ printk("keeping early console\n");
-+ }
-+}
-+
-+__setup("earlyprintk=", setup_early_printk);
-diff --git a/arch/x86_64/kernel/entry-xen.S b/arch/x86_64/kernel/entry-xen.S
-new file mode 100644
-index 0000000..dc41c56
---- /dev/null
-+++ b/arch/x86_64/kernel/entry-xen.S
-@@ -0,0 +1,1138 @@
-+/*
-+ * linux/arch/x86_64/entry.S
-+ *
-+ * Copyright (C) 1991, 1992 Linus Torvalds
-+ * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
-+ * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
-+ *
-+ * $Id$
-+ *
-+ * Jun Nakajima <jun.nakajima@intel.com>
-+ * Asit Mallick <asit.k.mallick@intel.com>
-+ * Modified for Xen
-+ */
-+
-+/*
-+ * entry.S contains the system-call and fault low-level handling routines.
-+ *
-+ * NOTE: This code handles signal-recognition, which happens every time
-+ * after an interrupt and after each system call.
-+ *
-+ * Normal syscalls and interrupts don't save a full stack frame, this is
-+ * only done for syscall tracing, signals, or fork/exec et al.
-+ *
-+ * A note on terminology:
-+ * - top of stack: Architecture defined interrupt frame from SS to RIP
-+ * at the top of the kernel process stack.
-+ * - partial stack frame: partially saved registers up to R11.
-+ * - full stack frame: Like partial stack frame, but all register saved.
-+ *
-+ * TODO:
-+ * - schedule it carefully for the final hardware.
-+ */
-+
-+#define ASSEMBLY 1
-+#include <linux/config.h>
-+#ifdef CONFIG_DEBUG_INFO
-+#undef CONFIG_DEBUG_INFO
-+#endif
-+#include <linux/linkage.h>
-+#include <asm/segment.h>
-+#include <asm/smp.h>
-+#include <asm/cache.h>
-+#include <asm/errno.h>
-+#include <asm/dwarf2.h>
-+#include <asm/calling.h>
-+#include <asm/asm-offsets.h>
-+#include <asm/msr.h>
-+#include <asm/unistd.h>
-+#include <asm/thread_info.h>
-+#include <asm/hw_irq.h>
-+#include <asm/page.h>
-+#include <asm/errno.h>
-+#include <xen/interface/arch-x86_64.h>
-+#include <xen/interface/features.h>
-+
-+#include "xen_entry.S"
-+
-+ .code64
-+
-+#ifndef CONFIG_PREEMPT
-+#define retint_kernel retint_restore_args
-+#endif
-+
-+NMI_MASK = 0x80000000
-+
-+/*
-+ * C code is not supposed to know about undefined top of stack. Every time
-+ * a C function with a pt_regs argument is called from the SYSCALL based
-+ * fast path FIXUP_TOP_OF_STACK is needed.
-+ * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
-+ * manipulation.
-+ */
-+
-+ /* %rsp:at FRAMEEND */
-+ .macro FIXUP_TOP_OF_STACK tmp
-+ movq $__USER_CS,CS(%rsp)
-+ movq $-1,RCX(%rsp)
-+ .endm
-+
-+ .macro RESTORE_TOP_OF_STACK tmp,offset=0
-+ .endm
-+
-+ .macro FAKE_STACK_FRAME child_rip
-+ /* push in order ss, rsp, eflags, cs, rip */
-+ xorl %eax, %eax
-+ pushq %rax /* ss */
-+ CFI_ADJUST_CFA_OFFSET 8
-+ /*CFI_REL_OFFSET ss,0*/
-+ pushq %rax /* rsp */
-+ CFI_ADJUST_CFA_OFFSET 8
-+ CFI_REL_OFFSET rsp,0
-+ pushq $(1<<9) /* eflags - interrupts on */
-+ CFI_ADJUST_CFA_OFFSET 8
-+ /*CFI_REL_OFFSET rflags,0*/
-+ pushq $__KERNEL_CS /* cs */
-+ CFI_ADJUST_CFA_OFFSET 8
-+ /*CFI_REL_OFFSET cs,0*/
-+ pushq \child_rip /* rip */
-+ CFI_ADJUST_CFA_OFFSET 8
-+ CFI_REL_OFFSET rip,0
-+ pushq %rax /* orig rax */
-+ CFI_ADJUST_CFA_OFFSET 8
-+ .endm
-+
-+ .macro UNFAKE_STACK_FRAME
-+ addq $8*6, %rsp
-+ CFI_ADJUST_CFA_OFFSET -(6*8)
-+ .endm
-+
-+ .macro CFI_DEFAULT_STACK start=1
-+ .if \start
-+ CFI_STARTPROC simple
-+ CFI_DEF_CFA rsp,SS+8
-+ .else
-+ CFI_DEF_CFA_OFFSET SS+8
-+ .endif
-+ CFI_REL_OFFSET r15,R15
-+ CFI_REL_OFFSET r14,R14
-+ CFI_REL_OFFSET r13,R13
-+ CFI_REL_OFFSET r12,R12
-+ CFI_REL_OFFSET rbp,RBP
-+ CFI_REL_OFFSET rbx,RBX
-+ CFI_REL_OFFSET r11,R11
-+ CFI_REL_OFFSET r10,R10
-+ CFI_REL_OFFSET r9,R9
-+ CFI_REL_OFFSET r8,R8
-+ CFI_REL_OFFSET rax,RAX
-+ CFI_REL_OFFSET rcx,RCX
-+ CFI_REL_OFFSET rdx,RDX
-+ CFI_REL_OFFSET rsi,RSI
-+ CFI_REL_OFFSET rdi,RDI
-+ CFI_REL_OFFSET rip,RIP
-+ /*CFI_REL_OFFSET cs,CS*/
-+ /*CFI_REL_OFFSET rflags,EFLAGS*/
-+ CFI_REL_OFFSET rsp,RSP
-+ /*CFI_REL_OFFSET ss,SS*/
-+ .endm
-+
-+ /*
-+ * Must be consistent with the definition in arch-x86_64.h:
-+ * struct iret_context {
-+ * u64 rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
-+ * };
-+ * #define VGCF_IN_SYSCALL (1<<8)
-+ */
-+ .macro HYPERVISOR_IRET flag
-+ testb $3,1*8(%rsp)
-+ jnz 2f
-+ testl $NMI_MASK,2*8(%rsp)
-+ jnz 2f
-+
-+ testb $1,(xen_features+XENFEAT_supervisor_mode_kernel)
-+ jnz 1f
-+
-+ /* Direct iret to kernel space. Correct CS and SS. */
-+ orb $3,1*8(%rsp)
-+ orb $3,4*8(%rsp)
-+1: iretq
-+
-+2: /* Slow iret via hypervisor. */
-+ andl $~NMI_MASK, 16(%rsp)
-+ pushq $\flag
-+ jmp hypercall_page + (__HYPERVISOR_iret * 32)
-+ .endm
-+
-+ .macro SWITCH_TO_KERNEL ssoff,adjust=0
-+ jc 1f
-+ orb $1,\ssoff-\adjust+4(%rsp)
-+1:
-+ .endm
-+
-+/*
-+ * A newly forked process directly context switches into this.
-+ */
-+/* rdi: prev */
-+ENTRY(ret_from_fork)
-+ CFI_DEFAULT_STACK
-+ call schedule_tail
-+ GET_THREAD_INFO(%rcx)
-+ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
-+ jnz rff_trace
-+rff_action:
-+ RESTORE_REST
-+ testl $3,CS-ARGOFFSET(%rsp) # from kernel_thread?
-+ je int_ret_from_sys_call
-+ testl $_TIF_IA32,threadinfo_flags(%rcx)
-+ jnz int_ret_from_sys_call
-+ RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
-+ jmp ret_from_sys_call
-+rff_trace:
-+ movq %rsp,%rdi
-+ call syscall_trace_leave
-+ GET_THREAD_INFO(%rcx)
-+ jmp rff_action
-+ CFI_ENDPROC
-+
-+/*
-+ * System call entry. Up to 6 arguments in registers are supported.
-+ *
-+ * SYSCALL does not save anything on the stack and does not change the
-+ * stack pointer.
-+ */
-+
-+/*
-+ * Register setup:
-+ * rax system call number
-+ * rdi arg0
-+ * rcx return address for syscall/sysret, C arg3
-+ * rsi arg1
-+ * rdx arg2
-+ * r10 arg3 (--> moved to rcx for C)
-+ * r8 arg4
-+ * r9 arg5
-+ * r11 eflags for syscall/sysret, temporary for C
-+ * r12-r15,rbp,rbx saved by C code, not touched.
-+ *
-+ * Interrupts are off on entry.
-+ * Only called from user space.
-+ *
-+ * XXX if we had a free scratch register we could save the RSP into the stack frame
-+ * and report it properly in ps. Unfortunately we don't have one.
-+ */
-+
-+ENTRY(system_call)
-+ CFI_STARTPROC simple
-+ CFI_DEF_CFA rsp,0
-+ CFI_REGISTER rip,rcx
-+ /*CFI_REGISTER rflags,r11*/
-+ SAVE_ARGS -8,0
-+ movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
-+ XEN_UNBLOCK_EVENTS(%r11)
-+ GET_THREAD_INFO(%rcx)
-+ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
-+ CFI_REMEMBER_STATE
-+ jnz tracesys
-+ cmpq $__NR_syscall_max,%rax
-+ ja badsys
-+ movq %r10,%rcx
-+ call *sys_call_table(,%rax,8) # XXX: rip relative
-+ movq %rax,RAX-ARGOFFSET(%rsp)
-+/*
-+ * Syscall return path ending with SYSRET (fast path)
-+ * Has incomplete stack frame and undefined top of stack.
-+ */
-+ .globl ret_from_sys_call
-+ret_from_sys_call:
-+ movl $_TIF_ALLWORK_MASK,%edi
-+ /* edi: flagmask */
-+sysret_check:
-+ GET_THREAD_INFO(%rcx)
-+ XEN_BLOCK_EVENTS(%rsi)
-+ movl threadinfo_flags(%rcx),%edx
-+ andl %edi,%edx
-+ CFI_REMEMBER_STATE
-+ jnz sysret_careful
-+ XEN_UNBLOCK_EVENTS(%rsi)
-+ CFI_REGISTER rip,rcx
-+ RESTORE_ARGS 0,8,0
-+ /*CFI_REGISTER rflags,r11*/
-+ HYPERVISOR_IRET VGCF_IN_SYSCALL
-+
-+ /* Handle reschedules */
-+ /* edx: work, edi: workmask */
-+sysret_careful:
-+ CFI_RESTORE_STATE
-+ bt $TIF_NEED_RESCHED,%edx
-+ jnc sysret_signal
-+ XEN_BLOCK_EVENTS(%rsi)
-+ pushq %rdi
-+ CFI_ADJUST_CFA_OFFSET 8
-+ call schedule
-+ popq %rdi
-+ CFI_ADJUST_CFA_OFFSET -8
-+ jmp sysret_check
-+
-+ /* Handle a signal */
-+sysret_signal:
-+/* sti */
-+ XEN_UNBLOCK_EVENTS(%rsi)
-+ testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
-+ jz 1f
-+
-+ /* Really a signal */
-+ /* edx: work flags (arg3) */
-+ leaq do_notify_resume(%rip),%rax
-+ leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
-+ xorl %esi,%esi # oldset -> arg2
-+ call ptregscall_common
-+1: movl $_TIF_NEED_RESCHED,%edi
-+ jmp sysret_check
-+
-+badsys:
-+ movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
-+ jmp ret_from_sys_call
-+
-+ /* Do syscall tracing */
-+tracesys:
-+ CFI_RESTORE_STATE
-+ SAVE_REST
-+ movq $-ENOSYS,RAX(%rsp)
-+ FIXUP_TOP_OF_STACK %rdi
-+ movq %rsp,%rdi
-+ call syscall_trace_enter
-+ LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
-+ RESTORE_REST
-+ cmpq $__NR_syscall_max,%rax
-+ ja 1f
-+ movq %r10,%rcx /* fixup for C */
-+ call *sys_call_table(,%rax,8)
-+ movq %rax,RAX-ARGOFFSET(%rsp)
-+1: SAVE_REST
-+ movq %rsp,%rdi
-+ call syscall_trace_leave
-+ RESTORE_TOP_OF_STACK %rbx
-+ RESTORE_REST
-+ jmp ret_from_sys_call
-+ CFI_ENDPROC
-+
-+/*
-+ * Syscall return path ending with IRET.
-+ * Has correct top of stack, but partial stack frame.
-+ */
-+ENTRY(int_ret_from_sys_call)
-+ CFI_STARTPROC simple
-+ CFI_DEF_CFA rsp,SS+8-ARGOFFSET
-+ /*CFI_REL_OFFSET ss,SS-ARGOFFSET*/
-+ CFI_REL_OFFSET rsp,RSP-ARGOFFSET
-+ /*CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
-+ /*CFI_REL_OFFSET cs,CS-ARGOFFSET*/
-+ CFI_REL_OFFSET rip,RIP-ARGOFFSET
-+ CFI_REL_OFFSET rdx,RDX-ARGOFFSET
-+ CFI_REL_OFFSET rcx,RCX-ARGOFFSET
-+ CFI_REL_OFFSET rax,RAX-ARGOFFSET
-+ CFI_REL_OFFSET rdi,RDI-ARGOFFSET
-+ CFI_REL_OFFSET rsi,RSI-ARGOFFSET
-+ CFI_REL_OFFSET r8,R8-ARGOFFSET
-+ CFI_REL_OFFSET r9,R9-ARGOFFSET
-+ CFI_REL_OFFSET r10,R10-ARGOFFSET
-+ CFI_REL_OFFSET r11,R11-ARGOFFSET
-+ XEN_BLOCK_EVENTS(%rsi)
-+ testb $3,CS-ARGOFFSET(%rsp)
-+ jnz 1f
-+ /* Need to set the proper %ss (not NULL) for ring 3 iretq */
-+ movl $__KERNEL_DS,SS-ARGOFFSET(%rsp)
-+	jmp retint_restore_args	# return from ring3 kernel
-+1:
-+ movl $_TIF_ALLWORK_MASK,%edi
-+ /* edi: mask to check */
-+int_with_check:
-+ GET_THREAD_INFO(%rcx)
-+ movl threadinfo_flags(%rcx),%edx
-+ andl %edi,%edx
-+ jnz int_careful
-+ andl $~TS_COMPAT,threadinfo_status(%rcx)
-+ jmp retint_restore_args
-+
-+ /* Either reschedule or signal or syscall exit tracking needed. */
-+ /* First do a reschedule test. */
-+ /* edx: work, edi: workmask */
-+int_careful:
-+ bt $TIF_NEED_RESCHED,%edx
-+ jnc int_very_careful
-+/* sti */
-+ XEN_UNBLOCK_EVENTS(%rsi)
-+ pushq %rdi
-+ CFI_ADJUST_CFA_OFFSET 8
-+ call schedule
-+ popq %rdi
-+ CFI_ADJUST_CFA_OFFSET -8
-+ cli
-+ jmp int_with_check
-+
-+ /* handle signals and tracing -- both require a full stack frame */
-+int_very_careful:
-+/* sti */
-+ XEN_UNBLOCK_EVENTS(%rsi)
-+ SAVE_REST
-+ /* Check for syscall exit trace */
-+ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
-+ jz int_signal
-+ pushq %rdi
-+ CFI_ADJUST_CFA_OFFSET 8
-+ leaq 8(%rsp),%rdi # &ptregs -> arg1
-+ call syscall_trace_leave
-+ popq %rdi
-+ CFI_ADJUST_CFA_OFFSET -8
-+ andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
-+ cli
-+ jmp int_restore_rest
-+
-+int_signal:
-+ testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx
-+ jz 1f
-+ movq %rsp,%rdi # &ptregs -> arg1
-+ xorl %esi,%esi # oldset -> arg2
-+ call do_notify_resume
-+1: movl $_TIF_NEED_RESCHED,%edi
-+int_restore_rest:
-+ RESTORE_REST
-+ cli
-+ jmp int_with_check
-+ CFI_ENDPROC
-+
-+/*
-+ * Certain special system calls that need to save a complete full stack frame.
-+ */
-+
-+ .macro PTREGSCALL label,func,arg
-+ .globl \label
-+\label:
-+ leaq \func(%rip),%rax
-+ leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
-+ jmp ptregscall_common
-+ .endm
-+
-+ CFI_STARTPROC
-+
-+ PTREGSCALL stub_clone, sys_clone, %r8
-+ PTREGSCALL stub_fork, sys_fork, %rdi
-+ PTREGSCALL stub_vfork, sys_vfork, %rdi
-+ PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
-+ PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
-+ PTREGSCALL stub_iopl, sys_iopl, %rsi
-+
-+ENTRY(ptregscall_common)
-+ popq %r11
-+ CFI_ADJUST_CFA_OFFSET -8
-+ CFI_REGISTER rip, r11
-+ SAVE_REST
-+ movq %r11, %r15
-+ CFI_REGISTER rip, r15
-+ FIXUP_TOP_OF_STACK %r11
-+ call *%rax
-+ RESTORE_TOP_OF_STACK %r11
-+ movq %r15, %r11
-+ CFI_REGISTER rip, r11
-+ RESTORE_REST
-+ pushq %r11
-+ CFI_ADJUST_CFA_OFFSET 8
-+ CFI_REL_OFFSET rip, 0
-+ ret
-+ CFI_ENDPROC
-+
-+ENTRY(stub_execve)
-+ CFI_STARTPROC
-+ popq %r11
-+ CFI_ADJUST_CFA_OFFSET -8
-+ CFI_REGISTER rip, r11
-+ SAVE_REST
-+ movq %r11, %r15
-+ CFI_REGISTER rip, r15
-+ FIXUP_TOP_OF_STACK %r11
-+ call sys_execve
-+ GET_THREAD_INFO(%rcx)
-+ bt $TIF_IA32,threadinfo_flags(%rcx)
-+ CFI_REMEMBER_STATE
-+ jc exec_32bit
-+ RESTORE_TOP_OF_STACK %r11
-+ movq %r15, %r11
-+ CFI_REGISTER rip, r11
-+ RESTORE_REST
-+ pushq %r11
-+ CFI_ADJUST_CFA_OFFSET 8
-+ CFI_REL_OFFSET rip, 0
-+ ret
-+
-+exec_32bit:
-+ CFI_RESTORE_STATE
-+ movq %rax,RAX(%rsp)
-+ RESTORE_REST
-+ jmp int_ret_from_sys_call
-+ CFI_ENDPROC
-+
-+/*
-+ * sigreturn is special because it needs to restore all registers on return.
-+ * This cannot be done with SYSRET, so use the IRET return path instead.
-+ */
-+ENTRY(stub_rt_sigreturn)
-+ CFI_STARTPROC
-+ addq $8, %rsp
-+ CFI_ADJUST_CFA_OFFSET -8
-+ SAVE_REST
-+ movq %rsp,%rdi
-+ FIXUP_TOP_OF_STACK %r11
-+ call sys_rt_sigreturn
-+ movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
-+ RESTORE_REST
-+ jmp int_ret_from_sys_call
-+ CFI_ENDPROC
-+
-+/*
-+ * initial frame state for interrupts and exceptions
-+ */
-+ .macro _frame ref
-+ CFI_STARTPROC simple
-+ CFI_DEF_CFA rsp,SS+8-\ref
-+ /*CFI_REL_OFFSET ss,SS-\ref*/
-+ CFI_REL_OFFSET rsp,RSP-\ref
-+ /*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
-+ /*CFI_REL_OFFSET cs,CS-\ref*/
-+ CFI_REL_OFFSET rip,RIP-\ref
-+ .endm
-+
-+/* initial frame state for interrupts (and exceptions without error code) */
-+#define INTR_FRAME _frame RIP
-+/* initial frame state for exceptions with error code (and interrupts with
-+ vector already pushed) */
-+#define XCPT_FRAME _frame ORIG_RAX
-+
-+/*
-+ * Interrupt exit.
-+ *
-+ */
-+
-+retint_check:
-+ movl threadinfo_flags(%rcx),%edx
-+ andl %edi,%edx
-+ CFI_REMEMBER_STATE
-+ jnz retint_careful
-+retint_restore_args:
-+ movb EVENT_MASK-REST_SKIP(%rsp), %al
-+ notb %al # %al == ~saved_mask
-+ XEN_GET_VCPU_INFO(%rsi)
-+ andb evtchn_upcall_mask(%rsi),%al
-+ andb $1,%al # %al == mask & ~saved_mask
-+ jnz restore_all_enable_events # != 0 => reenable event delivery
-+ XEN_PUT_VCPU_INFO(%rsi)
-+
-+ RESTORE_ARGS 0,8,0
-+ HYPERVISOR_IRET 0
-+
-+ /* edi: workmask, edx: work */
-+retint_careful:
-+ CFI_RESTORE_STATE
-+ bt $TIF_NEED_RESCHED,%edx
-+ jnc retint_signal
-+ XEN_UNBLOCK_EVENTS(%rsi)
-+/* sti */
-+ pushq %rdi
-+ CFI_ADJUST_CFA_OFFSET 8
-+ call schedule
-+ popq %rdi
-+ CFI_ADJUST_CFA_OFFSET -8
-+ XEN_BLOCK_EVENTS(%rsi)
-+ GET_THREAD_INFO(%rcx)
-+/* cli */
-+ jmp retint_check
-+
-+retint_signal:
-+ testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
-+ jz retint_restore_args
-+ XEN_UNBLOCK_EVENTS(%rsi)
-+ SAVE_REST
-+ movq $-1,ORIG_RAX(%rsp)
-+ xorl %esi,%esi # oldset
-+ movq %rsp,%rdi # &pt_regs
-+ call do_notify_resume
-+ RESTORE_REST
-+ XEN_BLOCK_EVENTS(%rsi)
-+ movl $_TIF_NEED_RESCHED,%edi
-+ GET_THREAD_INFO(%rcx)
-+ jmp retint_check
-+
-+#ifdef CONFIG_PREEMPT
-+ /* Returning to kernel space. Check if we need preemption */
-+ /* rcx: threadinfo. interrupts off. */
-+ .p2align
-+retint_kernel:
-+ cmpl $0,threadinfo_preempt_count(%rcx)
-+ jnz retint_restore_args
-+ bt $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
-+ jnc retint_restore_args
-+ bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
-+ jnc retint_restore_args
-+ call preempt_schedule_irq
-+ jmp retint_kernel /* check again */
-+#endif
-+ CFI_ENDPROC
-+
-+/*
-+ * APIC interrupts.
-+ */
-+ .macro apicinterrupt num,func
-+ INTR_FRAME
-+ pushq $\num-256
-+ CFI_ADJUST_CFA_OFFSET 8
-+ interrupt \func
-+ jmp error_entry
-+ CFI_ENDPROC
-+ .endm
-+
-+#ifndef CONFIG_XEN
-+ENTRY(thermal_interrupt)
-+ apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
-+
-+ENTRY(threshold_interrupt)
-+ apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
-+
-+#ifdef CONFIG_SMP
-+ENTRY(reschedule_interrupt)
-+ apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
-+
-+ .macro INVALIDATE_ENTRY num
-+ENTRY(invalidate_interrupt\num)
-+ apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
-+ .endm
-+
-+ INVALIDATE_ENTRY 0
-+ INVALIDATE_ENTRY 1
-+ INVALIDATE_ENTRY 2
-+ INVALIDATE_ENTRY 3
-+ INVALIDATE_ENTRY 4
-+ INVALIDATE_ENTRY 5
-+ INVALIDATE_ENTRY 6
-+ INVALIDATE_ENTRY 7
-+
-+ENTRY(call_function_interrupt)
-+ apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
-+#endif
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+ENTRY(apic_timer_interrupt)
-+ apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
-+
-+ENTRY(error_interrupt)
-+ apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
-+
-+ENTRY(spurious_interrupt)
-+ apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
-+#endif
-+#endif /* !CONFIG_XEN */
-+
-+/*
-+ * Exception entry points.
-+ */
-+ .macro zeroentry sym
-+ INTR_FRAME
-+ movq (%rsp),%rcx
-+ movq 8(%rsp),%r11
-+ addq $0x10,%rsp /* skip rcx and r11 */
-+ pushq $0 /* push error code/oldrax */
-+ CFI_ADJUST_CFA_OFFSET 8
-+ pushq %rax /* push real oldrax to the rdi slot */
-+ CFI_ADJUST_CFA_OFFSET 8
-+ leaq \sym(%rip),%rax
-+ jmp error_entry
-+ CFI_ENDPROC
-+ .endm
-+
-+ .macro errorentry sym
-+ XCPT_FRAME
-+ movq (%rsp),%rcx
-+ movq 8(%rsp),%r11
-+ addq $0x10,%rsp /* rsp points to the error code */
-+ pushq %rax
-+ CFI_ADJUST_CFA_OFFSET 8
-+ leaq \sym(%rip),%rax
-+ jmp error_entry
-+ CFI_ENDPROC
-+ .endm
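-+
-+/*
-+ * In short: zeroentry is for exceptions that supply no error code (a
-+ * zero is pushed in its place), errorentry for those where one is
-+ * already on the stack; both stash the handler address in %rax and
-+ * funnel into error_entry below.
-+ */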
-+
-+#if 0 /* not XEN */
-+ /* error code is on the stack already */
-+ /* handle NMI like exceptions that can happen everywhere */
-+ .macro paranoidentry sym, ist=0
-+ movq (%rsp),%rcx
-+ movq 8(%rsp),%r11
-+ addq $0x10,%rsp /* skip rcx and r11 */
-+ SAVE_ALL
-+ cld
-+#if 0 /* not XEN */
-+ movl $1,%ebx
-+ movl $MSR_GS_BASE,%ecx
-+ rdmsr
-+ testl %edx,%edx
-+ js 1f
-+ swapgs
-+ xorl %ebx,%ebx
-+1:
-+#endif
-+ .if \ist
-+ movq %gs:pda_data_offset, %rbp
-+ .endif
-+ movq %rsp,%rdi
-+ movq ORIG_RAX(%rsp),%rsi
-+ movq $-1,ORIG_RAX(%rsp)
-+ .if \ist
-+ subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
-+ .endif
-+ call \sym
-+ .if \ist
-+ addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
-+ .endif
-+/* cli */
-+ XEN_BLOCK_EVENTS(%rsi)
-+ .endm
-+#endif
-+
-+/*
-+ * Exception entry point. This expects an error code/orig_rax on the stack
-+ * and the exception handler in %rax.
-+ */
-+ENTRY(error_entry)
-+ _frame RDI
-+ /* rdi slot contains rax, oldrax contains error code */
-+ cld
-+ subq $14*8,%rsp
-+ CFI_ADJUST_CFA_OFFSET (14*8)
-+ movq %rsi,13*8(%rsp)
-+ CFI_REL_OFFSET rsi,RSI
-+ movq 14*8(%rsp),%rsi /* load rax from rdi slot */
-+ movq %rdx,12*8(%rsp)
-+ CFI_REL_OFFSET rdx,RDX
-+ movq %rcx,11*8(%rsp)
-+ CFI_REL_OFFSET rcx,RCX
-+ movq %rsi,10*8(%rsp) /* store rax */
-+ CFI_REL_OFFSET rax,RAX
-+ movq %r8, 9*8(%rsp)
-+ CFI_REL_OFFSET r8,R8
-+ movq %r9, 8*8(%rsp)
-+ CFI_REL_OFFSET r9,R9
-+ movq %r10,7*8(%rsp)
-+ CFI_REL_OFFSET r10,R10
-+ movq %r11,6*8(%rsp)
-+ CFI_REL_OFFSET r11,R11
-+ movq %rbx,5*8(%rsp)
-+ CFI_REL_OFFSET rbx,RBX
-+ movq %rbp,4*8(%rsp)
-+ CFI_REL_OFFSET rbp,RBP
-+ movq %r12,3*8(%rsp)
-+ CFI_REL_OFFSET r12,R12
-+ movq %r13,2*8(%rsp)
-+ CFI_REL_OFFSET r13,R13
-+ movq %r14,1*8(%rsp)
-+ CFI_REL_OFFSET r14,R14
-+ movq %r15,(%rsp)
-+ CFI_REL_OFFSET r15,R15
-+#if 0
-+ cmpl $__KERNEL_CS,CS(%rsp)
-+ je error_kernelspace
-+#endif
-+error_call_handler:
-+ movq %rdi, RDI(%rsp)
-+ movq %rsp,%rdi
-+ movq ORIG_RAX(%rsp),%rsi # get error code
-+ movq $-1,ORIG_RAX(%rsp)
-+ call *%rax
-+error_exit:
-+ RESTORE_REST
-+/* cli */
-+ XEN_BLOCK_EVENTS(%rsi)
-+ GET_THREAD_INFO(%rcx)
-+ testb $3,CS-ARGOFFSET(%rsp)
-+ jz retint_kernel
-+ movl threadinfo_flags(%rcx),%edx
-+ movl $_TIF_WORK_MASK,%edi
-+ andl %edi,%edx
-+ jnz retint_careful
-+ jmp retint_restore_args
-+
-+error_kernelspace:
-+ /*
-+	 * We need to re-write the logic here because we don't do iretq to
-+	 * return to user mode. It's still possible that we get a trap/fault
-+ * in the kernel (when accessing buffers pointed to by system calls,
-+ * for example).
-+ *
-+ */
-+#if 0
-+ incl %ebx
-+ /* There are two places in the kernel that can potentially fault with
-+ usergs. Handle them here. The exception handlers after
-+ iret run with kernel gs again, so don't set the user space flag.
-+	   B stepping K8s sometimes report a truncated RIP for IRET
-+ exceptions returning to compat mode. Check for these here too. */
-+ leaq iret_label(%rip),%rbp
-+ cmpq %rbp,RIP(%rsp)
-+ je error_swapgs
-+ movl %ebp,%ebp /* zero extend */
-+ cmpq %rbp,RIP(%rsp)
-+ je error_swapgs
-+ cmpq $gs_change,RIP(%rsp)
-+ je error_swapgs
-+ jmp error_sti
-+#endif
-+
-+ENTRY(hypervisor_callback)
-+ zeroentry do_hypervisor_callback
-+
-+/*
-+ * Copied from arch/xen/i386/kernel/entry.S
-+ */
-+# A note on the "critical region" in our callback handler.
-+# We want to avoid stacking callback handlers due to events occurring
-+# during handling of the last event. To do this, we keep events disabled
-+# until we've done all processing. HOWEVER, we must enable events before
-+# popping the stack frame (can't be done atomically) and so it would still
-+# be possible to get enough handler activations to overflow the stack.
-+# Although unlikely, bugs of that kind are hard to track down, so we'd
-+# like to avoid the possibility.
-+# So, on entry to the handler we detect whether we interrupted an
-+# existing activation in its critical region -- if so, we pop the current
-+# activation and restart the handler using the previous one.
-+ENTRY(do_hypervisor_callback)	# do_hypervisor_callback(struct pt_regs *)
-+# Since we don't modify %rdi, evtchn_do_upcall(struct pt_regs *) will
-+# see the correct pointer to the pt_regs
-+ movq %rdi, %rsp # we don't return, adjust the stack frame
-+11: movb $0, EVENT_MASK(%rdi)
-+ movq %gs:pda_irqstackptr,%rax
-+ incl %gs:pda_irqcount
-+ cmovzq %rax,%rsp
-+ pushq %rdi
-+ call evtchn_do_upcall
-+ popq %rsp
-+ decl %gs:pda_irqcount
-+ jmp error_exit
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+ENTRY(nmi)
-+ zeroentry do_nmi_callback
-+ENTRY(do_nmi_callback)
-+ addq $8, %rsp
-+ call do_nmi
-+ orl $NMI_MASK,EFLAGS(%rsp)
-+ RESTORE_REST
-+ XEN_BLOCK_EVENTS(%rsi)
-+ GET_THREAD_INFO(%rcx)
-+ jmp retint_restore_args
-+#endif
-+
-+ ALIGN
-+restore_all_enable_events:
-+ XEN_UNBLOCK_EVENTS(%rsi) # %rsi is already set up...
-+
-+scrit: /**** START OF CRITICAL REGION ****/
-+ XEN_TEST_PENDING(%rsi)
-+ jnz 14f # process more events if necessary...
-+ XEN_PUT_VCPU_INFO(%rsi)
-+ RESTORE_ARGS 0,8,0
-+ HYPERVISOR_IRET 0
-+
-+14: XEN_LOCKED_BLOCK_EVENTS(%rsi)
-+ XEN_PUT_VCPU_INFO(%rsi)
-+ SAVE_REST
-+ movq %rsp,%rdi # set the argument again
-+ jmp 11b
-+ecrit: /**** END OF CRITICAL REGION ****/
-+# At this point, unlike on x86-32, we don't do the fixup: it keeps the
-+# code simpler, and the stack frame is more complex on x86-64 anyway.
-+# When the kernel is interrupted in the critical section, it will do an
-+# IRET, and everything is restored at that point, i.e. execution simply
-+# resumes at the interrupted instruction with the same context.
-+
-+# Hypervisor uses this for application faults while it executes.
-+ENTRY(failsafe_callback)
-+ addq $0x10,%rsp /* skip rcx and r11 */
-+1: mov (%rsp),%ds
-+2: mov 8(%rsp),%es
-+3: mov 16(%rsp),%fs
-+4: mov 24(%rsp),%gs
-+ addq $0x20,%rsp /* skip the above selectors */
-+ SAVE_ALL
-+ jmp error_exit
-+.section .fixup,"ax"; \
-+6: movq $0,(%rsp); \
-+ jmp 1b; \
-+7: movq $0,8(%rsp); \
-+ jmp 2b; \
-+8: movq $0,16(%rsp); \
-+ jmp 3b; \
-+9: movq $0,24(%rsp); \
-+ jmp 4b; \
-+.previous; \
-+.section __ex_table,"a";\
-+ .align 16; \
-+ .quad 1b,6b; \
-+ .quad 2b,7b; \
-+ .quad 3b,8b; \
-+ .quad 4b,9b; \
-+.previous
-+
-+#if 0
-+ .section __ex_table,"a"
-+ .align 8
-+ .quad gs_change,bad_gs
-+ .previous
-+ .section .fixup,"ax"
-+ /* running with kernelgs */
-+bad_gs:
-+/* swapgs */ /* switch back to user gs */
-+ xorl %eax,%eax
-+ movl %eax,%gs
-+ jmp 2b
-+ .previous
-+#endif
-+
-+/*
-+ * Create a kernel thread.
-+ *
-+ * C extern interface:
-+ * extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
-+ *
-+ * asm input arguments:
-+ * rdi: fn, rsi: arg, rdx: flags
-+ */
-+ENTRY(kernel_thread)
-+ CFI_STARTPROC
-+ FAKE_STACK_FRAME $child_rip
-+ SAVE_ALL
-+
-+ # rdi: flags, rsi: usp, rdx: will be &pt_regs
-+ movq %rdx,%rdi
-+ orq kernel_thread_flags(%rip),%rdi
-+ movq $-1, %rsi
-+ movq %rsp, %rdx
-+
-+ xorl %r8d,%r8d
-+ xorl %r9d,%r9d
-+
-+ # clone now
-+ call do_fork
-+ movq %rax,RAX(%rsp)
-+ xorl %edi,%edi
-+
-+ /*
-+	 * It isn't worth checking for a reschedule here,
-+ * so internally to the x86_64 port you can rely on kernel_thread()
-+ * not to reschedule the child before returning, this avoids the need
-+ * of hacks for example to fork off the per-CPU idle tasks.
-+ * [Hopefully no generic code relies on the reschedule -AK]
-+ */
-+ RESTORE_ALL
-+ UNFAKE_STACK_FRAME
-+ ret
-+ CFI_ENDPROC
-+
-+
-+child_rip:
-+ /*
-+ * Here we are in the child and the registers are set as they were
-+ * at kernel_thread() invocation in the parent.
-+ */
-+ movq %rdi, %rax
-+ movq %rsi, %rdi
-+ call *%rax
-+ # exit
-+ xorl %edi, %edi
-+ call do_exit
-+
-+/*
-+ * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
-+ *
-+ * C extern interface:
-+ * extern long execve(char *name, char **argv, char **envp)
-+ *
-+ * asm input arguments:
-+ * rdi: name, rsi: argv, rdx: envp
-+ *
-+ * We want to fall back into:
-+ * extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs regs)
-+ *
-+ * do_sys_execve asm fallback arguments:
-+ * rdi: name, rsi: argv, rdx: envp, fake frame on the stack
-+ */
-+ENTRY(execve)
-+ CFI_STARTPROC
-+ FAKE_STACK_FRAME $0
-+ SAVE_ALL
-+ call sys_execve
-+ movq %rax, RAX(%rsp)
-+ RESTORE_REST
-+ testq %rax,%rax
-+ jne 1f
-+ jmp int_ret_from_sys_call
-+1: RESTORE_ARGS
-+ UNFAKE_STACK_FRAME
-+ ret
-+ CFI_ENDPROC
-+
-+KPROBE_ENTRY(page_fault)
-+ errorentry do_page_fault
-+ .previous .text
-+
-+ENTRY(coprocessor_error)
-+ zeroentry do_coprocessor_error
-+
-+ENTRY(simd_coprocessor_error)
-+ zeroentry do_simd_coprocessor_error
-+
-+ENTRY(device_not_available)
-+ zeroentry math_state_restore
-+
-+ /* runs on exception stack */
-+KPROBE_ENTRY(debug)
-+ INTR_FRAME
-+/* pushq $0
-+ CFI_ADJUST_CFA_OFFSET 8 */
-+ zeroentry do_debug
-+/* jmp paranoid_exit */
-+ CFI_ENDPROC
-+ .previous .text
-+
-+#if 0
-+ /* runs on exception stack */
-+KPROBE_ENTRY(nmi)
-+ INTR_FRAME
-+ pushq $-1
-+ CFI_ADJUST_CFA_OFFSET 8
-+ paranoidentry do_nmi
-+ /*
-+ * "Paranoid" exit path from exception stack.
-+ * Paranoid because this is used by NMIs and cannot take
-+ * any kernel state for granted.
-+ * We don't do kernel preemption checks here, because only
-+ * NMI should be common and it does not enable IRQs and
-+ * cannot get reschedule ticks.
-+ */
-+ /* ebx: no swapgs flag */
-+paranoid_exit:
-+ testl %ebx,%ebx /* swapgs needed? */
-+ jnz paranoid_restore
-+ testl $3,CS(%rsp)
-+ jnz paranoid_userspace
-+paranoid_swapgs:
-+ swapgs
-+paranoid_restore:
-+ RESTORE_ALL 8
-+ iretq
-+paranoid_userspace:
-+ GET_THREAD_INFO(%rcx)
-+ movl threadinfo_flags(%rcx),%ebx
-+ andl $_TIF_WORK_MASK,%ebx
-+ jz paranoid_swapgs
-+ movq %rsp,%rdi /* &pt_regs */
-+ call sync_regs
-+ movq %rax,%rsp /* switch stack for scheduling */
-+ testl $_TIF_NEED_RESCHED,%ebx
-+ jnz paranoid_schedule
-+ movl %ebx,%edx /* arg3: thread flags */
-+ sti
-+ xorl %esi,%esi /* arg2: oldset */
-+ movq %rsp,%rdi /* arg1: &pt_regs */
-+ call do_notify_resume
-+ cli
-+ jmp paranoid_userspace
-+paranoid_schedule:
-+ sti
-+ call schedule
-+ cli
-+ jmp paranoid_userspace
-+ CFI_ENDPROC
-+ .previous .text
-+#endif
-+
-+KPROBE_ENTRY(int3)
-+ INTR_FRAME
-+/* pushq $0
-+ CFI_ADJUST_CFA_OFFSET 8 */
-+ zeroentry do_int3
-+/* jmp paranoid_exit */
-+ CFI_ENDPROC
-+ .previous .text
-+
-+ENTRY(overflow)
-+ zeroentry do_overflow
-+
-+ENTRY(bounds)
-+ zeroentry do_bounds
-+
-+ENTRY(invalid_op)
-+ zeroentry do_invalid_op
-+
-+ENTRY(coprocessor_segment_overrun)
-+ zeroentry do_coprocessor_segment_overrun
-+
-+ENTRY(reserved)
-+ zeroentry do_reserved
-+
-+#if 0
-+ /* runs on exception stack */
-+ENTRY(double_fault)
-+ XCPT_FRAME
-+ paranoidentry do_double_fault
-+ jmp paranoid_exit
-+ CFI_ENDPROC
-+#endif
-+
-+ENTRY(invalid_TSS)
-+ errorentry do_invalid_TSS
-+
-+ENTRY(segment_not_present)
-+ errorentry do_segment_not_present
-+
-+ /* runs on exception stack */
-+ENTRY(stack_segment)
-+ XCPT_FRAME
-+ errorentry do_stack_segment
-+ CFI_ENDPROC
-+
-+KPROBE_ENTRY(general_protection)
-+ errorentry do_general_protection
-+ .previous .text
-+
-+ENTRY(alignment_check)
-+ errorentry do_alignment_check
-+
-+ENTRY(divide_error)
-+ zeroentry do_divide_error
-+
-+ENTRY(spurious_interrupt_bug)
-+ zeroentry do_spurious_interrupt_bug
-+
-+#ifdef CONFIG_X86_MCE
-+ /* runs on exception stack */
-+ENTRY(machine_check)
-+ INTR_FRAME
-+ pushq $0
-+ CFI_ADJUST_CFA_OFFSET 8
-+ paranoidentry do_machine_check
-+ jmp paranoid_exit
-+ CFI_ENDPROC
-+#endif
-+
-+ENTRY(call_softirq)
-+ CFI_STARTPROC
-+ movq %gs:pda_irqstackptr,%rax
-+ movq %rsp,%rdx
-+ CFI_DEF_CFA_REGISTER rdx
-+ incl %gs:pda_irqcount
-+ cmove %rax,%rsp
-+ pushq %rdx
-+ /*todo CFI_DEF_CFA_EXPRESSION ...*/
-+ call __do_softirq
-+ popq %rsp
-+ CFI_DEF_CFA_REGISTER rsp
-+ decl %gs:pda_irqcount
-+ ret
-+ CFI_ENDPROC
-diff --git a/arch/x86_64/kernel/genapic-xen.c b/arch/x86_64/kernel/genapic-xen.c
-new file mode 100644
-index 0000000..0af6bc3
---- /dev/null
-+++ b/arch/x86_64/kernel/genapic-xen.c
-@@ -0,0 +1,135 @@
-+/*
-+ * Copyright 2004 James Cleverdon, IBM.
-+ * Subject to the GNU Public License, v.2
-+ *
-+ * Generic APIC sub-arch probe layer.
-+ *
-+ * Hacked for x86-64 by James Cleverdon from i386 architecture code by
-+ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
-+ * James Cleverdon.
-+ */
-+#include <linux/config.h>
-+#include <linux/threads.h>
-+#include <linux/cpumask.h>
-+#include <linux/string.h>
-+#include <linux/kernel.h>
-+#include <linux/ctype.h>
-+#include <linux/init.h>
-+#include <linux/module.h>
-+
-+#include <asm/smp.h>
-+#include <asm/ipi.h>
-+
-+#if defined(CONFIG_ACPI)
-+#include <acpi/acpi_bus.h>
-+#endif
-+
-+/* which logical CPU number maps to which CPU (physical APIC ID) */
-+u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
-+EXPORT_SYMBOL(x86_cpu_to_apicid);
-+u8 x86_cpu_to_log_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
-+
-+#ifndef CONFIG_XEN
-+extern struct genapic apic_cluster;
-+extern struct genapic apic_flat;
-+extern struct genapic apic_physflat;
-+struct genapic *genapic = &apic_flat;
-+#else
-+extern struct genapic apic_xen;
-+struct genapic *genapic = &apic_xen;
-+#endif
-+
-+
-+/*
-+ * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
-+ */
-+void __init clustered_apic_check(void)
-+{
-+#ifndef CONFIG_XEN
-+ long i;
-+ u8 clusters, max_cluster;
-+ u8 id;
-+ u8 cluster_cnt[NUM_APIC_CLUSTERS];
-+ int max_apic = 0;
-+
-+#if defined(CONFIG_ACPI)
-+ /*
-+ * Some x86_64 machines use physical APIC mode regardless of how many
-+ * procs/clusters are present (x86_64 ES7000 is an example).
-+ */
-+ if (acpi_fadt.revision > FADT2_REVISION_ID)
-+ if (acpi_fadt.force_apic_physical_destination_mode) {
-+ genapic = &apic_cluster;
-+ goto print;
-+ }
-+#endif
-+
-+ memset(cluster_cnt, 0, sizeof(cluster_cnt));
-+ for (i = 0; i < NR_CPUS; i++) {
-+ id = bios_cpu_apicid[i];
-+ if (id == BAD_APICID)
-+ continue;
-+ if (id > max_apic)
-+ max_apic = id;
-+ cluster_cnt[APIC_CLUSTERID(id)]++;
-+ }
-+
-+ /* Don't use clustered mode on AMD platforms. */
-+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
-+ genapic = &apic_physflat;
-+#ifndef CONFIG_HOTPLUG_CPU
-+ /* In the CPU hotplug case we cannot use broadcast mode
-+ because that opens a race when a CPU is removed.
-+ Stay at physflat mode in this case.
-+ It is bad to do this unconditionally though. Once
-+ we have ACPI platform support for CPU hotplug
-+	   we should detect hotplug capability from ACPI tables and
-+ only do this when really needed. -AK */
-+ if (max_apic <= 8)
-+ genapic = &apic_flat;
-+#endif
-+ goto print;
-+ }
-+
-+ clusters = 0;
-+ max_cluster = 0;
-+
-+ for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
-+ if (cluster_cnt[i] > 0) {
-+ ++clusters;
-+ if (cluster_cnt[i] > max_cluster)
-+ max_cluster = cluster_cnt[i];
-+ }
-+ }
-+
-+ /*
-+ * If we have clusters <= 1 and CPUs <= 8 in cluster 0, then flat mode,
-+ * else if max_cluster <= 4 and cluster_cnt[15] == 0, clustered logical
-+ * else physical mode.
-+ * (We don't use lowest priority delivery + HW APIC IRQ steering, so
-+ * can ignore the clustered logical case and go straight to physical.)
-+ */
-+ if (clusters <= 1 && max_cluster <= 8 && cluster_cnt[0] == max_cluster) {
-+#ifdef CONFIG_HOTPLUG_CPU
-+ /* Don't use APIC shortcuts in CPU hotplug to avoid races */
-+ genapic = &apic_physflat;
-+#else
-+ genapic = &apic_flat;
-+#endif
-+ } else
-+ genapic = &apic_cluster;
-+
-+print:
-+#else
-+ /* hardcode to xen apic functions */
-+ genapic = &apic_xen;
-+#endif
-+ printk(KERN_INFO "Setting APIC routing to %s\n", genapic->name);
-+}
-+
-+/* Same for both flat and clustered. */
-+
-+void send_IPI_self(int vector)
-+{
-+ __send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
-+}
-diff --git a/arch/x86_64/kernel/genapic_xen.c b/arch/x86_64/kernel/genapic_xen.c
-new file mode 100644
-index 0000000..b7a0d86
---- /dev/null
-+++ b/arch/x86_64/kernel/genapic_xen.c
-@@ -0,0 +1,162 @@
-+/*
-+ * Copyright 2004 James Cleverdon, IBM.
-+ * Subject to the GNU Public License, v.2
-+ *
-+ * Xen APIC subarch code. Maximum 8 CPUs, logical delivery.
-+ *
-+ * Hacked for x86-64 by James Cleverdon from i386 architecture code by
-+ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
-+ * James Cleverdon.
-+ *
-+ * Hacked to pieces for Xen by Chris Wright.
-+ */
-+#include <linux/config.h>
-+#include <linux/threads.h>
-+#include <linux/cpumask.h>
-+#include <linux/string.h>
-+#include <linux/kernel.h>
-+#include <linux/ctype.h>
-+#include <linux/init.h>
-+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-+#include <asm/smp.h>
-+#include <asm/ipi.h>
-+#else
-+#include <asm/apic.h>
-+#include <asm/apicdef.h>
-+#include <asm/genapic.h>
-+#endif
-+#include <xen/evtchn.h>
-+
-+DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
-+
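-+/*
-+ * Under Xen, IPIs are not raised through the local APIC: each IPI
-+ * vector is bound to a per-CPU event channel, and sending an IPI to a
-+ * CPU means notifying that CPU's event channel for the vector.
-+ */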
-+static inline void __send_IPI_one(unsigned int cpu, int vector)
-+{
-+ int irq = per_cpu(ipi_to_irq, cpu)[vector];
-+ BUG_ON(irq < 0);
-+ notify_remote_via_irq(irq);
-+}
-+
-+void __send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
-+{
-+ int cpu;
-+
-+ switch (shortcut) {
-+ case APIC_DEST_SELF:
-+ __send_IPI_one(smp_processor_id(), vector);
-+ break;
-+ case APIC_DEST_ALLBUT:
-+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
-+ if (cpu == smp_processor_id())
-+ continue;
-+ if (cpu_isset(cpu, cpu_online_map)) {
-+ __send_IPI_one(cpu, vector);
-+ }
-+ }
-+ break;
-+ case APIC_DEST_ALLINC:
-+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
-+ if (cpu_isset(cpu, cpu_online_map)) {
-+ __send_IPI_one(cpu, vector);
-+ }
-+ }
-+ break;
-+ default:
-+ printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut,
-+ vector);
-+ break;
-+ }
-+}
-+
-+static cpumask_t xen_target_cpus(void)
-+{
-+ return cpu_online_map;
-+}
-+
-+/*
-+ * Set up the logical destination ID.
-+ * This is a stub and is not called at present.
-+ */
-+static void xen_init_apic_ldr(void)
-+{
-+ Dprintk("%s\n", __FUNCTION__);
-+ return;
-+}
-+
-+static void xen_send_IPI_allbutself(int vector)
-+{
-+ /*
-+ * if there are no other CPUs in the system then
-+ * we get an APIC send error if we try to broadcast.
-+ * thus we have to avoid sending IPIs in this case.
-+ */
-+ Dprintk("%s\n", __FUNCTION__);
-+ if (num_online_cpus() > 1)
-+ __send_IPI_shortcut(APIC_DEST_ALLBUT, vector, APIC_DEST_LOGICAL);
-+}
-+
-+static void xen_send_IPI_all(int vector)
-+{
-+ Dprintk("%s\n", __FUNCTION__);
-+ __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
-+}
-+
-+static void xen_send_IPI_mask(cpumask_t cpumask, int vector)
-+{
-+ unsigned long mask = cpus_addr(cpumask)[0];
-+ unsigned int cpu;
-+ unsigned long flags;
-+
-+ Dprintk("%s\n", __FUNCTION__);
-+ local_irq_save(flags);
-+ WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
-+
-+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
-+ if (cpu_isset(cpu, cpumask)) {
-+ __send_IPI_one(cpu, vector);
-+ }
-+ }
-+ local_irq_restore(flags);
-+}
-+
-+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-+static int xen_apic_id_registered(void)
-+{
-+ /* better be set */
-+ Dprintk("%s\n", __FUNCTION__);
-+ return physid_isset(smp_processor_id(), phys_cpu_present_map);
-+}
-+#endif
-+
-+static unsigned int xen_cpu_mask_to_apicid(cpumask_t cpumask)
-+{
-+ Dprintk("%s\n", __FUNCTION__);
-+ return cpus_addr(cpumask)[0] & APIC_ALL_CPUS;
-+}
-+
-+static unsigned int phys_pkg_id(int index_msb)
-+{
-+ u32 ebx;
-+
-+ Dprintk("%s\n", __FUNCTION__);
-+ ebx = cpuid_ebx(1);
-+ return ((ebx >> 24) & 0xFF) >> index_msb;
-+}
-+
-+struct genapic apic_xen = {
-+ .name = "xen",
-+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-+ .int_delivery_mode = dest_LowestPrio,
-+#endif
-+ .int_dest_mode = (APIC_DEST_LOGICAL != 0),
-+ .int_delivery_dest = APIC_DEST_LOGICAL | APIC_DM_LOWEST,
-+ .target_cpus = xen_target_cpus,
-+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-+ .apic_id_registered = xen_apic_id_registered,
-+#endif
-+ .init_apic_ldr = xen_init_apic_ldr,
-+ .send_IPI_all = xen_send_IPI_all,
-+ .send_IPI_allbutself = xen_send_IPI_allbutself,
-+ .send_IPI_mask = xen_send_IPI_mask,
-+ .cpu_mask_to_apicid = xen_cpu_mask_to_apicid,
-+ .phys_pkg_id = phys_pkg_id,
-+};
-diff --git a/arch/x86_64/kernel/head-xen.S b/arch/x86_64/kernel/head-xen.S
-new file mode 100644
-index 0000000..1f3e07b
---- /dev/null
-+++ b/arch/x86_64/kernel/head-xen.S
-@@ -0,0 +1,156 @@
-+/*
-+ * linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
-+ *
-+ * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
-+ * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
-+ * Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
-+ * Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
-+ *
-+ * $Id: head.S,v 1.49 2002/03/19 17:39:25 ak Exp $
-+ *
-+ * Jun Nakajima <jun.nakajima@intel.com>
-+ * Modified for Xen
-+ */
-+
-+
-+#include <linux/linkage.h>
-+#include <linux/threads.h>
-+#include <linux/init.h>
-+#include <asm/desc.h>
-+#include <asm/segment.h>
-+#include <asm/page.h>
-+#include <asm/msr.h>
-+#include <asm/cache.h>
-+
-+ .text
-+ .code64
-+ .globl startup_64
-+startup_64:
-+ENTRY(_start)
-+ movq $(init_thread_union+THREAD_SIZE-8),%rsp
-+ /* zero EFLAGS after setting rsp */
-+ pushq $0
-+ popfq
-+
-+ /* rsi is pointer to startup info structure.
-+ pass it to C */
-+ movq %rsi,%rdi
-+ jmp x86_64_start_kernel
-+
-+ENTRY(stext)
-+ENTRY(_stext)
-+
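-+/*
-+ * Lay out a sequence of page-aligned, page-sized objects: each
-+ * NEXT_PAGE(name) advances the location counter to the next page
-+ * boundary and records that page's physical address as phys_<name>.
-+ */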
-+ $page = 0
-+#define NEXT_PAGE(name) \
-+ $page = $page + 1; \
-+ .org $page * 0x1000; \
-+ phys_/**/name = $page * 0x1000 + __PHYSICAL_START; \
-+ENTRY(name)
-+
-+NEXT_PAGE(init_level4_pgt)
-+ /* This gets initialized in x86_64_start_kernel */
-+ .fill 512,8,0
-+
-+	/*
-+	 * We update two pgd entries to keep the kernel and user pgds
-+	 * consistent at pgd_populate(); this can happen for kernel
-+	 * modules, so we place this page here to avoid memory corruption
-+	 * in those cases. We also use this page to establish the initial
-+	 * mapping for the vsyscall area.
-+	 */
-+NEXT_PAGE(init_level4_user_pgt)
-+ .fill 512,8,0
-+
-+NEXT_PAGE(level3_kernel_pgt)
-+ .fill 512,8,0
-+
-+ /*
-+ * This is used for vsyscall area mapping as we have a different
-+ * level4 page table for user.
-+ */
-+NEXT_PAGE(level3_user_pgt)
-+ .fill 512,8,0
-+
-+NEXT_PAGE(level2_kernel_pgt)
-+ .fill 512,8,0
-+
-+NEXT_PAGE(empty_zero_page)
-+ .skip PAGE_SIZE
-+
-+NEXT_PAGE(hypercall_page)
-+ .fill 512,8,0
-+
-+#undef NEXT_PAGE
-+
-+ .data
-+
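-+	/*
-+	 * GDT descriptor (limit and base) for the boot CPU; under SMP it
-+	 * is followed by zeroed slots that are filled in later for the
-+	 * remaining CPUs.
-+	 */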
-+ .align 16
-+ .globl cpu_gdt_descr
-+cpu_gdt_descr:
-+ .word gdt_end-cpu_gdt_table
-+gdt:
-+ .quad cpu_gdt_table
-+#ifdef CONFIG_SMP
-+ .rept NR_CPUS-1
-+ .word 0
-+ .quad 0
-+ .endr
-+#endif
-+
-+/* We need valid kernel segments for data and code in long mode too;
-+ * IRET will check the segment types.  kkeil 2000/10/28
-+ * Also, sysret mandates a special GDT layout.
-+ */
-+
-+ .section .data.page_aligned, "aw"
-+ .align PAGE_SIZE
-+
-+/* The TLS descriptors are currently at a different place compared to i386.
-+ Hopefully nobody expects them at a fixed place (Wine?) */
-+
-+ENTRY(cpu_gdt_table)
-+ .quad 0x0000000000000000 /* NULL descriptor */
-+ .quad 0x0 /* unused */
-+ .quad 0x00af9a000000ffff /* __KERNEL_CS */
-+ .quad 0x00cf92000000ffff /* __KERNEL_DS */
-+ .quad 0x00cffa000000ffff /* __USER32_CS */
-+ .quad 0x00cff2000000ffff /* __USER_DS, __USER32_DS */
-+ .quad 0x00affa000000ffff /* __USER_CS */
-+ .quad 0x00cf9a000000ffff /* __KERNEL32_CS */
-+ .quad 0,0 /* TSS */
-+ .quad 0,0 /* LDT */
-+ .quad 0,0,0 /* three TLS descriptors */
-+ .quad 0 /* unused */
-+gdt_end:
-+ /* asm/segment.h:GDT_ENTRIES must match this */
-+ /* This should be a multiple of the cache line size */
-+ /* GDTs of other CPUs are now dynamically allocated */
-+
-+ /* zero the remaining page */
-+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
-+
-+/*
-+ * __xen_guest information
-+ */
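-+/*
-+ * Emit \value as uppercase hexadecimal ASCII, most significant digit
-+ * first: recurse on the high-order bits, then print the low nibble.
-+ */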
-+.macro utoh value
-+ .if (\value) < 0 || (\value) >= 0x10
-+ utoh (((\value)>>4)&0x0fffffffffffffff)
-+ .endif
-+ .if ((\value) & 0xf) < 10
-+ .byte '0' + ((\value) & 0xf)
-+ .else
-+ .byte 'A' + ((\value) & 0xf) - 10
-+ .endif
-+.endm
-+
-+.section __xen_guest
-+ .ascii "GUEST_OS=linux,GUEST_VER=2.6"
-+ .ascii ",XEN_VER=xen-3.0"
-+ .ascii ",VIRT_BASE=0x"; utoh __START_KERNEL_map
-+ .ascii ",HYPERCALL_PAGE=0x"; utoh (phys_hypercall_page >> PAGE_SHIFT)
-+ .ascii ",FEATURES=writable_page_tables"
-+ .ascii "|writable_descriptor_tables"
-+ .ascii "|auto_translated_physmap"
-+ .ascii "|supervisor_mode_kernel"
-+ .ascii ",LOADER=generic"
-+ .byte 0
-diff --git a/arch/x86_64/kernel/head64-xen.c b/arch/x86_64/kernel/head64-xen.c
-new file mode 100644
-index 0000000..77176d0
---- /dev/null
-+++ b/arch/x86_64/kernel/head64-xen.c
-@@ -0,0 +1,140 @@
-+/*
-+ * linux/arch/x86_64/kernel/head64.c -- prepare to run common code
-+ *
-+ * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
-+ *
-+ * $Id: head64.c,v 1.22 2001/07/06 14:28:20 ak Exp $
-+ *
-+ * Jun Nakajima <jun.nakajima@intel.com>
-+ * Modified for Xen.
-+ */
-+
-+#include <linux/init.h>
-+#include <linux/linkage.h>
-+#include <linux/types.h>
-+#include <linux/kernel.h>
-+#include <linux/string.h>
-+#include <linux/percpu.h>
-+
-+#include <asm/processor.h>
-+#include <asm/proto.h>
-+#include <asm/smp.h>
-+#include <asm/bootsetup.h>
-+#include <asm/setup.h>
-+#include <asm/desc.h>
-+#include <asm/pgtable.h>
-+#include <asm/sections.h>
-+
-+unsigned long start_pfn;
-+
-+/* Don't add a printk in there. printk relies on the PDA which is not initialized
-+ yet. */
-+#if 0
-+static void __init clear_bss(void)
-+{
-+ memset(__bss_start, 0,
-+ (unsigned long) __bss_stop - (unsigned long) __bss_start);
-+}
-+#endif
-+
-+#define NEW_CL_POINTER 0x228 /* Relative to real mode data */
-+#define OLD_CL_MAGIC_ADDR 0x90020
-+#define OLD_CL_MAGIC 0xA33F
-+#define OLD_CL_BASE_ADDR 0x90000
-+#define OLD_CL_OFFSET 0x90022
-+
-+extern char saved_command_line[];
-+
-+static void __init copy_bootdata(char *real_mode_data)
-+{
-+#ifndef CONFIG_XEN
-+ int new_data;
-+ char * command_line;
-+
-+ memcpy(x86_boot_params, real_mode_data, BOOT_PARAM_SIZE);
-+ new_data = *(int *) (x86_boot_params + NEW_CL_POINTER);
-+ if (!new_data) {
-+ if (OLD_CL_MAGIC != * (u16 *) OLD_CL_MAGIC_ADDR) {
-+ printk("so old bootloader that it does not support commandline?!\n");
-+ return;
-+ }
-+ new_data = OLD_CL_BASE_ADDR + * (u16 *) OLD_CL_OFFSET;
-+ printk("old bootloader convention, maybe loadlin?\n");
-+ }
-+ command_line = (char *) ((u64)(new_data));
-+ memcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
-+#else
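-+	/* Under Xen the command line comes from the start_info page rather
-+	   than from real-mode boot data; clamp it to COMMAND_LINE_SIZE. */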
-+ int max_cmdline;
-+
-+ if ((max_cmdline = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE)
-+ max_cmdline = COMMAND_LINE_SIZE;
-+ memcpy(saved_command_line, xen_start_info->cmd_line, max_cmdline);
-+ saved_command_line[max_cmdline-1] = '\0';
-+#endif
-+ printk("Bootdata ok (command line is %s)\n", saved_command_line);
-+}
-+
-+static void __init setup_boot_cpu_data(void)
-+{
-+ unsigned int dummy, eax;
-+
-+ /* get vendor info */
-+ cpuid(0, (unsigned int *)&boot_cpu_data.cpuid_level,
-+ (unsigned int *)&boot_cpu_data.x86_vendor_id[0],
-+ (unsigned int *)&boot_cpu_data.x86_vendor_id[8],
-+ (unsigned int *)&boot_cpu_data.x86_vendor_id[4]);
-+
-+ /* get cpu type */
-+ cpuid(1, &eax, &dummy, &dummy,
-+ (unsigned int *) &boot_cpu_data.x86_capability);
-+ boot_cpu_data.x86 = (eax >> 8) & 0xf;
-+ boot_cpu_data.x86_model = (eax >> 4) & 0xf;
-+ boot_cpu_data.x86_mask = eax & 0xf;
-+}
-+
-+void __init x86_64_start_kernel(char * real_mode_data)
-+{
-+ char *s;
-+ int i;
-+
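-+	/* Unless the guest is auto-translated, pick up the pseudo-physical
-+	   to machine frame map from start_info and start allocating right
-+	   after the bootstrap page tables. */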
-+ xen_start_info = (struct start_info *)real_mode_data;
-+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+ phys_to_machine_mapping =
-+ (unsigned long *)xen_start_info->mfn_list;
-+ start_pfn = (__pa(xen_start_info->pt_base) >> PAGE_SHIFT) +
-+ xen_start_info->nr_pt_frames;
-+ }
-+
-+#if 0
-+ for (i = 0; i < 256; i++)
-+ set_intr_gate(i, early_idt_handler);
-+ asm volatile("lidt %0" :: "m" (idt_descr));
-+#endif
-+
-+ for (i = 0; i < NR_CPUS; i++)
-+ cpu_pda(i) = &boot_cpu_pda[i];
-+
-+ pda_init(0);
-+ copy_bootdata(real_mode_data);
-+#ifdef CONFIG_SMP
-+ cpu_set(0, cpu_online_map);
-+#endif
-+ s = strstr(saved_command_line, "earlyprintk=");
-+ if (s != NULL)
-+ setup_early_printk(strchr(s, '=') + 1);
-+#ifdef CONFIG_NUMA
-+ s = strstr(saved_command_line, "numa=");
-+ if (s != NULL)
-+ numa_setup(s+5);
-+#endif
-+#ifdef CONFIG_X86_IO_APIC
-+ if (strstr(saved_command_line, "disableapic"))
-+ disable_apic = 1;
-+#endif
-+	/* You need an early console to see this */
-+ if (__pa_symbol(&_end) >= KERNEL_TEXT_SIZE)
-+ panic("Kernel too big for kernel mapping\n");
-+
-+ setup_boot_cpu_data();
-+ start_kernel();
-+}
-diff --git a/arch/x86_64/kernel/i387.c b/arch/x86_64/kernel/i387.c
-index a5d7e16..724f7b8 100644
---- a/arch/x86_64/kernel/i387.c
-+++ b/arch/x86_64/kernel/i387.c
-@@ -44,7 +44,6 @@ void mxcsr_feature_mask_init(void)
- */
- void __cpuinit fpu_init(void)
- {
-- unsigned long oldcr0 = read_cr0();
- extern void __bad_fxsave_alignment(void);
-
- if (offsetof(struct task_struct, thread.i387.fxsave) & 15)
-@@ -52,7 +51,9 @@ void __cpuinit fpu_init(void)
- set_in_cr4(X86_CR4_OSFXSR);
- set_in_cr4(X86_CR4_OSXMMEXCPT);
-
-- write_cr0(oldcr0 & ~((1UL<<3)|(1UL<<2))); /* clear TS and EM */
-+#ifndef CONFIG_XEN
-+ write_cr0(read_cr0() & ~((1UL<<3)|(1UL<<2))); /* clear TS and EM */
-+#endif
-
- mxcsr_feature_mask_init();
- /* clean state in init */
-diff --git a/arch/x86_64/kernel/init_task.c b/arch/x86_64/kernel/init_task.c
-index ce31d90..9c141e1 100644
---- a/arch/x86_64/kernel/init_task.c
-+++ b/arch/x86_64/kernel/init_task.c
-@@ -37,6 +37,8 @@ union thread_union init_thread_union
- struct task_struct init_task = INIT_TASK(init_task);
-
- EXPORT_SYMBOL(init_task);
-+
-+#ifndef CONFIG_X86_NO_TSS
- /*
- * per-CPU TSS segments. Threads are completely 'soft' on Linux,
- * no more per-task TSS's. The TSS size is kept cacheline-aligned
-@@ -45,5 +47,6 @@ EXPORT_SYMBOL(init_task);
- * on exact cacheline boundaries, to eliminate cacheline ping-pong.
- */
- DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
-+#endif
-
- #define ALIGN_TO_4K __attribute__((section(".data.init_task")))
-diff --git a/arch/x86_64/kernel/io_apic-xen.c b/arch/x86_64/kernel/io_apic-xen.c
-new file mode 100644
-index 0000000..fd72947
---- /dev/null
-+++ b/arch/x86_64/kernel/io_apic-xen.c
-@@ -0,0 +1,2231 @@
-+/*
-+ * Intel IO-APIC support for multi-Pentium hosts.
-+ *
-+ * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
-+ *
-+ * Many thanks to Stig Venaas for trying out countless experimental
-+ * patches and reporting/debugging problems patiently!
-+ *
-+ * (c) 1999, Multiple IO-APIC support, developed by
-+ * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
-+ * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
-+ * further tested and cleaned up by Zach Brown <zab@redhat.com>
-+ * and Ingo Molnar <mingo@redhat.com>
-+ *
-+ * Fixes
-+ * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
-+ * thanks to Eric Gilmore
-+ * and Rolf G. Tews
-+ * for testing these extensively
-+ * Paul Diefenbaugh : Added full ACPI support
-+ */
-+
-+#include <linux/mm.h>
-+#include <linux/interrupt.h>
-+#include <linux/init.h>
-+#include <linux/delay.h>
-+#include <linux/sched.h>
-+#include <linux/config.h>
-+#include <linux/smp_lock.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/acpi.h>
-+#include <linux/sysdev.h>
-+#ifdef CONFIG_ACPI
-+#include <acpi/acpi_bus.h>
-+#endif
-+
-+#include <asm/io.h>
-+#include <asm/smp.h>
-+#include <asm/desc.h>
-+#include <asm/proto.h>
-+#include <asm/mach_apic.h>
-+#include <asm/acpi.h>
-+#include <asm/dma.h>
-+
-+#define __apicdebuginit __init
-+
-+int sis_apic_bug; /* not actually supported, dummy for compile */
-+
-+static int no_timer_check;
-+
-+int disable_timer_pin_1 __initdata;
-+
-+#ifndef CONFIG_XEN
-+int timer_over_8254 __initdata = 1;
-+
-+/* Where if anywhere is the i8259 connect in external int mode */
-+static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
-+#endif
-+
-+static DEFINE_SPINLOCK(ioapic_lock);
-+
-+/*
-+ * # of IRQ routing registers
-+ */
-+int nr_ioapic_registers[MAX_IO_APICS];
-+
-+/*
-+ * Rough estimation of how many shared IRQs there are, can
-+ * be changed anytime.
-+ */
-+#define MAX_PLUS_SHARED_IRQS NR_IRQ_VECTORS
-+#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
-+
-+/*
-+ * This is performance-critical, we want to do it O(1)
-+ *
-+ * the indexing order of this array favors 1:1 mappings
-+ * between pins and IRQs.
-+ */
-+
-+static struct irq_pin_list {
-+ short apic, pin, next;
-+} irq_2_pin[PIN_MAP_SIZE];
-+
-+int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
-+#ifdef CONFIG_PCI_MSI
-+#define vector_to_irq(vector) \
-+ (platform_legacy_irq(vector) ? vector : vector_irq[vector])
-+#else
-+#define vector_to_irq(vector) (vector)
-+#endif
-+
-+#ifdef CONFIG_XEN
-+
-+#include <xen/interface/xen.h>
-+#include <xen/interface/physdev.h>
-+
-+/* Fake i8259 */
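-+/*
-+ * The legacy-8259 hooks collapse to stubs here: making an IRQ an
-+ * "8259" IRQ just clears it from io_apic_irqs, disabling one is a
-+ * no-op, and no 8259 IRQ is ever reported pending.
-+ */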
-+#define make_8259A_irq(_irq) (io_apic_irqs &= ~(1UL<<(_irq)))
-+#define disable_8259A_irq(_irq) ((void)0)
-+#define i8259A_irq_pending(_irq) (0)
-+
-+unsigned long io_apic_irqs;
-+
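-+/*
-+ * The guest cannot access IO-APIC registers directly: reads and writes
-+ * are proxied through PHYSDEVOP_APIC_READ/PHYSDEVOP_APIC_WRITE
-+ * hypercalls, keyed by the IO-APIC's physical base address.
-+ */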
-+static inline unsigned int xen_io_apic_read(unsigned int apic, unsigned int reg)
-+{
-+ physdev_op_t op;
-+ int ret;
-+
-+ op.cmd = PHYSDEVOP_APIC_READ;
-+ op.u.apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
-+ op.u.apic_op.reg = reg;
-+ ret = HYPERVISOR_physdev_op(&op);
-+ if (ret)
-+ return ret;
-+ return op.u.apic_op.value;
-+}
-+
-+static inline void xen_io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
-+{
-+ physdev_op_t op;
-+
-+ op.cmd = PHYSDEVOP_APIC_WRITE;
-+ op.u.apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
-+ op.u.apic_op.reg = reg;
-+ op.u.apic_op.value = value;
-+ HYPERVISOR_physdev_op(&op);
-+}
-+
-+#define io_apic_read(a,r) xen_io_apic_read(a,r)
-+#define io_apic_write(a,r,v) xen_io_apic_write(a,r,v)
-+
-+#define clear_IO_APIC() ((void)0)
-+
-+#else
-+
-+#ifdef CONFIG_SMP
-+static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
-+{
-+ unsigned long flags;
-+ unsigned int dest;
-+ cpumask_t tmp;
-+
-+ cpus_and(tmp, mask, cpu_online_map);
-+ if (cpus_empty(tmp))
-+ tmp = TARGET_CPUS;
-+
-+ cpus_and(mask, tmp, CPU_MASK_ALL);
-+
-+ dest = cpu_mask_to_apicid(mask);
-+
-+ /*
-+ * Only the high 8 bits are valid.
-+ */
-+ dest = SET_APIC_LOGICAL_ID(dest);
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ __DO_ACTION(1, = dest, )
-+ set_irq_info(irq, mask);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+#endif
-+
-+#endif /* !CONFIG_XEN */
-+
-+/*
-+ * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
-+ * shared ISA-space IRQs, so we have to support them. We are super
-+ * fast in the common case, and fast for shared ISA-space IRQs.
-+ */
-+static void add_pin_to_irq(unsigned int irq, int apic, int pin)
-+{
-+ static int first_free_entry = NR_IRQS;
-+ struct irq_pin_list *entry = irq_2_pin + irq;
-+
-+ BUG_ON(irq >= NR_IRQS);
-+ while (entry->next)
-+ entry = irq_2_pin + entry->next;
-+
-+ if (entry->pin != -1) {
-+ entry->next = first_free_entry;
-+ entry = irq_2_pin + entry->next;
-+ if (++first_free_entry >= PIN_MAP_SIZE)
-+ panic("io_apic.c: ran out of irq_2_pin entries!");
-+ }
-+ entry->apic = apic;
-+ entry->pin = pin;
-+}
-+
-+#ifndef CONFIG_XEN
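-+/*
-+ * Walk the irq_2_pin chain for "irq", applying ACTION to the low
-+ * redirection-table word (register 0x10 + R + pin*2) of every pin the
-+ * IRQ is routed to, then run FINAL once at the end.
-+ */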
-+#define __DO_ACTION(R, ACTION, FINAL) \
-+ \
-+{ \
-+ int pin; \
-+ struct irq_pin_list *entry = irq_2_pin + irq; \
-+ \
-+ BUG_ON(irq >= NR_IRQS); \
-+ for (;;) { \
-+ unsigned int reg; \
-+ pin = entry->pin; \
-+ if (pin == -1) \
-+ break; \
-+ reg = io_apic_read(entry->apic, 0x10 + R + pin*2); \
-+ reg ACTION; \
-+ io_apic_modify(entry->apic, reg); \
-+ if (!entry->next) \
-+ break; \
-+ entry = irq_2_pin + entry->next; \
-+ } \
-+ FINAL; \
-+}
-+
-+#define DO_ACTION(name,R,ACTION, FINAL) \
-+ \
-+ static void name##_IO_APIC_irq (unsigned int irq) \
-+ __DO_ACTION(R, ACTION, FINAL)
-+
-+DO_ACTION( __mask, 0, |= 0x00010000, io_apic_sync(entry->apic) )
-+ /* mask = 1 */
-+DO_ACTION( __unmask, 0, &= 0xfffeffff, )
-+ /* mask = 0 */
-+
-+static void mask_IO_APIC_irq (unsigned int irq)
-+{
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ __mask_IO_APIC_irq(irq);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+
-+static void unmask_IO_APIC_irq (unsigned int irq)
-+{
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ __unmask_IO_APIC_irq(irq);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+
-+static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
-+{
-+ struct IO_APIC_route_entry entry;
-+ unsigned long flags;
-+
-+ /* Check delivery_mode to be sure we're not clearing an SMI pin */
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ *(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
-+ *(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+ if (entry.delivery_mode == dest_SMI)
-+ return;
-+ /*
-+ * Disable it in the IO-APIC irq-routing table:
-+ */
-+ memset(&entry, 0, sizeof(entry));
-+ entry.mask = 1;
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0));
-+ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1));
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+
-+static void clear_IO_APIC (void)
-+{
-+ int apic, pin;
-+
-+ for (apic = 0; apic < nr_ioapics; apic++)
-+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
-+ clear_IO_APIC_pin(apic, pin);
-+}
-+
-+#endif /* !CONFIG_XEN */
-+
-+static u8 gsi_2_irq[NR_IRQ_VECTORS] = { [0 ... NR_IRQ_VECTORS-1] = 0xFF };
-+
-+/*
-+ * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
-+ * specific CPU-side IRQs.
-+ */
-+
-+#define MAX_PIRQS 8
-+static int pirq_entries [MAX_PIRQS];
-+static int pirqs_enabled;
-+int skip_ioapic_setup;
-+int ioapic_force;
-+
-+/* dummy parsing: see setup.c */
-+
-+static int __init disable_ioapic_setup(char *str)
-+{
-+ skip_ioapic_setup = 1;
-+ return 1;
-+}
-+
-+static int __init enable_ioapic_setup(char *str)
-+{
-+ ioapic_force = 1;
-+ skip_ioapic_setup = 0;
-+ return 1;
-+}
-+
-+__setup("noapic", disable_ioapic_setup);
-+__setup("apic", enable_ioapic_setup);
-+
-+#ifndef CONFIG_XEN
-+static int __init setup_disable_8254_timer(char *s)
-+{
-+ timer_over_8254 = -1;
-+ return 1;
-+}
-+static int __init setup_enable_8254_timer(char *s)
-+{
-+ timer_over_8254 = 2;
-+ return 1;
-+}
-+
-+__setup("disable_8254_timer", setup_disable_8254_timer);
-+__setup("enable_8254_timer", setup_enable_8254_timer);
-+#endif /* !CONFIG_XEN */
-+
-+#include <asm/pci-direct.h>
-+#include <linux/pci_ids.h>
-+#include <linux/pci.h>
-+
-+/* Temporary Hack. Nvidia and VIA boards currently only work with IO-APIC
-+ off. Check for an Nvidia or VIA PCI bridge and turn it off.
-+ Use pci direct infrastructure because this runs before the PCI subsystem.
-+
-+   Can be overridden with "apic"
-+
-+ And another hack to disable the IOMMU on VIA chipsets.
-+
-+ ... and others. Really should move this somewhere else.
-+
-+ Kludge-O-Rama. */
-+void __init check_ioapic(void)
-+{
-+ int num,slot,func;
-+ /* Poor man's PCI discovery */
-+ for (num = 0; num < 32; num++) {
-+ for (slot = 0; slot < 32; slot++) {
-+ for (func = 0; func < 8; func++) {
-+ u32 class;
-+ u32 vendor;
-+ u8 type;
-+ class = read_pci_config(num,slot,func,
-+ PCI_CLASS_REVISION);
-+ if (class == 0xffffffff)
-+ break;
-+
-+ if ((class >> 16) != PCI_CLASS_BRIDGE_PCI)
-+ continue;
-+
-+ vendor = read_pci_config(num, slot, func,
-+ PCI_VENDOR_ID);
-+ vendor &= 0xffff;
-+ switch (vendor) {
-+ case PCI_VENDOR_ID_VIA:
-+#ifdef CONFIG_GART_IOMMU
-+ if ((end_pfn > MAX_DMA32_PFN ||
-+ force_iommu) &&
-+ !iommu_aperture_allowed) {
-+ printk(KERN_INFO
-+ "Looks like a VIA chipset. Disabling IOMMU. Overwrite with \"iommu=allowed\"\n");
-+ iommu_aperture_disabled = 1;
-+ }
-+#endif
-+ return;
-+ case PCI_VENDOR_ID_NVIDIA:
-+#ifdef CONFIG_ACPI
-+ /* All timer overrides on Nvidia
-+ seem to be wrong. Skip them. */
-+ acpi_skip_timer_override = 1;
-+ printk(KERN_INFO
-+ "Nvidia board detected. Ignoring ACPI timer override.\n");
-+#endif
-+ /* RED-PEN skip them on mptables too? */
-+ return;
-+ case PCI_VENDOR_ID_ATI:
-+
-+				/* This should actually be the default, but
-+				   for 2.6.16 let's do it only for ATI, where
-+				   it's really needed. */
-+#ifndef CONFIG_XEN
-+ if (timer_over_8254 == 1) {
-+ timer_over_8254 = 0;
-+ printk(KERN_INFO
-+ "ATI board detected. Disabling timer routing over 8254.\n");
-+ }
-+#endif
-+ return;
-+ }
-+
-+
-+ /* No multi-function device? */
-+ type = read_pci_config_byte(num,slot,func,
-+ PCI_HEADER_TYPE);
-+ if (!(type & 0x80))
-+ break;
-+ }
-+ }
-+ }
-+}
-+
-+static int __init ioapic_pirq_setup(char *str)
-+{
-+ int i, max;
-+ int ints[MAX_PIRQS+1];
-+
-+ get_options(str, ARRAY_SIZE(ints), ints);
-+
-+ for (i = 0; i < MAX_PIRQS; i++)
-+ pirq_entries[i] = -1;
-+
-+ pirqs_enabled = 1;
-+ apic_printk(APIC_VERBOSE, "PIRQ redirection, working around broken MP-BIOS.\n");
-+ max = MAX_PIRQS;
-+ if (ints[0] < MAX_PIRQS)
-+ max = ints[0];
-+
-+ for (i = 0; i < max; i++) {
-+ apic_printk(APIC_VERBOSE, "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
-+ /*
-+ * PIRQs are mapped upside down, usually.
-+ */
-+ pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
-+ }
-+ return 1;
-+}
-+
-+__setup("pirq=", ioapic_pirq_setup);
-+
-+/*
-+ * Find the IRQ entry number of a certain pin.
-+ */
-+static int find_irq_entry(int apic, int pin, int type)
-+{
-+ int i;
-+
-+ for (i = 0; i < mp_irq_entries; i++)
-+ if (mp_irqs[i].mpc_irqtype == type &&
-+ (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
-+ mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
-+ mp_irqs[i].mpc_dstirq == pin)
-+ return i;
-+
-+ return -1;
-+}
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * Find the pin to which IRQ[irq] (ISA) is connected
-+ */
-+static int __init find_isa_irq_pin(int irq, int type)
-+{
-+ int i;
-+
-+ for (i = 0; i < mp_irq_entries; i++) {
-+ int lbus = mp_irqs[i].mpc_srcbus;
-+
-+ if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
-+ mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
-+ mp_bus_id_to_type[lbus] == MP_BUS_MCA) &&
-+ (mp_irqs[i].mpc_irqtype == type) &&
-+ (mp_irqs[i].mpc_srcbusirq == irq))
-+
-+ return mp_irqs[i].mpc_dstirq;
-+ }
-+ return -1;
-+}
-+
-+static int __init find_isa_irq_apic(int irq, int type)
-+{
-+ int i;
-+
-+ for (i = 0; i < mp_irq_entries; i++) {
-+ int lbus = mp_irqs[i].mpc_srcbus;
-+
-+ if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
-+ mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
-+ mp_bus_id_to_type[lbus] == MP_BUS_MCA) &&
-+ (mp_irqs[i].mpc_irqtype == type) &&
-+ (mp_irqs[i].mpc_srcbusirq == irq))
-+ break;
-+ }
-+ if (i < mp_irq_entries) {
-+ int apic;
-+ for(apic = 0; apic < nr_ioapics; apic++) {
-+ if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
-+ return apic;
-+ }
-+ }
-+
-+ return -1;
-+}
-+#endif
-+
-+/*
-+ * Find a specific PCI IRQ entry.
-+ * Not an __init, possibly needed by modules
-+ */
-+static int pin_2_irq(int idx, int apic, int pin);
-+
-+int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
-+{
-+ int apic, i, best_guess = -1;
-+
-+ apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
-+ bus, slot, pin);
-+ if (mp_bus_id_to_pci_bus[bus] == -1) {
-+ apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
-+ return -1;
-+ }
-+ for (i = 0; i < mp_irq_entries; i++) {
-+ int lbus = mp_irqs[i].mpc_srcbus;
-+
-+ for (apic = 0; apic < nr_ioapics; apic++)
-+ if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic ||
-+ mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
-+ break;
-+
-+ if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
-+ !mp_irqs[i].mpc_irqtype &&
-+ (bus == lbus) &&
-+ (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
-+ int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
-+
-+ if (!(apic || IO_APIC_IRQ(irq)))
-+ continue;
-+
-+ if (pin == (mp_irqs[i].mpc_srcbusirq & 3))
-+ return irq;
-+ /*
-+ * Use the first all-but-pin matching entry as a
-+ * best-guess fuzzy result for broken mptables.
-+ */
-+ if (best_guess < 0)
-+ best_guess = irq;
-+ }
-+ }
-+ BUG_ON(best_guess >= NR_IRQS);
-+ return best_guess;
-+}
-+
-+/*
-+ * EISA Edge/Level control register, ELCR
-+ */
-+static int EISA_ELCR(unsigned int irq)
-+{
-+ if (irq < 16) {
-+ unsigned int port = 0x4d0 + (irq >> 3);
-+ return (inb(port) >> (irq & 7)) & 1;
-+ }
-+ apic_printk(APIC_VERBOSE, "Broken MPtable reports ISA irq %d\n", irq);
-+ return 0;
-+}
-+
-+/* EISA interrupts are always polarity zero and can be edge or level
-+ * trigger depending on the ELCR value. If an interrupt is listed as
-+ * EISA conforming in the MP table, that means its trigger type must
-+ * be read in from the ELCR */
-+
-+#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
-+#define default_EISA_polarity(idx) (0)
-+
-+/* ISA interrupts are always polarity zero edge triggered,
-+ * when listed as conforming in the MP table. */
-+
-+#define default_ISA_trigger(idx) (0)
-+#define default_ISA_polarity(idx) (0)
-+
-+/* PCI interrupts are always polarity one level triggered,
-+ * when listed as conforming in the MP table. */
-+
-+#define default_PCI_trigger(idx) (1)
-+#define default_PCI_polarity(idx) (1)
-+
-+/* MCA interrupts are always polarity zero level triggered,
-+ * when listed as conforming in the MP table. */
-+
-+#define default_MCA_trigger(idx) (1)
-+#define default_MCA_polarity(idx) (0)
-+
-+static int __init MPBIOS_polarity(int idx)
-+{
-+ int bus = mp_irqs[idx].mpc_srcbus;
-+ int polarity;
-+
-+ /*
-+ * Determine IRQ line polarity (high active or low active):
-+ */
-+ switch (mp_irqs[idx].mpc_irqflag & 3)
-+ {
-+ case 0: /* conforms, ie. bus-type dependent polarity */
-+ {
-+ switch (mp_bus_id_to_type[bus])
-+ {
-+ case MP_BUS_ISA: /* ISA pin */
-+ {
-+ polarity = default_ISA_polarity(idx);
-+ break;
-+ }
-+ case MP_BUS_EISA: /* EISA pin */
-+ {
-+ polarity = default_EISA_polarity(idx);
-+ break;
-+ }
-+ case MP_BUS_PCI: /* PCI pin */
-+ {
-+ polarity = default_PCI_polarity(idx);
-+ break;
-+ }
-+ case MP_BUS_MCA: /* MCA pin */
-+ {
-+ polarity = default_MCA_polarity(idx);
-+ break;
-+ }
-+ default:
-+ {
-+ printk(KERN_WARNING "broken BIOS!!\n");
-+ polarity = 1;
-+ break;
-+ }
-+ }
-+ break;
-+ }
-+ case 1: /* high active */
-+ {
-+ polarity = 0;
-+ break;
-+ }
-+ case 2: /* reserved */
-+ {
-+ printk(KERN_WARNING "broken BIOS!!\n");
-+ polarity = 1;
-+ break;
-+ }
-+ case 3: /* low active */
-+ {
-+ polarity = 1;
-+ break;
-+ }
-+ default: /* invalid */
-+ {
-+ printk(KERN_WARNING "broken BIOS!!\n");
-+ polarity = 1;
-+ break;
-+ }
-+ }
-+ return polarity;
-+}
-+
-+static int MPBIOS_trigger(int idx)
-+{
-+ int bus = mp_irqs[idx].mpc_srcbus;
-+ int trigger;
-+
-+ /*
-+ * Determine IRQ trigger mode (edge or level sensitive):
-+ */
-+ switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
-+ {
-+ case 0: /* conforms, ie. bus-type dependent */
-+ {
-+ switch (mp_bus_id_to_type[bus])
-+ {
-+ case MP_BUS_ISA: /* ISA pin */
-+ {
-+ trigger = default_ISA_trigger(idx);
-+ break;
-+ }
-+ case MP_BUS_EISA: /* EISA pin */
-+ {
-+ trigger = default_EISA_trigger(idx);
-+ break;
-+ }
-+ case MP_BUS_PCI: /* PCI pin */
-+ {
-+ trigger = default_PCI_trigger(idx);
-+ break;
-+ }
-+ case MP_BUS_MCA: /* MCA pin */
-+ {
-+ trigger = default_MCA_trigger(idx);
-+ break;
-+ }
-+ default:
-+ {
-+ printk(KERN_WARNING "broken BIOS!!\n");
-+ trigger = 1;
-+ break;
-+ }
-+ }
-+ break;
-+ }
-+ case 1: /* edge */
-+ {
-+ trigger = 0;
-+ break;
-+ }
-+ case 2: /* reserved */
-+ {
-+ printk(KERN_WARNING "broken BIOS!!\n");
-+ trigger = 1;
-+ break;
-+ }
-+ case 3: /* level */
-+ {
-+ trigger = 1;
-+ break;
-+ }
-+ default: /* invalid */
-+ {
-+ printk(KERN_WARNING "broken BIOS!!\n");
-+ trigger = 0;
-+ break;
-+ }
-+ }
-+ return trigger;
-+}
-+
-+static inline int irq_polarity(int idx)
-+{
-+ return MPBIOS_polarity(idx);
-+}
-+
-+static inline int irq_trigger(int idx)
-+{
-+ return MPBIOS_trigger(idx);
-+}
-+
-+static int next_irq = 16;
-+
-+/*
-+ * gsi_irq_sharing -- Name overload! "irq" can be either a legacy IRQ
-+ * in the range 0-15, a linux IRQ in the range 0-223, or a GSI number
-+ * from ACPI, which can reach 800 in large boxen.
-+ *
-+ * Compact the sparse GSI space into a sequential IRQ series and reuse
-+ * vectors if possible.
-+ */
-+int gsi_irq_sharing(int gsi)
-+{
-+ int i, tries, vector;
-+
-+ BUG_ON(gsi >= NR_IRQ_VECTORS);
-+
-+ if (platform_legacy_irq(gsi))
-+ return gsi;
-+
-+ if (gsi_2_irq[gsi] != 0xFF)
-+ return (int)gsi_2_irq[gsi];
-+
-+ tries = NR_IRQS;
-+ try_again:
-+ vector = assign_irq_vector(gsi);
-+
-+ /*
-+ * Sharing vectors means sharing IRQs, so scan irq_vectors for previous
-+ * use of vector and if found, return that IRQ. However, we never want
-+ * to share legacy IRQs, which usually have a different trigger mode
-+ * than PCI.
-+ */
-+ for (i = 0; i < NR_IRQS; i++)
-+ if (IO_APIC_VECTOR(i) == vector)
-+ break;
-+ if (platform_legacy_irq(i)) {
-+ if (--tries >= 0) {
-+ IO_APIC_VECTOR(i) = 0;
-+ goto try_again;
-+ }
-+ panic("gsi_irq_sharing: didn't find an IRQ using vector 0x%02X for GSI %d", vector, gsi);
-+ }
-+ if (i < NR_IRQS) {
-+ gsi_2_irq[gsi] = i;
-+ printk(KERN_INFO "GSI %d sharing vector 0x%02X and IRQ %d\n",
-+ gsi, vector, i);
-+ return i;
-+ }
-+
-+ i = next_irq++;
-+ BUG_ON(i >= NR_IRQS);
-+ gsi_2_irq[gsi] = i;
-+ IO_APIC_VECTOR(i) = vector;
-+ printk(KERN_INFO "GSI %d assigned vector 0x%02X and IRQ %d\n",
-+ gsi, vector, i);
-+ return i;
-+}
-+
-+static int pin_2_irq(int idx, int apic, int pin)
-+{
-+ int irq, i;
-+ int bus = mp_irqs[idx].mpc_srcbus;
-+
-+ /*
-+ * Debugging check, we are in big trouble if this message pops up!
-+ */
-+ if (mp_irqs[idx].mpc_dstirq != pin)
-+ printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
-+
-+ switch (mp_bus_id_to_type[bus])
-+ {
-+ case MP_BUS_ISA: /* ISA pin */
-+ case MP_BUS_EISA:
-+ case MP_BUS_MCA:
-+ {
-+ irq = mp_irqs[idx].mpc_srcbusirq;
-+ break;
-+ }
-+ case MP_BUS_PCI: /* PCI pin */
-+ {
-+ /*
-+ * PCI IRQs are mapped in order
-+ */
-+ i = irq = 0;
-+ while (i < apic)
-+ irq += nr_ioapic_registers[i++];
-+ irq += pin;
-+ irq = gsi_irq_sharing(irq);
-+ break;
-+ }
-+ default:
-+ {
-+ printk(KERN_ERR "unknown bus type %d.\n",bus);
-+ irq = 0;
-+ break;
-+ }
-+ }
-+ BUG_ON(irq >= NR_IRQS);
-+
-+ /*
-+ * PCI IRQ command line redirection. Yes, limits are hardcoded.
-+ */
-+ if ((pin >= 16) && (pin <= 23)) {
-+ if (pirq_entries[pin-16] != -1) {
-+ if (!pirq_entries[pin-16]) {
-+ apic_printk(APIC_VERBOSE, "disabling PIRQ%d\n", pin-16);
-+ } else {
-+ irq = pirq_entries[pin-16];
-+ apic_printk(APIC_VERBOSE, "using PIRQ%d -> IRQ %d\n",
-+ pin-16, irq);
-+ }
-+ }
-+ }
-+ BUG_ON(irq >= NR_IRQS);
-+ return irq;
-+}
-+
-+static inline int IO_APIC_irq_trigger(int irq)
-+{
-+ int apic, idx, pin;
-+
-+ for (apic = 0; apic < nr_ioapics; apic++) {
-+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
-+ idx = find_irq_entry(apic,pin,mp_INT);
-+ if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
-+ return irq_trigger(idx);
-+ }
-+ }
-+ /*
-+ * nonexistent IRQs are edge default
-+ */
-+ return 0;
-+}
-+
-+/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
-+u8 irq_vector[NR_IRQ_VECTORS] __read_mostly;
-+
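-+/*
-+ * Unlike native, the vector is not chosen locally: the hypervisor
-+ * allocates it via PHYSDEVOP_ASSIGN_VECTOR, and the result is recorded
-+ * in vector_irq[] and, for real IRQs, in IO_APIC_VECTOR().
-+ */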
-+int assign_irq_vector(int irq)
-+{
-+ static int current_vector = FIRST_DEVICE_VECTOR;
-+ physdev_op_t op;
-+
-+ BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
-+ if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0)
-+ return IO_APIC_VECTOR(irq);
-+
-+ op.cmd = PHYSDEVOP_ASSIGN_VECTOR;
-+ op.u.irq_op.irq = irq;
-+ if (HYPERVISOR_physdev_op(&op))
-+ return -ENOSPC;
-+ current_vector = op.u.irq_op.vector;
-+
-+ vector_irq[current_vector] = irq;
-+ if (irq != AUTO_ASSIGN)
-+ IO_APIC_VECTOR(irq) = current_vector;
-+
-+ return current_vector;
-+}
-+
-+extern void (*interrupt[NR_IRQS])(void);
-+#ifndef CONFIG_XEN
-+static struct hw_interrupt_type ioapic_level_type;
-+static struct hw_interrupt_type ioapic_edge_type;
-+
-+#define IOAPIC_AUTO -1
-+#define IOAPIC_EDGE 0
-+#define IOAPIC_LEVEL 1
-+
-+static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger)
-+{
-+ if (use_pci_vector() && !platform_legacy_irq(irq)) {
-+ if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
-+ trigger == IOAPIC_LEVEL)
-+ irq_desc[vector].handler = &ioapic_level_type;
-+ else
-+ irq_desc[vector].handler = &ioapic_edge_type;
-+ set_intr_gate(vector, interrupt[vector]);
-+ } else {
-+ if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
-+ trigger == IOAPIC_LEVEL)
-+ irq_desc[irq].handler = &ioapic_level_type;
-+ else
-+ irq_desc[irq].handler = &ioapic_edge_type;
-+ set_intr_gate(vector, interrupt[irq]);
-+ }
-+}
-+#else
-+#define ioapic_register_intr(_irq,_vector,_trigger) ((void)0)
-+#endif /* !CONFIG_XEN */
-+
-+static void __init setup_IO_APIC_irqs(void)
-+{
-+ struct IO_APIC_route_entry entry;
-+ int apic, pin, idx, irq, first_notcon = 1, vector;
-+ unsigned long flags;
-+
-+ apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
-+
-+ for (apic = 0; apic < nr_ioapics; apic++) {
-+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
-+
-+ /*
-+ * add it to the IO-APIC irq-routing table:
-+ */
-+ memset(&entry,0,sizeof(entry));
-+
-+ entry.delivery_mode = INT_DELIVERY_MODE;
-+ entry.dest_mode = INT_DEST_MODE;
-+ entry.mask = 0; /* enable IRQ */
-+ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
-+
-+ idx = find_irq_entry(apic,pin,mp_INT);
-+ if (idx == -1) {
-+ if (first_notcon) {
-+ apic_printk(APIC_VERBOSE, KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mpc_apicid, pin);
-+ first_notcon = 0;
-+ } else
-+ apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mpc_apicid, pin);
-+ continue;
-+ }
-+
-+ entry.trigger = irq_trigger(idx);
-+ entry.polarity = irq_polarity(idx);
-+
-+ if (irq_trigger(idx)) {
-+ entry.trigger = 1;
-+ entry.mask = 1;
-+ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
-+ }
-+
-+ irq = pin_2_irq(idx, apic, pin);
-+ add_pin_to_irq(irq, apic, pin);
-+
-+ if (/* !apic && */ !IO_APIC_IRQ(irq))
-+ continue;
-+
-+ if (IO_APIC_IRQ(irq)) {
-+ vector = assign_irq_vector(irq);
-+ entry.vector = vector;
-+
-+ ioapic_register_intr(irq, vector, IOAPIC_AUTO);
-+ if (!apic && (irq < 16))
-+ disable_8259A_irq(irq);
-+ }
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
-+ io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
-+ set_native_irq_info(irq, TARGET_CPUS);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+ }
-+ }
-+
-+ if (!first_notcon)
-+ apic_printk(APIC_VERBOSE," not connected.\n");
-+}
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * Set up the 8259A-master output pin as broadcast to all
-+ * CPUs.
-+ */
-+static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
-+{
-+ struct IO_APIC_route_entry entry;
-+ unsigned long flags;
-+
-+ memset(&entry,0,sizeof(entry));
-+
-+ disable_8259A_irq(0);
-+
-+ /* mask LVT0 */
-+ apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
-+
-+ /*
-+ * We use logical delivery to get the timer IRQ
-+ * to the first CPU.
-+ */
-+ entry.dest_mode = INT_DEST_MODE;
-+ entry.mask = 0; /* unmask IRQ now */
-+ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
-+ entry.delivery_mode = INT_DELIVERY_MODE;
-+ entry.polarity = 0;
-+ entry.trigger = 0;
-+ entry.vector = vector;
-+
-+ /*
-+ * The timer IRQ doesn't have to know that behind the
-+	 * scenes we have an 8259A-master in AEOI mode ...
-+ */
-+ irq_desc[0].handler = &ioapic_edge_type;
-+
-+ /*
-+ * Add it to the IO-APIC irq-routing table:
-+ */
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
-+ io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ enable_8259A_irq(0);
-+}
-+
-+void __init UNEXPECTED_IO_APIC(void)
-+{
-+}
-+
-+void __apicdebuginit print_IO_APIC(void)
-+{
-+ int apic, i;
-+ union IO_APIC_reg_00 reg_00;
-+ union IO_APIC_reg_01 reg_01;
-+ union IO_APIC_reg_02 reg_02;
-+ unsigned long flags;
-+
-+ if (apic_verbosity == APIC_QUIET)
-+ return;
-+
-+ printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
-+ for (i = 0; i < nr_ioapics; i++)
-+ printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
-+ mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
-+
-+ /*
-+ * We are a bit conservative about what we expect. We have to
-+ * know about every hardware change ASAP.
-+ */
-+ printk(KERN_INFO "testing the IO APIC.......................\n");
-+
-+ for (apic = 0; apic < nr_ioapics; apic++) {
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ reg_00.raw = io_apic_read(apic, 0);
-+ reg_01.raw = io_apic_read(apic, 1);
-+ if (reg_01.bits.version >= 0x10)
-+ reg_02.raw = io_apic_read(apic, 2);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ printk("\n");
-+ printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
-+ printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
-+ printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
-+ if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2)
-+ UNEXPECTED_IO_APIC();
-+
-+ printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
-+ printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
-+ if ( (reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */
-+ (reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */
-+ (reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */
-+ (reg_01.bits.entries != 0x1f) && /* dual Xeon boards */
-+ (reg_01.bits.entries != 0x22) && /* bigger Xeon boards */
-+ (reg_01.bits.entries != 0x2E) &&
-+ (reg_01.bits.entries != 0x3F) &&
-+ (reg_01.bits.entries != 0x03)
-+ )
-+ UNEXPECTED_IO_APIC();
-+
-+ printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
-+ printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
-+ if ( (reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */
-+ (reg_01.bits.version != 0x02) && /* 82801BA IO-APICs (ICH2) */
-+ (reg_01.bits.version != 0x10) && /* oldest IO-APICs */
-+ (reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */
-+ (reg_01.bits.version != 0x13) && /* Xeon IO-APICs */
-+ (reg_01.bits.version != 0x20) /* Intel P64H (82806 AA) */
-+ )
-+ UNEXPECTED_IO_APIC();
-+ if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2)
-+ UNEXPECTED_IO_APIC();
-+
-+ if (reg_01.bits.version >= 0x10) {
-+ printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
-+ printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
-+ if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2)
-+ UNEXPECTED_IO_APIC();
-+ }
-+
-+ printk(KERN_DEBUG ".... IRQ redirection table:\n");
-+
-+ printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
-+ " Stat Dest Deli Vect: \n");
-+
-+ for (i = 0; i <= reg_01.bits.entries; i++) {
-+ struct IO_APIC_route_entry entry;
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ *(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2);
-+ *(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ printk(KERN_DEBUG " %02x %03X %02X ",
-+ i,
-+ entry.dest.logical.logical_dest,
-+ entry.dest.physical.physical_dest
-+ );
-+
-+ printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
-+ entry.mask,
-+ entry.trigger,
-+ entry.irr,
-+ entry.polarity,
-+ entry.delivery_status,
-+ entry.dest_mode,
-+ entry.delivery_mode,
-+ entry.vector
-+ );
-+ }
-+ }
-+ if (use_pci_vector())
-+ printk(KERN_INFO "Using vector-based indexing\n");
-+ printk(KERN_DEBUG "IRQ to pin mappings:\n");
-+ for (i = 0; i < NR_IRQS; i++) {
-+ struct irq_pin_list *entry = irq_2_pin + i;
-+ if (entry->pin < 0)
-+ continue;
-+ if (use_pci_vector() && !platform_legacy_irq(i))
-+ printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
-+ else
-+ printk(KERN_DEBUG "IRQ%d ", i);
-+ for (;;) {
-+ printk("-> %d:%d", entry->apic, entry->pin);
-+ if (!entry->next)
-+ break;
-+ entry = irq_2_pin + entry->next;
-+ }
-+ printk("\n");
-+ }
-+
-+ printk(KERN_INFO ".................................... done.\n");
-+
-+ return;
-+}
-+
-+#if 0
-+
-+static __apicdebuginit void print_APIC_bitfield (int base)
-+{
-+ unsigned int v;
-+ int i, j;
-+
-+ if (apic_verbosity == APIC_QUIET)
-+ return;
-+
-+ printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
-+ for (i = 0; i < 8; i++) {
-+ v = apic_read(base + i*0x10);
-+ for (j = 0; j < 32; j++) {
-+ if (v & (1<<j))
-+ printk("1");
-+ else
-+ printk("0");
-+ }
-+ printk("\n");
-+ }
-+}
-+
-+void __apicdebuginit print_local_APIC(void * dummy)
-+{
-+ unsigned int v, ver, maxlvt;
-+
-+ if (apic_verbosity == APIC_QUIET)
-+ return;
-+
-+ printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
-+ smp_processor_id(), hard_smp_processor_id());
-+ v = apic_read(APIC_ID);
-+ printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(v));
-+ v = apic_read(APIC_LVR);
-+ printk(KERN_INFO "... APIC VERSION: %08x\n", v);
-+ ver = GET_APIC_VERSION(v);
-+ maxlvt = get_maxlvt();
-+
-+ v = apic_read(APIC_TASKPRI);
-+ printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
-+
-+ v = apic_read(APIC_ARBPRI);
-+ printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
-+ v & APIC_ARBPRI_MASK);
-+ v = apic_read(APIC_PROCPRI);
-+ printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
-+
-+ v = apic_read(APIC_EOI);
-+ printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
-+ v = apic_read(APIC_RRR);
-+ printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
-+ v = apic_read(APIC_LDR);
-+ printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
-+ v = apic_read(APIC_DFR);
-+ printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
-+ v = apic_read(APIC_SPIV);
-+ printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
-+
-+ printk(KERN_DEBUG "... APIC ISR field:\n");
-+ print_APIC_bitfield(APIC_ISR);
-+ printk(KERN_DEBUG "... APIC TMR field:\n");
-+ print_APIC_bitfield(APIC_TMR);
-+ printk(KERN_DEBUG "... APIC IRR field:\n");
-+ print_APIC_bitfield(APIC_IRR);
-+
-+ v = apic_read(APIC_ESR);
-+ printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
-+
-+ v = apic_read(APIC_ICR);
-+ printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
-+ v = apic_read(APIC_ICR2);
-+ printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);
-+
-+ v = apic_read(APIC_LVTT);
-+ printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
-+
-+ if (maxlvt > 3) { /* PC is LVT#4. */
-+ v = apic_read(APIC_LVTPC);
-+ printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
-+ }
-+ v = apic_read(APIC_LVT0);
-+ printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
-+ v = apic_read(APIC_LVT1);
-+ printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
-+
-+ if (maxlvt > 2) { /* ERR is LVT#3. */
-+ v = apic_read(APIC_LVTERR);
-+ printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
-+ }
-+
-+ v = apic_read(APIC_TMICT);
-+ printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
-+ v = apic_read(APIC_TMCCT);
-+ printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
-+ v = apic_read(APIC_TDCR);
-+ printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
-+ printk("\n");
-+}
-+
-+void print_all_local_APICs (void)
-+{
-+ on_each_cpu(print_local_APIC, NULL, 1, 1);
-+}
-+
-+void __apicdebuginit print_PIC(void)
-+{
-+ unsigned int v;
-+ unsigned long flags;
-+
-+ if (apic_verbosity == APIC_QUIET)
-+ return;
-+
-+ printk(KERN_DEBUG "\nprinting PIC contents\n");
-+
-+ spin_lock_irqsave(&i8259A_lock, flags);
-+
-+ v = inb(0xa1) << 8 | inb(0x21);
-+ printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
-+
-+ v = inb(0xa0) << 8 | inb(0x20);
-+ printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
-+
-+ outb(0x0b,0xa0);
-+ outb(0x0b,0x20);
-+ v = inb(0xa0) << 8 | inb(0x20);
-+ outb(0x0a,0xa0);
-+ outb(0x0a,0x20);
-+
-+ spin_unlock_irqrestore(&i8259A_lock, flags);
-+
-+ printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
-+
-+ v = inb(0x4d1) << 8 | inb(0x4d0);
-+ printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
-+}
-+
-+#endif /* 0 */
-+
-+#else
-+void __init print_IO_APIC(void) { }
-+#endif /* !CONFIG_XEN */
-+
-+static void __init enable_IO_APIC(void)
-+{
-+ union IO_APIC_reg_01 reg_01;
-+#ifndef CONFIG_XEN
-+ int i8259_apic, i8259_pin;
-+#endif
-+ int i, apic;
-+ unsigned long flags;
-+
-+ for (i = 0; i < PIN_MAP_SIZE; i++) {
-+ irq_2_pin[i].pin = -1;
-+ irq_2_pin[i].next = 0;
-+ }
-+ if (!pirqs_enabled)
-+ for (i = 0; i < MAX_PIRQS; i++)
-+ pirq_entries[i] = -1;
-+
-+ /*
-+ * The number of IO-APIC IRQ registers (== #pins):
-+ */
-+ for (apic = 0; apic < nr_ioapics; apic++) {
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ reg_01.raw = io_apic_read(apic, 1);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+ nr_ioapic_registers[apic] = reg_01.bits.entries+1;
-+ }
-+#ifndef CONFIG_XEN
-+ for(apic = 0; apic < nr_ioapics; apic++) {
-+ int pin;
-+ /* See if any of the pins is in ExtINT mode */
-+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
-+ struct IO_APIC_route_entry entry;
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ *(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
-+ *(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+
-+			/* If the interrupt line is enabled and in ExtINT mode,
-+			 * we have found the pin where the i8259 is connected.
-+ */
-+ if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
-+ ioapic_i8259.apic = apic;
-+ ioapic_i8259.pin = pin;
-+ goto found_i8259;
-+ }
-+ }
-+ }
-+ found_i8259:
-+	/* Look to see if the MP table has reported the ExtINT */
-+ i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
-+ i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
-+	/* Trust the MP table if nothing is set up in the hardware */
-+ if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
-+ printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
-+ ioapic_i8259.pin = i8259_pin;
-+ ioapic_i8259.apic = i8259_apic;
-+ }
-+ /* Complain if the MP table and the hardware disagree */
-+ if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
-+ (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
-+ {
-+ printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
-+ }
-+#endif
-+
-+ /*
-+ * Do not trust the IO-APIC being empty at bootup
-+ */
-+ clear_IO_APIC();
-+}
-+
-+/*
-+ * Not an __init, needed by the reboot code
-+ */
-+void disable_IO_APIC(void)
-+{
-+ /*
-+ * Clear the IO-APIC before rebooting:
-+ */
-+ clear_IO_APIC();
-+
-+#ifndef CONFIG_XEN
-+ /*
-+ * If the i8259 is routed through an IOAPIC
-+ * Put that IOAPIC in virtual wire mode
-+ * so legacy interrupts can be delivered.
-+ */
-+ if (ioapic_i8259.pin != -1) {
-+ struct IO_APIC_route_entry entry;
-+ unsigned long flags;
-+
-+ memset(&entry, 0, sizeof(entry));
-+ entry.mask = 0; /* Enabled */
-+ entry.trigger = 0; /* Edge */
-+ entry.irr = 0;
-+ entry.polarity = 0; /* High */
-+ entry.delivery_status = 0;
-+ entry.dest_mode = 0; /* Physical */
-+ entry.delivery_mode = dest_ExtINT; /* ExtInt */
-+ entry.vector = 0;
-+ entry.dest.physical.physical_dest =
-+ GET_APIC_ID(apic_read(APIC_ID));
-+
-+ /*
-+ * Add it to the IO-APIC irq-routing table:
-+ */
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ io_apic_write(ioapic_i8259.apic, 0x11+2*ioapic_i8259.pin,
-+ *(((int *)&entry)+1));
-+ io_apic_write(ioapic_i8259.apic, 0x10+2*ioapic_i8259.pin,
-+ *(((int *)&entry)+0));
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+ }
-+
-+ disconnect_bsp_APIC(ioapic_i8259.pin != -1);
-+#endif
-+}
-+
-+/*
-+ * function to set the IO-APIC physical IDs based on the
-+ * values stored in the MPC table.
-+ *
-+ * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
-+ */
-+
-+#ifndef CONFIG_XEN
-+static void __init setup_ioapic_ids_from_mpc (void)
-+{
-+ union IO_APIC_reg_00 reg_00;
-+ int apic;
-+ int i;
-+ unsigned char old_id;
-+ unsigned long flags;
-+
-+ /*
-+ * Set the IOAPIC ID to the value stored in the MPC table.
-+ */
-+ for (apic = 0; apic < nr_ioapics; apic++) {
-+
-+ /* Read the register 0 value */
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ reg_00.raw = io_apic_read(apic, 0);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ old_id = mp_ioapics[apic].mpc_apicid;
-+
-+
-+ printk(KERN_INFO "Using IO-APIC %d\n", mp_ioapics[apic].mpc_apicid);
-+
-+
-+ /*
-+ * We need to adjust the IRQ routing table
-+ * if the ID changed.
-+ */
-+ if (old_id != mp_ioapics[apic].mpc_apicid)
-+ for (i = 0; i < mp_irq_entries; i++)
-+ if (mp_irqs[i].mpc_dstapic == old_id)
-+ mp_irqs[i].mpc_dstapic
-+ = mp_ioapics[apic].mpc_apicid;
-+
-+ /*
-+ * Read the right value from the MPC table and
-+ * write it into the ID register.
-+ */
-+ apic_printk(APIC_VERBOSE,KERN_INFO "...changing IO-APIC physical APIC ID to %d ...",
-+ mp_ioapics[apic].mpc_apicid);
-+
-+ reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ io_apic_write(apic, 0, reg_00.raw);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ /*
-+ * Sanity check
-+ */
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ reg_00.raw = io_apic_read(apic, 0);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+ if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
-+ printk("could not set ID!\n");
-+ else
-+ apic_printk(APIC_VERBOSE," ok.\n");
-+ }
-+}
-+#else
-+static void __init setup_ioapic_ids_from_mpc(void) { }
-+#endif
-+
-+/*
-+ * There is a nasty bug in some older SMP boards, their mptable lies
-+ * about the timer IRQ. We do the following to work around the situation:
-+ *
-+ * - timer IRQ defaults to IO-APIC IRQ
-+ * - if this function detects that timer IRQs are defunct, then we fall
-+ * back to ISA timer IRQs
-+ */
-+#ifndef CONFIG_XEN
-+static int __init timer_irq_works(void)
-+{
-+ unsigned long t1 = jiffies;
-+
-+ local_irq_enable();
-+ /* Let ten ticks pass... */
-+ mdelay((10 * 1000) / HZ);
-+
-+ /*
-+ * Expect a few ticks at least, to be sure some possible
-+ * glue logic does not lock up after one or two first
-+ * ticks in a non-ExtINT mode. Also the local APIC
-+ * might have cached one ExtINT interrupt. Finally, at
-+ * least one tick may be lost due to delays.
-+ */
-+
-+ /* jiffies wrap? */
-+ if (jiffies - t1 > 4)
-+ return 1;
-+ return 0;
-+}
-+
-+/*
-+ * In the SMP+IOAPIC case it might happen that there is an unspecified
-+ * number of pending IRQ events left unhandled. These cases are very rare,
-+ * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
-+ * better to do it this way as thus we do not have to be aware of
-+ * 'pending' interrupts in the IRQ path, except at this point.
-+ */
-+/*
-+ * Edge triggered needs to resend any interrupt
-+ * that was delayed but this is now handled in the device
-+ * independent code.
-+ */
-+
-+/*
-+ * Starting up an edge-triggered IO-APIC interrupt is
-+ * nasty - we need to make sure that we get the edge.
-+ * If it is already asserted for some reason, we need
-+ * to return 1 to indicate that it was pending.
-+ *
-+ * This is not complete - we should be able to fake
-+ * an edge even if it isn't on the 8259A...
-+ */
-+
-+static unsigned int startup_edge_ioapic_irq(unsigned int irq)
-+{
-+ int was_pending = 0;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ if (irq < 16) {
-+ disable_8259A_irq(irq);
-+ if (i8259A_irq_pending(irq))
-+ was_pending = 1;
-+ }
-+ __unmask_IO_APIC_irq(irq);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ return was_pending;
-+}
-+
-+/*
-+ * Once we have recorded IRQ_PENDING already, we can mask the
-+ * interrupt for real. This prevents IRQ storms from unhandled
-+ * devices.
-+ */
-+static void ack_edge_ioapic_irq(unsigned int irq)
-+{
-+ move_irq(irq);
-+ if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
-+ == (IRQ_PENDING | IRQ_DISABLED))
-+ mask_IO_APIC_irq(irq);
-+ ack_APIC_irq();
-+}
-+
-+/*
-+ * Level triggered interrupts can just be masked,
-+ * and shutting down and starting up the interrupt
-+ * is the same as enabling and disabling them -- except
-+ * with a startup need to return a "was pending" value.
-+ *
-+ * Level triggered interrupts are special because we
-+ * do not touch any IO-APIC register while handling
-+ * them. We ack the APIC in the end-IRQ handler, not
-+ * in the start-IRQ-handler. Protection against reentrance
-+ * from the same interrupt is still provided, both by the
-+ * generic IRQ layer and by the fact that an unacked local
-+ * APIC does not accept IRQs.
-+ */
-+static unsigned int startup_level_ioapic_irq (unsigned int irq)
-+{
-+ unmask_IO_APIC_irq(irq);
-+
-+ return 0; /* don't check for pending */
-+}
-+
-+static void end_level_ioapic_irq (unsigned int irq)
-+{
-+ move_irq(irq);
-+ ack_APIC_irq();
-+}
-+
-+#ifdef CONFIG_PCI_MSI
-+static unsigned int startup_edge_ioapic_vector(unsigned int vector)
-+{
-+ int irq = vector_to_irq(vector);
-+
-+ return startup_edge_ioapic_irq(irq);
-+}
-+
-+static void ack_edge_ioapic_vector(unsigned int vector)
-+{
-+ int irq = vector_to_irq(vector);
-+
-+ move_native_irq(vector);
-+ ack_edge_ioapic_irq(irq);
-+}
-+
-+static unsigned int startup_level_ioapic_vector (unsigned int vector)
-+{
-+ int irq = vector_to_irq(vector);
-+
-+ return startup_level_ioapic_irq (irq);
-+}
-+
-+static void end_level_ioapic_vector (unsigned int vector)
-+{
-+ int irq = vector_to_irq(vector);
-+
-+ move_native_irq(vector);
-+ end_level_ioapic_irq(irq);
-+}
-+
-+static void mask_IO_APIC_vector (unsigned int vector)
-+{
-+ int irq = vector_to_irq(vector);
-+
-+ mask_IO_APIC_irq(irq);
-+}
-+
-+static void unmask_IO_APIC_vector (unsigned int vector)
-+{
-+ int irq = vector_to_irq(vector);
-+
-+ unmask_IO_APIC_irq(irq);
-+}
-+
-+#ifdef CONFIG_SMP
-+static void set_ioapic_affinity_vector (unsigned int vector,
-+ cpumask_t cpu_mask)
-+{
-+ int irq = vector_to_irq(vector);
-+
-+ set_native_irq_info(vector, cpu_mask);
-+ set_ioapic_affinity_irq(irq, cpu_mask);
-+}
-+#endif // CONFIG_SMP
-+#endif // CONFIG_PCI_MSI
-+
-+/*
-+ * Level and edge triggered IO-APIC interrupts need different handling,
-+ * so we use two separate IRQ descriptors. Edge triggered IRQs can be
-+ * handled with the level-triggered descriptor, but that one has slightly
-+ * more overhead. Level-triggered interrupts cannot be handled with the
-+ * edge-triggered handler, without risking IRQ storms and other ugly
-+ * races.
-+ */
-+
-+static struct hw_interrupt_type ioapic_edge_type __read_mostly = {
-+ .typename = "IO-APIC-edge",
-+ .startup = startup_edge_ioapic,
-+ .shutdown = shutdown_edge_ioapic,
-+ .enable = enable_edge_ioapic,
-+ .disable = disable_edge_ioapic,
-+ .ack = ack_edge_ioapic,
-+ .end = end_edge_ioapic,
-+#ifdef CONFIG_SMP
-+ .set_affinity = set_ioapic_affinity,
-+#endif
-+};
-+
-+static struct hw_interrupt_type ioapic_level_type __read_mostly = {
-+ .typename = "IO-APIC-level",
-+ .startup = startup_level_ioapic,
-+ .shutdown = shutdown_level_ioapic,
-+ .enable = enable_level_ioapic,
-+ .disable = disable_level_ioapic,
-+ .ack = mask_and_ack_level_ioapic,
-+ .end = end_level_ioapic,
-+#ifdef CONFIG_SMP
-+ .set_affinity = set_ioapic_affinity,
-+#endif
-+};
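The two descriptor tables above use a standard C idiom: a struct of function pointers bound once per IRQ, so the generic IRQ layer dispatches through the descriptor instead of branching on trigger type at every step. A minimal, self-contained sketch of that idiom follows; the names are invented and printf() stands in for the real register accesses.

#include <stdio.h>

/* Minimal analogue of hw_interrupt_type: one ops table per trigger mode. */
struct irq_ops {
	const char *typename;
	unsigned int (*startup)(unsigned int irq);
	void (*ack)(unsigned int irq);
};

static unsigned int edge_startup(unsigned int irq)
{
	printf("IRQ %u: unmask, report pending state\n", irq);
	return 0;                       /* "was pending" flag */
}

static void edge_ack(unsigned int irq)
{
	printf("IRQ %u: ack early, before the handler runs\n", irq);
}

static unsigned int level_startup(unsigned int irq)
{
	printf("IRQ %u: unmask only\n", irq);
	return 0;                       /* never reports pending */
}

static void level_ack(unsigned int irq)
{
	printf("IRQ %u: ack late, in the end-of-IRQ path\n", irq);
}

static const struct irq_ops edge_ops  = { "edge",  edge_startup,  edge_ack  };
static const struct irq_ops level_ops = { "level", level_startup, level_ack };

int main(void)
{
	/* Each IRQ points at exactly one ops table; callers never branch. */
	const struct irq_ops *desc[2] = { &edge_ops, &level_ops };

	for (unsigned int irq = 0; irq < 2; irq++) {
		printf("%s-triggered:\n", desc[irq]->typename);
		desc[irq]->startup(irq);
		desc[irq]->ack(irq);
	}
	return 0;
}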
-+#endif /* !CONFIG_XEN */
-+
-+static inline void init_IO_APIC_traps(void)
-+{
-+ int irq;
-+
-+ /*
-+ * NOTE! The local APIC isn't very good at handling
-+ * multiple interrupts at the same interrupt level.
-+ * As the interrupt level is determined by taking the
-+ * vector number and shifting that right by 4, we
-+ * want to spread these out a bit so that they don't
-+ * all fall in the same interrupt level.
-+ *
-+ * Also, we've got to be careful not to trash gate
-+ * 0x80, because int 0x80 is hm, kind of importantish. ;)
-+ */
-+ for (irq = 0; irq < NR_IRQS ; irq++) {
-+ int tmp = irq;
-+ if (use_pci_vector()) {
-+ if (!platform_legacy_irq(tmp))
-+ if ((tmp = vector_to_irq(tmp)) == -1)
-+ continue;
-+ }
-+ if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) {
-+ /*
-+ * Hmm.. We don't have an entry for this,
-+ * so default to an old-fashioned 8259
-+ * interrupt if we can..
-+ */
-+ if (irq < 16)
-+ make_8259A_irq(irq);
-+#ifndef CONFIG_XEN
-+ else
-+ /* Strange. Oh, well.. */
-+ irq_desc[irq].handler = &no_irq_type;
-+#endif
-+ }
-+ }
-+}
-+
-+#ifndef CONFIG_XEN
-+static void enable_lapic_irq (unsigned int irq)
-+{
-+ unsigned long v;
-+
-+ v = apic_read(APIC_LVT0);
-+ apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
-+}
-+
-+static void disable_lapic_irq (unsigned int irq)
-+{
-+ unsigned long v;
-+
-+ v = apic_read(APIC_LVT0);
-+ apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
-+}
-+
-+static void ack_lapic_irq (unsigned int irq)
-+{
-+ ack_APIC_irq();
-+}
-+
-+static void end_lapic_irq (unsigned int i) { /* nothing */ }
-+
-+static struct hw_interrupt_type lapic_irq_type __read_mostly = {
-+ .typename = "local-APIC-edge",
-+ .startup = NULL, /* startup_irq() not used for IRQ0 */
-+ .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
-+ .enable = enable_lapic_irq,
-+ .disable = disable_lapic_irq,
-+ .ack = ack_lapic_irq,
-+ .end = end_lapic_irq,
-+};
-+
-+static void setup_nmi (void)
-+{
-+ /*
-+ * Dirty trick to enable the NMI watchdog ...
-+ * We put the 8259A master into AEOI mode and
-+ * unmask on all local APICs LVT0 as NMI.
-+ *
-+ * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
-+ * is from Maciej W. Rozycki - so we do not have to EOI from
-+ * the NMI handler or the timer interrupt.
-+ */
-+ printk(KERN_INFO "activating NMI Watchdog ...");
-+
-+ enable_NMI_through_LVT0(NULL);
-+
-+ printk(" done.\n");
-+}
-+
-+/*
-+ * This looks a bit hackish but it's about the only way of sending
-+ * a few INTA cycles to 8259As and any associated glue logic. ICR does
-+ * not support the ExtINT mode, unfortunately. We need to send these
-+ * cycles as some i82489DX-based boards have glue logic that keeps the
-+ * 8259A interrupt line asserted until INTA. --macro
-+ */
-+static inline void unlock_ExtINT_logic(void)
-+{
-+ int apic, pin, i;
-+ struct IO_APIC_route_entry entry0, entry1;
-+ unsigned char save_control, save_freq_select;
-+ unsigned long flags;
-+
-+ pin = find_isa_irq_pin(8, mp_INT);
-+ apic = find_isa_irq_apic(8, mp_INT);
-+ if (pin == -1)
-+ return;
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ *(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
-+ *(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+ clear_IO_APIC_pin(apic, pin);
-+
-+ memset(&entry1, 0, sizeof(entry1));
-+
-+ entry1.dest_mode = 0; /* physical delivery */
-+ entry1.mask = 0; /* unmask IRQ now */
-+ entry1.dest.physical.physical_dest = hard_smp_processor_id();
-+ entry1.delivery_mode = dest_ExtINT;
-+ entry1.polarity = entry0.polarity;
-+ entry1.trigger = 0;
-+ entry1.vector = 0;
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
-+ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ save_control = CMOS_READ(RTC_CONTROL);
-+ save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
-+ CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
-+ RTC_FREQ_SELECT);
-+ CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
-+
-+ i = 100;
-+ while (i-- > 0) {
-+ mdelay(10);
-+ if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
-+ i -= 10;
-+ }
-+
-+ CMOS_WRITE(save_control, RTC_CONTROL);
-+ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
-+ clear_IO_APIC_pin(apic, pin);
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
-+ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
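The CMOS writes in unlock_ExtINT_logic() arm the RTC's periodic interrupt: the low four bits of RTC_FREQ_SELECT choose a divider, and the 0x6 written above selects 1024 Hz via freq = 32768 >> (rate - 1). A standalone sketch of just that arithmetic (no port I/O; note that on real MC146818 parts the rates 1 and 2 behave differently, so the formula is applied only to the usual range):

#include <stdio.h>

/* Periodic-interrupt frequency for an MC146818 RTC rate-select value.
 * The chip divides its 32768 Hz base clock: freq = 32768 >> (rate - 1).
 * (Rates 1 and 2 are special on real hardware; 3..15 follow the formula.)
 */
static unsigned int rtc_periodic_hz(unsigned int rate)
{
	if (rate < 3 || rate > 15)
		return 0;               /* disabled or outside the usual range */
	return 32768u >> (rate - 1);
}

int main(void)
{
	/* 0x6 is the value written into RTC_FREQ_SELECT above: 1024 Hz. */
	printf("rate 0x6 -> %u Hz\n", rtc_periodic_hz(0x6));
	return 0;
}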
-+
-+/*
-+ * This code may look a bit paranoid, but it's supposed to cooperate with
-+ * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
-+ * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
-+ * fanatically on his truly buggy board.
-+ *
-+ * FIXME: really need to revamp this for modern platforms only.
-+ */
-+static inline void check_timer(void)
-+{
-+ int apic1, pin1, apic2, pin2;
-+ int vector;
-+
-+ /*
-+ * get/set the timer IRQ vector:
-+ */
-+ disable_8259A_irq(0);
-+ vector = assign_irq_vector(0);
-+ set_intr_gate(vector, interrupt[0]);
-+
-+ /*
-+	 * Subtle: code in do_timer_interrupt() expects an AEOI
-+ * mode for the 8259A whenever interrupts are routed
-+ * through I/O APICs. Also IRQ0 has to be enabled in
-+ * the 8259A which implies the virtual wire has to be
-+ * disabled in the local APIC.
-+ */
-+ apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
-+ init_8259A(1);
-+ if (timer_over_8254 > 0)
-+ enable_8259A_irq(0);
-+
-+ pin1 = find_isa_irq_pin(0, mp_INT);
-+ apic1 = find_isa_irq_apic(0, mp_INT);
-+ pin2 = ioapic_i8259.pin;
-+ apic2 = ioapic_i8259.apic;
-+
-+ apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
-+ vector, apic1, pin1, apic2, pin2);
-+
-+ if (pin1 != -1) {
-+ /*
-+ * Ok, does IRQ0 through the IOAPIC work?
-+ */
-+ unmask_IO_APIC_irq(0);
-+ if (!no_timer_check && timer_irq_works()) {
-+ nmi_watchdog_default();
-+ if (nmi_watchdog == NMI_IO_APIC) {
-+ disable_8259A_irq(0);
-+ setup_nmi();
-+ enable_8259A_irq(0);
-+ }
-+ if (disable_timer_pin_1 > 0)
-+ clear_IO_APIC_pin(0, pin1);
-+ return;
-+ }
-+ clear_IO_APIC_pin(apic1, pin1);
-+ apic_printk(APIC_QUIET,KERN_ERR "..MP-BIOS bug: 8254 timer not "
-+ "connected to IO-APIC\n");
-+ }
-+
-+ apic_printk(APIC_VERBOSE,KERN_INFO "...trying to set up timer (IRQ0) "
-+ "through the 8259A ... ");
-+ if (pin2 != -1) {
-+ apic_printk(APIC_VERBOSE,"\n..... (found apic %d pin %d) ...",
-+ apic2, pin2);
-+ /*
-+ * legacy devices should be connected to IO APIC #0
-+ */
-+ setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
-+ if (timer_irq_works()) {
-+ printk("works.\n");
-+ nmi_watchdog_default();
-+ if (nmi_watchdog == NMI_IO_APIC) {
-+ setup_nmi();
-+ }
-+ return;
-+ }
-+ /*
-+ * Cleanup, just in case ...
-+ */
-+ clear_IO_APIC_pin(apic2, pin2);
-+ }
-+ printk(" failed.\n");
-+
-+ if (nmi_watchdog == NMI_IO_APIC) {
-+ printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
-+ nmi_watchdog = 0;
-+ }
-+
-+ apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
-+
-+ disable_8259A_irq(0);
-+ irq_desc[0].handler = &lapic_irq_type;
-+ apic_write(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
-+ enable_8259A_irq(0);
-+
-+ if (timer_irq_works()) {
-+ apic_printk(APIC_QUIET, " works.\n");
-+ return;
-+ }
-+ apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
-+ apic_printk(APIC_VERBOSE," failed.\n");
-+
-+ apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as ExtINT IRQ...");
-+
-+ init_8259A(0);
-+ make_8259A_irq(0);
-+ apic_write(APIC_LVT0, APIC_DM_EXTINT);
-+
-+ unlock_ExtINT_logic();
-+
-+ if (timer_irq_works()) {
-+ apic_printk(APIC_VERBOSE," works.\n");
-+ return;
-+ }
-+ apic_printk(APIC_VERBOSE," failed :(.\n");
-+ panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n");
-+}
-+#else
-+#define check_timer() ((void)0)
-+#endif /* !CONFIG_XEN */
-+
-+static int __init notimercheck(char *s)
-+{
-+ no_timer_check = 1;
-+ return 1;
-+}
-+__setup("no_timer_check", notimercheck);
-+
-+/*
-+ * IRQs that are handled by the PIC in the MPS IOAPIC case.
-+ * - IRQ2 is the cascade IRQ, and cannot be an IO-APIC IRQ.
-+ *   Linux doesn't really care, as it's not actually used
-+ *   for any interrupt handling anyway.
-+ */
-+#define PIC_IRQS (1<<2)
-+
-+void __init setup_IO_APIC(void)
-+{
-+ enable_IO_APIC();
-+
-+ if (acpi_ioapic)
-+ io_apic_irqs = ~0; /* all IRQs go through IOAPIC */
-+ else
-+ io_apic_irqs = ~PIC_IRQS;
-+
-+ apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
-+
-+ /*
-+ * Set up the IO-APIC IRQ routing table.
-+ */
-+ if (!acpi_ioapic)
-+ setup_ioapic_ids_from_mpc();
-+#ifndef CONFIG_XEN
-+ sync_Arb_IDs();
-+#endif /* !CONFIG_XEN */
-+ setup_IO_APIC_irqs();
-+ init_IO_APIC_traps();
-+ check_timer();
-+ if (!acpi_ioapic)
-+ print_IO_APIC();
-+}
-+
-+struct sysfs_ioapic_data {
-+ struct sys_device dev;
-+ struct IO_APIC_route_entry entry[0];
-+};
-+static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
-+
-+static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
-+{
-+ struct IO_APIC_route_entry *entry;
-+ struct sysfs_ioapic_data *data;
-+ unsigned long flags;
-+ int i;
-+
-+ data = container_of(dev, struct sysfs_ioapic_data, dev);
-+ entry = data->entry;
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
-+ *(((int *)entry) + 1) = io_apic_read(dev->id, 0x11 + 2 * i);
-+ *(((int *)entry) + 0) = io_apic_read(dev->id, 0x10 + 2 * i);
-+ }
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ return 0;
-+}
-+
-+static int ioapic_resume(struct sys_device *dev)
-+{
-+ struct IO_APIC_route_entry *entry;
-+ struct sysfs_ioapic_data *data;
-+ unsigned long flags;
-+ union IO_APIC_reg_00 reg_00;
-+ int i;
-+
-+ data = container_of(dev, struct sysfs_ioapic_data, dev);
-+ entry = data->entry;
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ reg_00.raw = io_apic_read(dev->id, 0);
-+ if (reg_00.bits.ID != mp_ioapics[dev->id].mpc_apicid) {
-+ reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid;
-+ io_apic_write(dev->id, 0, reg_00.raw);
-+ }
-+ for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
-+ io_apic_write(dev->id, 0x11+2*i, *(((int *)entry)+1));
-+ io_apic_write(dev->id, 0x10+2*i, *(((int *)entry)+0));
-+ }
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ return 0;
-+}
-+
-+static struct sysdev_class ioapic_sysdev_class = {
-+ set_kset_name("ioapic"),
-+ .suspend = ioapic_suspend,
-+ .resume = ioapic_resume,
-+};
-+
-+static int __init ioapic_init_sysfs(void)
-+{
-+ struct sys_device * dev;
-+ int i, size, error = 0;
-+
-+ error = sysdev_class_register(&ioapic_sysdev_class);
-+ if (error)
-+ return error;
-+
-+ for (i = 0; i < nr_ioapics; i++ ) {
-+ size = sizeof(struct sys_device) + nr_ioapic_registers[i]
-+ * sizeof(struct IO_APIC_route_entry);
-+ mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL);
-+ if (!mp_ioapic_data[i]) {
-+ printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
-+ continue;
-+ }
-+ memset(mp_ioapic_data[i], 0, size);
-+ dev = &mp_ioapic_data[i]->dev;
-+ dev->id = i;
-+ dev->cls = &ioapic_sysdev_class;
-+ error = sysdev_register(dev);
-+ if (error) {
-+ kfree(mp_ioapic_data[i]);
-+ mp_ioapic_data[i] = NULL;
-+ printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
-+ continue;
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+device_initcall(ioapic_init_sysfs);
-+
-+/* --------------------------------------------------------------------------
-+ ACPI-based IOAPIC Configuration
-+ -------------------------------------------------------------------------- */
-+
-+#ifdef CONFIG_ACPI
-+
-+#define IO_APIC_MAX_ID 0xFE
-+
-+int __init io_apic_get_version (int ioapic)
-+{
-+ union IO_APIC_reg_01 reg_01;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ reg_01.raw = io_apic_read(ioapic, 1);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ return reg_01.bits.version;
-+}
-+
-+
-+int __init io_apic_get_redir_entries (int ioapic)
-+{
-+ union IO_APIC_reg_01 reg_01;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ reg_01.raw = io_apic_read(ioapic, 1);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ return reg_01.bits.entries;
-+}
-+
-+
-+int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
-+{
-+ struct IO_APIC_route_entry entry;
-+ unsigned long flags;
-+
-+ if (!IO_APIC_IRQ(irq)) {
-+ apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
-+ ioapic);
-+ return -EINVAL;
-+ }
-+
-+ /*
-+ * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
-+ * Note that we mask (disable) IRQs now -- these get enabled when the
-+ * corresponding device driver registers for this IRQ.
-+ */
-+
-+ memset(&entry,0,sizeof(entry));
-+
-+ entry.delivery_mode = INT_DELIVERY_MODE;
-+ entry.dest_mode = INT_DEST_MODE;
-+ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
-+ entry.trigger = edge_level;
-+ entry.polarity = active_high_low;
-+ entry.mask = 1; /* Disabled (masked) */
-+
-+ irq = gsi_irq_sharing(irq);
-+ /*
-+ * IRQs < 16 are already in the irq_2_pin[] map
-+ */
-+ if (irq >= 16)
-+ add_pin_to_irq(irq, ioapic, pin);
-+
-+ entry.vector = assign_irq_vector(irq);
-+
-+ apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> "
-+ "IRQ %d Mode:%i Active:%i)\n", ioapic,
-+ mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
-+ edge_level, active_high_low);
-+
-+ ioapic_register_intr(irq, entry.vector, edge_level);
-+
-+ if (!ioapic && (irq < 16))
-+ disable_8259A_irq(irq);
-+
-+ spin_lock_irqsave(&ioapic_lock, flags);
-+ io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
-+ io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
-+ set_native_irq_info(use_pci_vector() ? entry.vector : irq, TARGET_CPUS);
-+ spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+ return 0;
-+}
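Throughout this file a 64-bit redirection-table entry is committed as two 32-bit halves, at indirect registers 0x10 + 2*pin and 0x11 + 2*pin. A small sketch of that split, assuming a little-endian host as on x86 and using a plain union as a stand-in for struct IO_APIC_route_entry:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for struct IO_APIC_route_entry: 64 bits, two 32-bit words. */
union route_entry {
	uint64_t raw;
	uint32_t w[2];  /* on little-endian x86, w[0] is the low half */
};

int main(void)
{
	union route_entry e = { .raw = 0 };
	unsigned int pin = 9;

	e.raw |= 1ull << 16;            /* mask bit: start disabled */
	e.raw |= 0x31;                  /* vector field, for illustration */

	/* High word first; the low word with mask and vector lands last. */
	printf("write reg 0x%02x = 0x%08x\n", 0x11 + 2 * pin, e.w[1]);
	printf("write reg 0x%02x = 0x%08x\n", 0x10 + 2 * pin, e.w[0]);
	return 0;
}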
-+
-+#endif /* CONFIG_ACPI */
-+
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * This function is currently only a helper for the i386 SMP boot process,
-+ * where we need to reprogram the ioredtbls to cater for the CPUs which have
-+ * come online, so the mask should in all cases simply be TARGET_CPUS.
-+ */
-+#ifdef CONFIG_SMP
-+void __init setup_ioapic_dest(void)
-+{
-+ int pin, ioapic, irq, irq_entry;
-+
-+ if (skip_ioapic_setup == 1)
-+ return;
-+
-+ for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
-+ for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
-+ irq_entry = find_irq_entry(ioapic, pin, mp_INT);
-+ if (irq_entry == -1)
-+ continue;
-+ irq = pin_2_irq(irq_entry, ioapic, pin);
-+ set_ioapic_affinity_irq(irq, TARGET_CPUS);
-+ }
-+
-+ }
-+}
-+#endif
-+#endif /* !CONFIG_XEN */
-diff --git a/arch/x86_64/kernel/ioport-xen.c b/arch/x86_64/kernel/ioport-xen.c
-new file mode 100644
-index 0000000..1b628df
---- /dev/null
-+++ b/arch/x86_64/kernel/ioport-xen.c
-@@ -0,0 +1,58 @@
-+/*
-+ * linux/arch/x86_64/kernel/ioport.c
-+ *
-+ * This contains the io-permission bitmap code - written by obz, with changes
-+ * by Linus.
-+ */
-+
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/capability.h>
-+#include <linux/errno.h>
-+#include <linux/types.h>
-+#include <linux/ioport.h>
-+#include <linux/mm.h>
-+#include <linux/smp.h>
-+#include <linux/smp_lock.h>
-+#include <linux/stddef.h>
-+#include <linux/slab.h>
-+#include <linux/thread_info.h>
-+#include <xen/interface/physdev.h>
-+
-+/*
-+ * sys_iopl has to be used when you want to access the IO ports
-+ * beyond the 0x3ff range: to get the full 65536 ports bitmapped
-+ * you'd need 8kB of bitmaps/process, which is a bit excessive.
-+ */
-+
-+asmlinkage long sys_iopl(unsigned int new_iopl, struct pt_regs *regs)
-+{
-+ unsigned int old_iopl = current->thread.iopl;
-+ physdev_op_t op;
-+
-+ if (new_iopl > 3)
-+ return -EINVAL;
-+
-+ /* Need "raw I/O" privileges for direct port access. */
-+ if ((new_iopl > old_iopl) && !capable(CAP_SYS_RAWIO))
-+ return -EPERM;
-+
-+ /* Change our version of the privilege levels. */
-+ current->thread.iopl = new_iopl;
-+
-+ /* Force the change at ring 0. */
-+ op.cmd = PHYSDEVOP_SET_IOPL;
-+ op.u.set_iopl.iopl = (new_iopl == 0) ? 1 : new_iopl;
-+ HYPERVISOR_physdev_op(&op);
-+
-+ return 0;
-+}
-+
-+/*
-+ * this changes the io permissions bitmap in the current task.
-+ */
-+asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
-+{
-+ return turn_on ? sys_iopl(3, NULL) : 0;
-+}
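Userspace reaches this path through the glibc iopl() wrapper: a process with CAP_SYS_RAWIO raises its I/O privilege level once and can then touch ports directly, with no per-port bitmap. A hedged usage sketch (x86 Linux only, must run as root; the port written is just the traditional 0x80 POST-code port):

#include <stdio.h>
#include <sys/io.h>     /* iopl(), outb() - x86 Linux specific */

int main(void)
{
	/* Request IOPL 3: all 65536 ports, no per-port bitmap needed. */
	if (iopl(3) < 0) {
		perror("iopl");         /* fails without CAP_SYS_RAWIO */
		return 1;
	}

	/* Example only: 0x80 is the traditional "POST code" debug port. */
	outb(0x42, 0x80);
	printf("IOPL raised, port write issued\n");
	return 0;
}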
-diff --git a/arch/x86_64/kernel/irq-xen.c b/arch/x86_64/kernel/irq-xen.c
-new file mode 100644
-index 0000000..c64715d
---- /dev/null
-+++ b/arch/x86_64/kernel/irq-xen.c
-@@ -0,0 +1,168 @@
-+/*
-+ * linux/arch/x86_64/kernel/irq.c
-+ *
-+ * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
-+ *
-+ * This file contains the lowest level x86_64-specific interrupt
-+ * entry and irq statistics code. All the remaining irq logic is
-+ * done by the generic kernel/irq/ code and in the
-+ * x86_64-specific irq controller code. (e.g. i8259.c and
-+ * io_apic.c.)
-+ */
-+
-+#include <linux/kernel_stat.h>
-+#include <linux/interrupt.h>
-+#include <linux/seq_file.h>
-+#include <linux/module.h>
-+#include <linux/delay.h>
-+#include <asm/uaccess.h>
-+#include <asm/io_apic.h>
-+#include <asm/idle.h>
-+
-+atomic_t irq_err_count;
-+#ifdef CONFIG_X86_IO_APIC
-+#ifdef APIC_MISMATCH_DEBUG
-+atomic_t irq_mis_count;
-+#endif
-+#endif
-+
-+/*
-+ * Generic, controller-independent functions:
-+ */
-+
-+int show_interrupts(struct seq_file *p, void *v)
-+{
-+ int i = *(loff_t *) v, j;
-+ struct irqaction * action;
-+ unsigned long flags;
-+
-+ if (i == 0) {
-+ seq_printf(p, " ");
-+ for (j=0; j<NR_CPUS; j++)
-+ if (cpu_online(j))
-+ seq_printf(p, "CPU%d ",j);
-+ seq_putc(p, '\n');
-+ }
-+
-+ if (i < NR_IRQS) {
-+ spin_lock_irqsave(&irq_desc[i].lock, flags);
-+ action = irq_desc[i].action;
-+ if (!action)
-+ goto skip;
-+ seq_printf(p, "%3d: ",i);
-+#ifndef CONFIG_SMP
-+ seq_printf(p, "%10u ", kstat_irqs(i));
-+#else
-+ for (j=0; j<NR_CPUS; j++)
-+ if (cpu_online(j))
-+ seq_printf(p, "%10u ",
-+ kstat_cpu(j).irqs[i]);
-+#endif
-+ seq_printf(p, " %14s", irq_desc[i].handler->typename);
-+
-+ seq_printf(p, " %s", action->name);
-+ for (action=action->next; action; action = action->next)
-+ seq_printf(p, ", %s", action->name);
-+ seq_putc(p, '\n');
-+skip:
-+ spin_unlock_irqrestore(&irq_desc[i].lock, flags);
-+ } else if (i == NR_IRQS) {
-+ seq_printf(p, "NMI: ");
-+ for (j = 0; j < NR_CPUS; j++)
-+ if (cpu_online(j))
-+ seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
-+ seq_putc(p, '\n');
-+#ifdef CONFIG_X86_LOCAL_APIC
-+ seq_printf(p, "LOC: ");
-+ for (j = 0; j < NR_CPUS; j++)
-+ if (cpu_online(j))
-+ seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
-+ seq_putc(p, '\n');
-+#endif
-+ seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
-+#ifdef CONFIG_X86_IO_APIC
-+#ifdef APIC_MISMATCH_DEBUG
-+ seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
-+#endif
-+#endif
-+ }
-+ return 0;
-+}
-+
-+/*
-+ * do_IRQ handles all normal device IRQ's (the special
-+ * SMP cross-CPU interrupts have their own specific
-+ * handlers).
-+ */
-+asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
-+{
-+ /* high bits used in ret_from_ code */
-+ int irq = regs->orig_rax & __IRQ_MASK(HARDIRQ_BITS);
-+
-+ exit_idle();
-+ irq_enter();
-+
-+ __do_IRQ(irq, regs);
-+ irq_exit();
-+
-+ return 1;
-+}
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+void fixup_irqs(cpumask_t map)
-+{
-+ unsigned int irq;
-+ static int warned;
-+
-+ for (irq = 0; irq < NR_IRQS; irq++) {
-+ cpumask_t mask;
-+ if (irq == 2)
-+ continue;
-+
-+ cpus_and(mask, irq_affinity[irq], map);
-+ if (any_online_cpu(mask) == NR_CPUS) {
-+ printk("Breaking affinity for irq %i\n", irq);
-+ mask = map;
-+ }
-+ if (irq_desc[irq].handler->set_affinity)
-+ irq_desc[irq].handler->set_affinity(irq, mask);
-+ else if (irq_desc[irq].action && !(warned++))
-+ printk("Cannot set affinity for irq %i\n", irq);
-+ }
-+
-+ /* That doesn't seem sufficient. Give it 1ms. */
-+ local_irq_enable();
-+ mdelay(1);
-+ local_irq_disable();
-+}
-+#endif
-+
-+extern void call_softirq(void);
-+
-+asmlinkage void do_softirq(void)
-+{
-+ __u32 pending;
-+ unsigned long flags;
-+
-+ if (in_interrupt())
-+ return;
-+
-+ local_irq_save(flags);
-+ pending = local_softirq_pending();
-+ /* Switch to interrupt stack */
-+ if (pending)
-+ call_softirq();
-+ local_irq_restore(flags);
-+}
-+EXPORT_SYMBOL(do_softirq);
-+
-+#ifndef CONFIG_X86_LOCAL_APIC
-+/*
-+ * 'What should we do if we get a hw irq event on an illegal vector?'
-+ * Each architecture has to answer this itself.
-+ */
-+void ack_bad_irq(unsigned int irq)
-+{
-+ printk("unexpected IRQ trap at vector %02x\n", irq);
-+}
-+#endif
-diff --git a/arch/x86_64/kernel/ldt-xen.c b/arch/x86_64/kernel/ldt-xen.c
-new file mode 100644
-index 0000000..6efbb64
---- /dev/null
-+++ b/arch/x86_64/kernel/ldt-xen.c
-@@ -0,0 +1,282 @@
-+/*
-+ * linux/arch/x86_64/kernel/ldt.c
-+ *
-+ * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
-+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
-+ * Copyright (C) 2002 Andi Kleen
-+ *
-+ * This handles calls from both 32bit and 64bit mode.
-+ */
-+
-+#include <linux/errno.h>
-+#include <linux/sched.h>
-+#include <linux/string.h>
-+#include <linux/mm.h>
-+#include <linux/smp.h>
-+#include <linux/smp_lock.h>
-+#include <linux/vmalloc.h>
-+#include <linux/slab.h>
-+
-+#include <asm/uaccess.h>
-+#include <asm/system.h>
-+#include <asm/ldt.h>
-+#include <asm/desc.h>
-+#include <asm/proto.h>
-+#include <asm/pgalloc.h>
-+
-+#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
-+static void flush_ldt(void *null)
-+{
-+ if (current->active_mm)
-+ load_LDT(&current->active_mm->context);
-+}
-+#endif
-+
-+static int alloc_ldt(mm_context_t *pc, unsigned mincount, int reload)
-+{
-+ void *oldldt;
-+ void *newldt;
-+ unsigned oldsize;
-+
-+ if (mincount <= (unsigned)pc->size)
-+ return 0;
-+ oldsize = pc->size;
-+ mincount = (mincount+511)&(~511);
-+ if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
-+ newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
-+ else
-+ newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
-+
-+ if (!newldt)
-+ return -ENOMEM;
-+
-+ if (oldsize)
-+ memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
-+ oldldt = pc->ldt;
-+ memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
-+ wmb();
-+ pc->ldt = newldt;
-+ wmb();
-+ pc->size = mincount;
-+ wmb();
-+ if (reload) {
-+#ifdef CONFIG_SMP
-+ cpumask_t mask;
-+
-+ preempt_disable();
-+#endif
-+ make_pages_readonly(
-+ pc->ldt,
-+ (pc->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
-+ XENFEAT_writable_descriptor_tables);
-+ load_LDT(pc);
-+#ifdef CONFIG_SMP
-+ mask = cpumask_of_cpu(smp_processor_id());
-+ if (!cpus_equal(current->mm->cpu_vm_mask, mask))
-+ smp_call_function(flush_ldt, NULL, 1, 1);
-+ preempt_enable();
-+#endif
-+ }
-+ if (oldsize) {
-+ make_pages_writable(
-+ oldldt,
-+ (oldsize * LDT_ENTRY_SIZE) / PAGE_SIZE,
-+ XENFEAT_writable_descriptor_tables);
-+ if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
-+ vfree(oldldt);
-+ else
-+ kfree(oldldt);
-+ }
-+ return 0;
-+}
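The wmb() sequence in alloc_ldt() is a grow-copy-publish pattern: the new table's contents must become visible before the new pointer, and the pointer before the new size, so a concurrent reader never indexes past initialized entries. A hedged userspace analogue of that ordering, with C11 release/acquire atomics in place of the kernel barriers (the Xen page-protection and reload steps are deliberately omitted):

#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

struct table {
	int *entries;
	size_t size;
};

static _Atomic(int *) pub_entries;
static atomic_size_t pub_size;

/* Writer: fill the new array first, publish the pointer, then the size.
 * The old array is leaked here; the kernel frees it after the switch.
 */
static int grow(struct table *t, size_t newsize)
{
	int *n = calloc(newsize, sizeof(*n));

	if (!n)
		return -1;
	if (t->size)
		memcpy(n, t->entries, t->size * sizeof(*n));

	atomic_store_explicit(&pub_entries, n, memory_order_release);
	atomic_store_explicit(&pub_size, newsize, memory_order_release);
	t->entries = n;
	t->size = newsize;
	return 0;
}

/* Reader: load size, then pointer; never indexes past initialized entries. */
static int read_entry(size_t i)
{
	size_t sz = atomic_load_explicit(&pub_size, memory_order_acquire);
	int *e = atomic_load_explicit(&pub_entries, memory_order_acquire);

	return (e && i < sz) ? e[i] : -1;
}

int main(void)
{
	struct table t = { 0, 0 };

	if (grow(&t, 8) < 0)
		return 1;
	t.entries[3] = 42;
	return read_entry(3) == 42 ? 0 : 1;
}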
-+
-+static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
-+{
-+ int err = alloc_ldt(new, old->size, 0);
-+ if (err < 0)
-+ return err;
-+ memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
-+ make_pages_readonly(
-+ new->ldt,
-+ (new->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
-+ XENFEAT_writable_descriptor_tables);
-+ return 0;
-+}
-+
-+/*
-+ * we do not have to muck with descriptors here, that is
-+ * done in switch_mm() as needed.
-+ */
-+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
-+{
-+ struct mm_struct * old_mm;
-+ int retval = 0;
-+
-+ memset(&mm->context, 0, sizeof(mm->context));
-+ init_MUTEX(&mm->context.sem);
-+ old_mm = current->mm;
-+ if (old_mm && old_mm->context.size > 0) {
-+ down(&old_mm->context.sem);
-+ retval = copy_ldt(&mm->context, &old_mm->context);
-+ up(&old_mm->context.sem);
-+ }
-+ if (retval == 0) {
-+ spin_lock(&mm_unpinned_lock);
-+ list_add(&mm->context.unpinned, &mm_unpinned);
-+ spin_unlock(&mm_unpinned_lock);
-+ }
-+ return retval;
-+}
-+
-+/*
-+ * Don't touch the LDT register - we're already in the next thread.
-+ */
-+void destroy_context(struct mm_struct *mm)
-+{
-+ if (mm->context.size) {
-+ if (mm == current->active_mm)
-+ clear_LDT();
-+ make_pages_writable(
-+ mm->context.ldt,
-+ (mm->context.size * LDT_ENTRY_SIZE) / PAGE_SIZE,
-+ XENFEAT_writable_descriptor_tables);
-+ if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
-+ vfree(mm->context.ldt);
-+ else
-+ kfree(mm->context.ldt);
-+ mm->context.size = 0;
-+ }
-+ if (!mm->context.pinned) {
-+ spin_lock(&mm_unpinned_lock);
-+ list_del(&mm->context.unpinned);
-+ spin_unlock(&mm_unpinned_lock);
-+ }
-+}
-+
-+static int read_ldt(void __user * ptr, unsigned long bytecount)
-+{
-+ int err;
-+ unsigned long size;
-+ struct mm_struct * mm = current->mm;
-+
-+ if (!mm->context.size)
-+ return 0;
-+ if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
-+ bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
-+
-+ down(&mm->context.sem);
-+ size = mm->context.size*LDT_ENTRY_SIZE;
-+ if (size > bytecount)
-+ size = bytecount;
-+
-+ err = 0;
-+ if (copy_to_user(ptr, mm->context.ldt, size))
-+ err = -EFAULT;
-+ up(&mm->context.sem);
-+ if (err < 0)
-+ goto error_return;
-+ if (size != bytecount) {
-+ /* zero-fill the rest */
-+ if (clear_user(ptr+size, bytecount-size) != 0) {
-+ err = -EFAULT;
-+ goto error_return;
-+ }
-+ }
-+ return bytecount;
-+error_return:
-+ return err;
-+}
-+
-+static int read_default_ldt(void __user * ptr, unsigned long bytecount)
-+{
-+ /* Arbitrary number */
-+ /* x86-64 default LDT is all zeros */
-+ if (bytecount > 128)
-+ bytecount = 128;
-+ if (clear_user(ptr, bytecount))
-+ return -EFAULT;
-+ return bytecount;
-+}
-+
-+static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
-+{
-+ struct task_struct *me = current;
-+ struct mm_struct * mm = me->mm;
-+ __u32 entry_1, entry_2, *lp;
-+ unsigned long mach_lp;
-+ int error;
-+ struct user_desc ldt_info;
-+
-+ error = -EINVAL;
-+
-+ if (bytecount != sizeof(ldt_info))
-+ goto out;
-+ error = -EFAULT;
-+ if (copy_from_user(&ldt_info, ptr, bytecount))
-+ goto out;
-+
-+ error = -EINVAL;
-+ if (ldt_info.entry_number >= LDT_ENTRIES)
-+ goto out;
-+ if (ldt_info.contents == 3) {
-+ if (oldmode)
-+ goto out;
-+ if (ldt_info.seg_not_present == 0)
-+ goto out;
-+ }
-+
-+ down(&mm->context.sem);
-+ if (ldt_info.entry_number >= (unsigned)mm->context.size) {
-+ error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
-+ if (error < 0)
-+ goto out_unlock;
-+ }
-+
-+ lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt);
-+ mach_lp = arbitrary_virt_to_machine(lp);
-+
-+ /* Allow LDTs to be cleared by the user. */
-+ if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
-+ if (oldmode || LDT_empty(&ldt_info)) {
-+ entry_1 = 0;
-+ entry_2 = 0;
-+ goto install;
-+ }
-+ }
-+
-+ entry_1 = LDT_entry_a(&ldt_info);
-+ entry_2 = LDT_entry_b(&ldt_info);
-+ if (oldmode)
-+ entry_2 &= ~(1 << 20);
-+
-+ /* Install the new entry ... */
-+install:
-+ error = HYPERVISOR_update_descriptor(mach_lp, (unsigned long)((entry_1 | (unsigned long) entry_2 << 32)));
-+
-+out_unlock:
-+ up(&mm->context.sem);
-+out:
-+ return error;
-+}
-+
-+asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
-+{
-+ int ret = -ENOSYS;
-+
-+ switch (func) {
-+ case 0:
-+ ret = read_ldt(ptr, bytecount);
-+ break;
-+ case 1:
-+ ret = write_ldt(ptr, bytecount, 1);
-+ break;
-+ case 2:
-+ ret = read_default_ldt(ptr, bytecount);
-+ break;
-+ case 0x11:
-+ ret = write_ldt(ptr, bytecount, 0);
-+ break;
-+ }
-+ return ret;
-+}
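From userspace this whole interface is reached through the raw modify_ldt system call; glibc provides no wrapper, so syscall(2) is used directly. A minimal sketch of the func 0 read path implemented by read_ldt() above (x86 Linux only):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>        /* LDT_ENTRY_SIZE */

int main(void)
{
	unsigned char buf[LDT_ENTRY_SIZE * 8];
	/* func 0: read the current LDT; returns bytes copied (0 if empty). */
	long n = syscall(SYS_modify_ldt, 0, buf, sizeof(buf));

	if (n < 0) {
		perror("modify_ldt");
		return 1;
	}
	printf("read %ld bytes of LDT\n", n);
	return 0;
}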
-diff --git a/arch/x86_64/kernel/mpparse-xen.c b/arch/x86_64/kernel/mpparse-xen.c
-new file mode 100644
-index 0000000..a5f7437
---- /dev/null
-+++ b/arch/x86_64/kernel/mpparse-xen.c
-@@ -0,0 +1,1005 @@
-+/*
-+ * Intel Multiprocessor Specification 1.1 and 1.4
-+ * compliant MP-table parsing routines.
-+ *
-+ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
-+ * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
-+ *
-+ * Fixes
-+ * Erich Boleyn : MP v1.4 and additional changes.
-+ * Alan Cox : Added EBDA scanning
-+ * Ingo Molnar : various cleanups and rewrites
-+ * Maciej W. Rozycki: Bits for default MP configurations
-+ * Paul Diefenbaugh: Added full ACPI support
-+ */
-+
-+#include <linux/mm.h>
-+#include <linux/init.h>
-+#include <linux/delay.h>
-+#include <linux/config.h>
-+#include <linux/bootmem.h>
-+#include <linux/smp_lock.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/acpi.h>
-+#include <linux/module.h>
-+
-+#include <asm/smp.h>
-+#include <asm/mtrr.h>
-+#include <asm/mpspec.h>
-+#include <asm/pgalloc.h>
-+#include <asm/io_apic.h>
-+#include <asm/proto.h>
-+#include <asm/acpi.h>
-+
-+/* Have we found an MP table */
-+int smp_found_config;
-+unsigned int __initdata maxcpus = NR_CPUS;
-+
-+int acpi_found_madt;
-+
-+/*
-+ * Various Linux-internal data structures created from the
-+ * MP-table.
-+ */
-+unsigned char apic_version [MAX_APICS];
-+unsigned char mp_bus_id_to_type [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
-+int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
-+
-+static int mp_current_pci_id = 0;
-+/* I/O APIC entries */
-+struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
-+
-+/* # of MP IRQ source entries */
-+struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
-+
-+/* MP IRQ source entries */
-+int mp_irq_entries;
-+
-+int nr_ioapics;
-+int pic_mode;
-+unsigned long mp_lapic_addr = 0;
-+
-+
-+
-+/* Processor that is doing the boot up */
-+unsigned int boot_cpu_id = -1U;
-+/* Internal processor count */
-+unsigned int num_processors __initdata = 0;
-+
-+unsigned disabled_cpus __initdata;
-+
-+/* Bitmask of physically existing CPUs */
-+physid_mask_t phys_cpu_present_map = PHYSID_MASK_NONE;
-+
-+/* ACPI MADT entry parsing functions */
-+#ifdef CONFIG_ACPI
-+extern struct acpi_boot_flags acpi_boot;
-+#ifdef CONFIG_X86_LOCAL_APIC
-+extern int acpi_parse_lapic (acpi_table_entry_header *header);
-+extern int acpi_parse_lapic_addr_ovr (acpi_table_entry_header *header);
-+extern int acpi_parse_lapic_nmi (acpi_table_entry_header *header);
-+#endif /*CONFIG_X86_LOCAL_APIC*/
-+#ifdef CONFIG_X86_IO_APIC
-+extern int acpi_parse_ioapic (acpi_table_entry_header *header);
-+#endif /*CONFIG_X86_IO_APIC*/
-+#endif /*CONFIG_ACPI*/
-+
-+u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
-+
-+
-+/*
-+ * Intel MP BIOS table parsing routines:
-+ */
-+
-+/*
-+ * Checksum an MP configuration block.
-+ */
-+
-+static int __init mpf_checksum(unsigned char *mp, int len)
-+{
-+ int sum = 0;
-+
-+ while (len--)
-+ sum += *mp++;
-+
-+ return sum & 0xFF;
-+}
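The convention behind mpf_checksum() is that a valid table's bytes sum to zero modulo 256: the table's stored checksum byte is chosen as the two's complement of the sum of all other bytes. A standalone sketch with a fabricated 16-byte record:

#include <stdio.h>

static int mpf_checksum(const unsigned char *mp, int len)
{
	int sum = 0;

	while (len--)
		sum += *mp++;
	return sum & 0xFF;      /* 0 means the table checks out */
}

int main(void)
{
	unsigned char rec[16] = { '_', 'M', 'P', '_', 1, 2, 3 };
	int i, sum = 0;

	/* Pick the last byte so the whole record sums to 0 mod 256. */
	for (i = 0; i < 15; i++)
		sum += rec[i];
	rec[15] = (unsigned char)(-sum);

	printf("checksum %s\n", mpf_checksum(rec, 16) == 0 ? "ok" : "bad");
	return 0;
}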
-+
-+#ifndef CONFIG_XEN
-+static void __init MP_processor_info (struct mpc_config_processor *m)
-+{
-+ int cpu;
-+ unsigned char ver;
-+ static int found_bsp=0;
-+
-+ if (!(m->mpc_cpuflag & CPU_ENABLED)) {
-+ disabled_cpus++;
-+ return;
-+ }
-+
-+ printk(KERN_INFO "Processor #%d %d:%d APIC version %d\n",
-+ m->mpc_apicid,
-+ (m->mpc_cpufeature & CPU_FAMILY_MASK)>>8,
-+ (m->mpc_cpufeature & CPU_MODEL_MASK)>>4,
-+ m->mpc_apicver);
-+
-+ if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
-+ Dprintk(" Bootup CPU\n");
-+ boot_cpu_id = m->mpc_apicid;
-+ }
-+ if (num_processors >= NR_CPUS) {
-+ printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
-+ " Processor ignored.\n", NR_CPUS);
-+ return;
-+ }
-+
-+ cpu = num_processors++;
-+
-+#if MAX_APICS < 255
-+ if ((int)m->mpc_apicid > MAX_APICS) {
-+ printk(KERN_ERR "Processor #%d INVALID. (Max ID: %d).\n",
-+ m->mpc_apicid, MAX_APICS);
-+ return;
-+ }
-+#endif
-+ ver = m->mpc_apicver;
-+
-+ physid_set(m->mpc_apicid, phys_cpu_present_map);
-+ /*
-+ * Validate version
-+ */
-+ if (ver == 0x0) {
-+ printk(KERN_ERR "BIOS bug, APIC version is 0 for CPU#%d! fixing up to 0x10. (tell your hw vendor)\n", m->mpc_apicid);
-+ ver = 0x10;
-+ }
-+ apic_version[m->mpc_apicid] = ver;
-+ if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
-+ /*
-+ * bios_cpu_apicid is required to have processors listed
-+ * in same order as logical cpu numbers. Hence the first
-+ * entry is BSP, and so on.
-+ */
-+ cpu = 0;
-+
-+ bios_cpu_apicid[0] = m->mpc_apicid;
-+ x86_cpu_to_apicid[0] = m->mpc_apicid;
-+ found_bsp = 1;
-+ } else
-+ cpu = num_processors - found_bsp;
-+ bios_cpu_apicid[cpu] = m->mpc_apicid;
-+ x86_cpu_to_apicid[cpu] = m->mpc_apicid;
-+
-+ cpu_set(cpu, cpu_possible_map);
-+ cpu_set(cpu, cpu_present_map);
-+}
-+#else
-+void __init MP_processor_info (struct mpc_config_processor *m)
-+{
-+ num_processors++;
-+}
-+#endif /* CONFIG_XEN */
-+
-+static void __init MP_bus_info (struct mpc_config_bus *m)
-+{
-+ char str[7];
-+
-+ memcpy(str, m->mpc_bustype, 6);
-+ str[6] = 0;
-+ Dprintk("Bus #%d is %s\n", m->mpc_busid, str);
-+
-+ if (strncmp(str, "ISA", 3) == 0) {
-+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
-+ } else if (strncmp(str, "EISA", 4) == 0) {
-+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
-+ } else if (strncmp(str, "PCI", 3) == 0) {
-+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
-+ mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
-+ mp_current_pci_id++;
-+ } else if (strncmp(str, "MCA", 3) == 0) {
-+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
-+ } else {
-+ printk(KERN_ERR "Unknown bustype %s\n", str);
-+ }
-+}
-+
-+static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
-+{
-+ if (!(m->mpc_flags & MPC_APIC_USABLE))
-+ return;
-+
-+ printk("I/O APIC #%d Version %d at 0x%X.\n",
-+ m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
-+ if (nr_ioapics >= MAX_IO_APICS) {
-+ printk(KERN_ERR "Max # of I/O APICs (%d) exceeded (found %d).\n",
-+ MAX_IO_APICS, nr_ioapics);
-+		panic("Recompile kernel with bigger MAX_IO_APICS!\n");
-+ }
-+ if (!m->mpc_apicaddr) {
-+ printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
-+ " found in MP table, skipping!\n");
-+ return;
-+ }
-+ mp_ioapics[nr_ioapics] = *m;
-+ nr_ioapics++;
-+}
-+
-+static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
-+{
-+ mp_irqs [mp_irq_entries] = *m;
-+ Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
-+ " IRQ %02x, APIC ID %x, APIC INT %02x\n",
-+ m->mpc_irqtype, m->mpc_irqflag & 3,
-+ (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
-+ m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
-+ if (++mp_irq_entries >= MAX_IRQ_SOURCES)
-+ panic("Max # of irq sources exceeded!!\n");
-+}
-+
-+static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
-+{
-+ Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
-+ " IRQ %02x, APIC ID %x, APIC LINT %02x\n",
-+ m->mpc_irqtype, m->mpc_irqflag & 3,
-+ (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
-+ m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
-+ /*
-+ * Well it seems all SMP boards in existence
-+ * use ExtINT/LVT1 == LINT0 and
-+ * NMI/LVT2 == LINT1 - the following check
-+	 * will show us if this assumption is false.
-+ * Until then we do not have to add baggage.
-+ */
-+ if ((m->mpc_irqtype == mp_ExtINT) &&
-+ (m->mpc_destapiclint != 0))
-+ BUG();
-+ if ((m->mpc_irqtype == mp_NMI) &&
-+ (m->mpc_destapiclint != 1))
-+ BUG();
-+}
-+
-+/*
-+ * Read/parse the MPC
-+ */
-+
-+static int __init smp_read_mpc(struct mp_config_table *mpc)
-+{
-+ char str[16];
-+ int count=sizeof(*mpc);
-+ unsigned char *mpt=((unsigned char *)mpc)+count;
-+
-+ if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
-+ printk("SMP mptable: bad signature [%c%c%c%c]!\n",
-+ mpc->mpc_signature[0],
-+ mpc->mpc_signature[1],
-+ mpc->mpc_signature[2],
-+ mpc->mpc_signature[3]);
-+ return 0;
-+ }
-+ if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
-+ printk("SMP mptable: checksum error!\n");
-+ return 0;
-+ }
-+ if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
-+ printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
-+ mpc->mpc_spec);
-+ return 0;
-+ }
-+ if (!mpc->mpc_lapic) {
-+ printk(KERN_ERR "SMP mptable: null local APIC address!\n");
-+ return 0;
-+ }
-+ memcpy(str,mpc->mpc_oem,8);
-+ str[8]=0;
-+ printk(KERN_INFO "OEM ID: %s ",str);
-+
-+ memcpy(str,mpc->mpc_productid,12);
-+ str[12]=0;
-+ printk("Product ID: %s ",str);
-+
-+ printk("APIC at: 0x%X\n",mpc->mpc_lapic);
-+
-+ /* save the local APIC address, it might be non-default */
-+ if (!acpi_lapic)
-+ mp_lapic_addr = mpc->mpc_lapic;
-+
-+ /*
-+ * Now process the configuration blocks.
-+ */
-+ while (count < mpc->mpc_length) {
-+ switch(*mpt) {
-+ case MP_PROCESSOR:
-+ {
-+ struct mpc_config_processor *m=
-+ (struct mpc_config_processor *)mpt;
-+ if (!acpi_lapic)
-+ MP_processor_info(m);
-+ mpt += sizeof(*m);
-+ count += sizeof(*m);
-+ break;
-+ }
-+ case MP_BUS:
-+ {
-+ struct mpc_config_bus *m=
-+ (struct mpc_config_bus *)mpt;
-+ MP_bus_info(m);
-+ mpt += sizeof(*m);
-+ count += sizeof(*m);
-+ break;
-+ }
-+ case MP_IOAPIC:
-+ {
-+ struct mpc_config_ioapic *m=
-+ (struct mpc_config_ioapic *)mpt;
-+ MP_ioapic_info(m);
-+ mpt+=sizeof(*m);
-+ count+=sizeof(*m);
-+ break;
-+ }
-+ case MP_INTSRC:
-+ {
-+ struct mpc_config_intsrc *m=
-+ (struct mpc_config_intsrc *)mpt;
-+
-+ MP_intsrc_info(m);
-+ mpt+=sizeof(*m);
-+ count+=sizeof(*m);
-+ break;
-+ }
-+ case MP_LINTSRC:
-+ {
-+ struct mpc_config_lintsrc *m=
-+ (struct mpc_config_lintsrc *)mpt;
-+ MP_lintsrc_info(m);
-+ mpt+=sizeof(*m);
-+ count+=sizeof(*m);
-+ break;
-+ }
-+ }
-+ }
-+ clustered_apic_check();
-+ if (!num_processors)
-+ printk(KERN_ERR "SMP mptable: no processors registered!\n");
-+ return num_processors;
-+}
-+
-+static int __init ELCR_trigger(unsigned int irq)
-+{
-+ unsigned int port;
-+
-+ port = 0x4d0 + (irq >> 3);
-+ return (inb(port) >> (irq & 7)) & 1;
-+}
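ELCR_trigger() encodes the layout of the two ELCR registers: IRQs 0-7 live in port 0x4d0 and IRQs 8-15 in port 0x4d1, one trigger bit per IRQ. A pure-arithmetic sketch of the port and bit decode (no inb(), so it needs no I/O privilege):

#include <stdio.h>

/* Decode which ELCR port and bit describe a given ISA IRQ. */
static void elcr_locate(unsigned int irq, unsigned int *port,
			unsigned int *mask)
{
	*port = 0x4d0 + (irq >> 3);     /* 0x4d0 for IRQ 0-7, 0x4d1 for 8-15 */
	*mask = 1u << (irq & 7);        /* one trigger bit per IRQ */
}

int main(void)
{
	unsigned int irq, port, mask;

	for (irq = 0; irq < 16; irq++) {
		elcr_locate(irq, &port, &mask);
		printf("IRQ %2u -> port 0x%03x, bit mask 0x%02x\n",
		       irq, port, mask);
	}
	return 0;
}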
-+
-+static void __init construct_default_ioirq_mptable(int mpc_default_type)
-+{
-+ struct mpc_config_intsrc intsrc;
-+ int i;
-+ int ELCR_fallback = 0;
-+
-+ intsrc.mpc_type = MP_INTSRC;
-+ intsrc.mpc_irqflag = 0; /* conforming */
-+ intsrc.mpc_srcbus = 0;
-+ intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
-+
-+ intsrc.mpc_irqtype = mp_INT;
-+
-+ /*
-+ * If true, we have an ISA/PCI system with no IRQ entries
-+ * in the MP table. To prevent the PCI interrupts from being set up
-+ * incorrectly, we try to use the ELCR. The sanity check to see if
-+ * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
-+ * never be level sensitive, so we simply see if the ELCR agrees.
-+ * If it does, we assume it's valid.
-+ */
-+ if (mpc_default_type == 5) {
-+ printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
-+
-+ if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
-+ printk(KERN_ERR "ELCR contains invalid data... not using ELCR\n");
-+ else {
-+ printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
-+ ELCR_fallback = 1;
-+ }
-+ }
-+
-+ for (i = 0; i < 16; i++) {
-+ switch (mpc_default_type) {
-+ case 2:
-+ if (i == 0 || i == 13)
-+ continue; /* IRQ0 & IRQ13 not connected */
-+ /* fall through */
-+ default:
-+ if (i == 2)
-+ continue; /* IRQ2 is never connected */
-+ }
-+
-+ if (ELCR_fallback) {
-+ /*
-+ * If the ELCR indicates a level-sensitive interrupt, we
-+ * copy that information over to the MP table in the
-+ * irqflag field (level sensitive, active high polarity).
-+ */
-+ if (ELCR_trigger(i))
-+ intsrc.mpc_irqflag = 13;
-+ else
-+ intsrc.mpc_irqflag = 0;
-+ }
-+
-+ intsrc.mpc_srcbusirq = i;
-+ intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */
-+ MP_intsrc_info(&intsrc);
-+ }
-+
-+ intsrc.mpc_irqtype = mp_ExtINT;
-+ intsrc.mpc_srcbusirq = 0;
-+ intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */
-+ MP_intsrc_info(&intsrc);
-+}
-+
-+static inline void __init construct_default_ISA_mptable(int mpc_default_type)
-+{
-+ struct mpc_config_processor processor;
-+ struct mpc_config_bus bus;
-+ struct mpc_config_ioapic ioapic;
-+ struct mpc_config_lintsrc lintsrc;
-+ int linttypes[2] = { mp_ExtINT, mp_NMI };
-+ int i;
-+
-+ /*
-+ * local APIC has default address
-+ */
-+ mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
-+
-+ /*
-+ * 2 CPUs, numbered 0 & 1.
-+ */
-+ processor.mpc_type = MP_PROCESSOR;
-+ /* Either an integrated APIC or a discrete 82489DX. */
-+ processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
-+ processor.mpc_cpuflag = CPU_ENABLED;
-+ processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
-+ (boot_cpu_data.x86_model << 4) |
-+ boot_cpu_data.x86_mask;
-+ processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
-+ processor.mpc_reserved[0] = 0;
-+ processor.mpc_reserved[1] = 0;
-+ for (i = 0; i < 2; i++) {
-+ processor.mpc_apicid = i;
-+ MP_processor_info(&processor);
-+ }
-+
-+ bus.mpc_type = MP_BUS;
-+ bus.mpc_busid = 0;
-+ switch (mpc_default_type) {
-+ default:
-+ printk(KERN_ERR "???\nUnknown standard configuration %d\n",
-+ mpc_default_type);
-+ /* fall through */
-+ case 1:
-+ case 5:
-+ memcpy(bus.mpc_bustype, "ISA ", 6);
-+ break;
-+ case 2:
-+ case 6:
-+ case 3:
-+ memcpy(bus.mpc_bustype, "EISA ", 6);
-+ break;
-+ case 4:
-+ case 7:
-+ memcpy(bus.mpc_bustype, "MCA ", 6);
-+ }
-+ MP_bus_info(&bus);
-+ if (mpc_default_type > 4) {
-+ bus.mpc_busid = 1;
-+ memcpy(bus.mpc_bustype, "PCI ", 6);
-+ MP_bus_info(&bus);
-+ }
-+
-+ ioapic.mpc_type = MP_IOAPIC;
-+ ioapic.mpc_apicid = 2;
-+ ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
-+ ioapic.mpc_flags = MPC_APIC_USABLE;
-+ ioapic.mpc_apicaddr = 0xFEC00000;
-+ MP_ioapic_info(&ioapic);
-+
-+ /*
-+ * We set up most of the low 16 IO-APIC pins according to MPS rules.
-+ */
-+ construct_default_ioirq_mptable(mpc_default_type);
-+
-+ lintsrc.mpc_type = MP_LINTSRC;
-+ lintsrc.mpc_irqflag = 0; /* conforming */
-+ lintsrc.mpc_srcbusid = 0;
-+ lintsrc.mpc_srcbusirq = 0;
-+ lintsrc.mpc_destapic = MP_APIC_ALL;
-+ for (i = 0; i < 2; i++) {
-+ lintsrc.mpc_irqtype = linttypes[i];
-+ lintsrc.mpc_destapiclint = i;
-+ MP_lintsrc_info(&lintsrc);
-+ }
-+}
-+
-+static struct intel_mp_floating *mpf_found;
-+
-+/*
-+ * Scan the memory blocks for an SMP configuration block.
-+ */
-+void __init get_smp_config (void)
-+{
-+ struct intel_mp_floating *mpf = mpf_found;
-+
-+ /*
-+ * ACPI supports both logical (e.g. Hyper-Threading) and physical
-+ * processors, where MPS only supports physical.
-+ */
-+ if (acpi_lapic && acpi_ioapic) {
-+ printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
-+ return;
-+ }
-+ else if (acpi_lapic)
-+ printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");
-+
-+ printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
-+ if (mpf->mpf_feature2 & (1<<7)) {
-+ printk(KERN_INFO " IMCR and PIC compatibility mode.\n");
-+ pic_mode = 1;
-+ } else {
-+ printk(KERN_INFO " Virtual Wire compatibility mode.\n");
-+ pic_mode = 0;
-+ }
-+
-+ /*
-+ * Now see if we need to read further.
-+ */
-+ if (mpf->mpf_feature1 != 0) {
-+
-+ printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
-+ construct_default_ISA_mptable(mpf->mpf_feature1);
-+
-+ } else if (mpf->mpf_physptr) {
-+
-+ /*
-+ * Read the physical hardware table. Anything here will
-+ * override the defaults.
-+ */
-+ if (!smp_read_mpc(isa_bus_to_virt(mpf->mpf_physptr))) {
-+ smp_found_config = 0;
-+ printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
-+ printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
-+ return;
-+ }
-+ /*
-+ * If there are no explicit MP IRQ entries, then we are
-+ * broken. We set up most of the low 16 IO-APIC pins to
-+ * ISA defaults and hope it will work.
-+ */
-+ if (!mp_irq_entries) {
-+ struct mpc_config_bus bus;
-+
-+ printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
-+
-+ bus.mpc_type = MP_BUS;
-+ bus.mpc_busid = 0;
-+ memcpy(bus.mpc_bustype, "ISA ", 6);
-+ MP_bus_info(&bus);
-+
-+ construct_default_ioirq_mptable(0);
-+ }
-+
-+ } else
-+ BUG();
-+
-+ printk(KERN_INFO "Processors: %d\n", num_processors);
-+ /*
-+ * Only use the first configuration found.
-+ */
-+}
-+
-+static int __init smp_scan_config (unsigned long base, unsigned long length)
-+{
-+ extern void __bad_mpf_size(void);
-+ unsigned int *bp = isa_bus_to_virt(base);
-+ struct intel_mp_floating *mpf;
-+
-+	Dprintk("Scan SMP from %p for %ld bytes.\n", bp, length);
-+ if (sizeof(*mpf) != 16)
-+ __bad_mpf_size();
-+
-+ while (length > 0) {
-+ mpf = (struct intel_mp_floating *)bp;
-+ if ((*bp == SMP_MAGIC_IDENT) &&
-+ (mpf->mpf_length == 1) &&
-+ !mpf_checksum((unsigned char *)bp, 16) &&
-+ ((mpf->mpf_specification == 1)
-+ || (mpf->mpf_specification == 4)) ) {
-+
-+ smp_found_config = 1;
-+ mpf_found = mpf;
-+ return 1;
-+ }
-+ bp += 4;
-+ length -= 16;
-+ }
-+ return 0;
-+}
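smp_scan_config() walks a region in 16-byte steps looking for the '_MP_' signature, a length of one paragraph, and a zero checksum. The same scan can be run over an in-memory buffer as a standalone sketch; the field offsets (signature at 0, length at 8, checksum at 10) follow struct intel_mp_floating, and the planted record is fabricated for the demo:

#include <stdio.h>
#include <string.h>

static int checksum(const unsigned char *p, int len)
{
	int sum = 0;

	while (len--)
		sum += *p++;
	return sum & 0xFF;
}

/* Scan a buffer in 16-byte steps for a plausible MP floating pointer. */
static long scan_mpf(const unsigned char *buf, long len)
{
	long off;

	for (off = 0; off + 16 <= len; off += 16) {
		const unsigned char *p = buf + off;

		if (memcmp(p, "_MP_", 4) == 0 &&
		    p[8] == 1 &&                /* mpf_length: one paragraph */
		    checksum(p, 16) == 0)
			return off;
	}
	return -1;
}

int main(void)
{
	unsigned char mem[256] = { 0 };
	int i, sum = 0;

	/* Plant a fake record at offset 64 and fix up its checksum byte. */
	memcpy(mem + 64, "_MP_", 4);
	mem[64 + 8] = 1;
	for (i = 0; i < 16; i++)
		sum += mem[64 + i];
	mem[64 + 10] = (unsigned char)(-sum);   /* mpf_checksum field */

	printf("found at offset %ld\n", scan_mpf(mem, sizeof(mem)));
	return 0;
}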
-+
-+void __init find_intel_smp (void)
-+{
-+ unsigned int address;
-+
-+ /*
-+ * FIXME: Linux assumes you have 640K of base ram..
-+ * this continues the error...
-+ *
-+ * 1) Scan the bottom 1K for a signature
-+ * 2) Scan the top 1K of base RAM
-+ * 3) Scan the 64K of bios
-+ */
-+ if (smp_scan_config(0x0,0x400) ||
-+ smp_scan_config(639*0x400,0x400) ||
-+ smp_scan_config(0xF0000,0x10000))
-+ return;
-+ /*
-+ * If it is an SMP machine we should know now, unless the
-+ * configuration is in an EISA/MCA bus machine with an
-+ * extended bios data area.
-+ *
-+	 * There is a real-mode segmented pointer to the 4K EBDA
-+	 * area at 0x40E; calculate its address and scan the area here.
-+ *
-+ * NOTE! There are Linux loaders that will corrupt the EBDA
-+ * area, and as such this kind of SMP config may be less
-+ * trustworthy, simply because the SMP table may have been
-+ * stomped on during early boot. These loaders are buggy and
-+ * should be fixed.
-+ */
-+
-+ address = *(unsigned short *)phys_to_virt(0x40E);
-+ address <<= 4;
-+ if (smp_scan_config(address, 0x1000))
-+ return;
-+
-+ /* If we have come this far, we did not find an MP table */
-+ printk(KERN_INFO "No mptable found.\n");
-+}
-+
-+/*
-+ * - Intel MP Configuration Table
-+ */
-+void __init find_smp_config (void)
-+{
-+#ifdef CONFIG_X86_LOCAL_APIC
-+ find_intel_smp();
-+#endif
-+}
-+
-+
-+/* --------------------------------------------------------------------------
-+ ACPI-based MP Configuration
-+ -------------------------------------------------------------------------- */
-+
-+#ifdef CONFIG_ACPI
-+
-+void __init mp_register_lapic_address (
-+ u64 address)
-+{
-+#ifndef CONFIG_XEN
-+ mp_lapic_addr = (unsigned long) address;
-+
-+ set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
-+
-+ if (boot_cpu_id == -1U)
-+ boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID));
-+
-+ Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
-+#endif
-+}
-+
-+
-+void __init mp_register_lapic (
-+ u8 id,
-+ u8 enabled)
-+{
-+ struct mpc_config_processor processor;
-+ int boot_cpu = 0;
-+
-+ if (id >= MAX_APICS) {
-+ printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
-+ id, MAX_APICS);
-+ return;
-+ }
-+
-+ if (id == boot_cpu_physical_apicid)
-+ boot_cpu = 1;
-+
-+#ifndef CONFIG_XEN
-+ processor.mpc_type = MP_PROCESSOR;
-+ processor.mpc_apicid = id;
-+ processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR));
-+ processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
-+ processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
-+ processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
-+ (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
-+ processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
-+ processor.mpc_reserved[0] = 0;
-+ processor.mpc_reserved[1] = 0;
-+#endif
-+
-+ MP_processor_info(&processor);
-+}
-+
-+#ifdef CONFIG_X86_IO_APIC
-+
-+#define MP_ISA_BUS 0
-+#define MP_MAX_IOAPIC_PIN 127
-+
-+static struct mp_ioapic_routing {
-+ int apic_id;
-+ int gsi_start;
-+ int gsi_end;
-+ u32 pin_programmed[4];
-+} mp_ioapic_routing[MAX_IO_APICS];
-+
-+
-+static int mp_find_ioapic (
-+ int gsi)
-+{
-+ int i = 0;
-+
-+ /* Find the IOAPIC that manages this GSI. */
-+ for (i = 0; i < nr_ioapics; i++) {
-+ if ((gsi >= mp_ioapic_routing[i].gsi_start)
-+ && (gsi <= mp_ioapic_routing[i].gsi_end))
-+ return i;
-+ }
-+
-+ printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
-+
-+ return -1;
-+}
-+
-+
-+void __init mp_register_ioapic (
-+ u8 id,
-+ u32 address,
-+ u32 gsi_base)
-+{
-+ int idx = 0;
-+
-+ if (nr_ioapics >= MAX_IO_APICS) {
-+ printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
-+ "(found %d)\n", MAX_IO_APICS, nr_ioapics);
-+ panic("Recompile kernel with bigger MAX_IO_APICS!\n");
-+ }
-+ if (!address) {
-+ printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
-+ " found in MADT table, skipping!\n");
-+ return;
-+ }
-+
-+ idx = nr_ioapics++;
-+
-+ mp_ioapics[idx].mpc_type = MP_IOAPIC;
-+ mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
-+ mp_ioapics[idx].mpc_apicaddr = address;
-+
-+#ifndef CONFIG_XEN
-+ set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
-+#endif
-+ mp_ioapics[idx].mpc_apicid = id;
-+ mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
-+
-+ /*
-+ * Build basic IRQ lookup table to facilitate gsi->io_apic lookups
-+ * and to prevent reprogramming of IOAPIC pins (PCI IRQs).
-+ */
-+ mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
-+ mp_ioapic_routing[idx].gsi_start = gsi_base;
-+ mp_ioapic_routing[idx].gsi_end = gsi_base +
-+ io_apic_get_redir_entries(idx);
-+
-+ printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
-+ "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
-+ mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
-+ mp_ioapic_routing[idx].gsi_start,
-+ mp_ioapic_routing[idx].gsi_end);
-+
-+ return;
-+}
-+
-+
-+void __init mp_override_legacy_irq (
-+ u8 bus_irq,
-+ u8 polarity,
-+ u8 trigger,
-+ u32 gsi)
-+{
-+ struct mpc_config_intsrc intsrc;
-+ int ioapic = -1;
-+ int pin = -1;
-+
-+ /*
-+ * Convert 'gsi' to 'ioapic.pin'.
-+ */
-+ ioapic = mp_find_ioapic(gsi);
-+ if (ioapic < 0)
-+ return;
-+ pin = gsi - mp_ioapic_routing[ioapic].gsi_start;
-+
-+ /*
-+ * TBD: This check is for faulty timer entries, where the override
-+ * erroneously sets the trigger to level, resulting in a HUGE
-+ * increase of timer interrupts!
-+ */
-+ if ((bus_irq == 0) && (trigger == 3))
-+ trigger = 1;
-+
-+ intsrc.mpc_type = MP_INTSRC;
-+ intsrc.mpc_irqtype = mp_INT;
-+ intsrc.mpc_irqflag = (trigger << 2) | polarity;
-+ intsrc.mpc_srcbus = MP_ISA_BUS;
-+ intsrc.mpc_srcbusirq = bus_irq; /* IRQ */
-+ intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */
-+ intsrc.mpc_dstirq = pin; /* INTIN# */
-+
-+ Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
-+ intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
-+ (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
-+ intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
-+
-+ mp_irqs[mp_irq_entries] = intsrc;
-+ if (++mp_irq_entries == MAX_IRQ_SOURCES)
-+ panic("Max # of irq sources exceeded!\n");
-+
-+ return;
-+}
-+
-+
-+void __init mp_config_acpi_legacy_irqs (void)
-+{
-+ struct mpc_config_intsrc intsrc;
-+ int i = 0;
-+ int ioapic = -1;
-+
-+ /*
-+ * Fabricate the legacy ISA bus (bus #31).
-+ */
-+ mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
-+ Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);
-+
-+ /*
-+ * Locate the IOAPIC that manages the ISA IRQs (0-15).
-+ */
-+ ioapic = mp_find_ioapic(0);
-+ if (ioapic < 0)
-+ return;
-+
-+ intsrc.mpc_type = MP_INTSRC;
-+ intsrc.mpc_irqflag = 0; /* Conforming */
-+ intsrc.mpc_srcbus = MP_ISA_BUS;
-+ intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
-+
-+ /*
-+	 * Use the default configuration for IRQs 0-15, unless
-+	 * overridden by (MADT) interrupt source override entries.
-+ */
-+ for (i = 0; i < 16; i++) {
-+ int idx;
-+
-+ for (idx = 0; idx < mp_irq_entries; idx++) {
-+ struct mpc_config_intsrc *irq = mp_irqs + idx;
-+
-+ /* Do we already have a mapping for this ISA IRQ? */
-+ if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
-+ break;
-+
-+ /* Do we already have a mapping for this IOAPIC pin */
-+ if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
-+ (irq->mpc_dstirq == i))
-+ break;
-+ }
-+
-+ if (idx != mp_irq_entries) {
-+ printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
-+ continue; /* IRQ already used */
-+ }
-+
-+ intsrc.mpc_irqtype = mp_INT;
-+ intsrc.mpc_srcbusirq = i; /* Identity mapped */
-+ intsrc.mpc_dstirq = i;
-+
-+ Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
-+ "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
-+ (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
-+ intsrc.mpc_srcbusirq, intsrc.mpc_dstapic,
-+ intsrc.mpc_dstirq);
-+
-+ mp_irqs[mp_irq_entries] = intsrc;
-+ if (++mp_irq_entries == MAX_IRQ_SOURCES)
-+ panic("Max # of irq sources exceeded!\n");
-+ }
-+
-+ return;
-+}
-+
-+#define MAX_GSI_NUM 4096
-+
-+int mp_register_gsi(u32 gsi, int triggering, int polarity)
-+{
-+ int ioapic = -1;
-+ int ioapic_pin = 0;
-+ int idx, bit = 0;
-+ static int pci_irq = 16;
-+ /*
-+ * Mapping between Global System Interrupts, which
-+ * represent all possible interrupts, to the IRQs
-+ * assigned to actual devices.
-+ */
-+ static int gsi_to_irq[MAX_GSI_NUM];
-+
-+ if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
-+ return gsi;
-+
-+ /* Don't set up the ACPI SCI because it's already set up */
-+ if (acpi_fadt.sci_int == gsi)
-+ return gsi;
-+
-+ ioapic = mp_find_ioapic(gsi);
-+ if (ioapic < 0) {
-+ printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
-+ return gsi;
-+ }
-+
-+ ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_start;
-+
-+ /*
-+ * Avoid pin reprogramming. PRTs typically include entries
-+ * with redundant pin->gsi mappings (but unique PCI devices);
-+ * we only program the IOAPIC on the first.
-+ */
-+ bit = ioapic_pin % 32;
-+ idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
-+ if (idx > 3) {
-+ printk(KERN_ERR "Invalid reference to IOAPIC pin "
-+ "%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
-+ ioapic_pin);
-+ return gsi;
-+ }
-+ if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
-+ Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
-+ mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
-+ return gsi_to_irq[gsi];
-+ }
-+
-+ mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
-+
-+ if (triggering == ACPI_LEVEL_SENSITIVE) {
-+ /*
-+ * For PCI devices assign IRQs in order, avoiding gaps
-+ * due to unused I/O APIC pins.
-+ */
-+ int irq = gsi;
-+ if (gsi < MAX_GSI_NUM) {
-+ if (gsi > 15)
-+ gsi = pci_irq++;
-+ /*
-+ * Don't assign IRQ used by ACPI SCI
-+ */
-+ if (gsi == acpi_fadt.sci_int)
-+ gsi = pci_irq++;
-+ gsi_to_irq[irq] = gsi;
-+ } else {
-+ printk(KERN_ERR "GSI %u is too high\n", gsi);
-+ return gsi;
-+ }
-+ }
-+
-+ io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
-+ triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
-+ polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
-+ return gsi;
-+}
-+
-+#endif /*CONFIG_X86_IO_APIC*/
-+#endif /*CONFIG_ACPI*/
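The mp_register_gsi() hunk above avoids reprogramming IOAPIC pins by keeping a per-IOAPIC pin_programmed bitmap: PRT entries with redundant pin->gsi mappings only touch the hardware once. A minimal stand-alone sketch of that bookkeeping follows; MAX_PINS and claim_pin() are illustrative names, not kernel interfaces.

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_PINS 128                        /* illustrative pin count */
    static uint32_t pin_programmed[MAX_PINS / 32];

    /* Returns 1 the first time a pin is claimed, 0 on every repeat. */
    static int claim_pin(int pin)
    {
        int idx = pin / 32, bit = pin % 32;
        if (pin_programmed[idx] & (1u << bit))
            return 0;                           /* already programmed */
        pin_programmed[idx] |= 1u << bit;
        return 1;
    }

    int main(void)
    {
        printf("%d\n", claim_pin(5));           /* 1: program the pin */
        printf("%d\n", claim_pin(5));           /* 0: redundant PRT entry */
        return 0;
    }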
-diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c
-index 0c3f052..9f62301 100644
---- a/arch/x86_64/kernel/pci-gart.c
-+++ b/arch/x86_64/kernel/pci-gart.c
-@@ -274,7 +274,8 @@ static dma_addr_t gart_map_simple(struct
- }
-
- /* Map a single area into the IOMMU */
--dma_addr_t gart_map_single(struct device *dev, void *addr, size_t size, int dir)
-+dma_addr_t gart_map_single(struct device *dev, void *addr, size_t size,
-+ enum dma_data_direction dir)
- {
- unsigned long phys_mem, bus;
-
-@@ -392,7 +393,8 @@ static inline int dma_map_cont(struct sc
- * DMA map all entries in a scatterlist.
- * Merge chunks that have page aligned sizes into a continuous mapping.
- */
--int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
-+int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-+ enum dma_data_direction dir)
- {
- int i;
- int out;
-@@ -465,7 +467,7 @@ error:
- * Free a DMA mapping.
- */
- void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
-- size_t size, int direction)
-+ size_t size, enum dma_data_direction direction)
- {
- unsigned long iommu_page;
- int npages;
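The pci-gart.c hunks above change the direction argument from a bare int to enum dma_data_direction, so callers passing arbitrary integers become visible to the compiler. A hedged sketch of the idea; the enum values mirror the DMA API convention, and map_single() is a stand-in rather than the real GART routine.

    #include <stdio.h>

    enum dma_data_direction {
        DMA_BIDIRECTIONAL = 0,
        DMA_TO_DEVICE     = 1,
        DMA_FROM_DEVICE   = 2,
        DMA_NONE          = 3,
    };

    /* Typed direction parameter: passing a stray integer now draws an
     * enum-conversion warning instead of silently compiling. */
    static unsigned long map_single(void *addr, unsigned long size,
                                    enum dma_data_direction dir)
    {
        (void)size; (void)dir;
        return (unsigned long)addr;     /* a bus address in real code */
    }

    int main(void)
    {
        char buf[16];
        printf("%lx\n", map_single(buf, sizeof buf, DMA_TO_DEVICE));
        return 0;
    }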
-diff --git a/arch/x86_64/kernel/pci-swiotlb-xen.c b/arch/x86_64/kernel/pci-swiotlb-xen.c
-new file mode 100644
-index 0000000..0a5a8d4
---- /dev/null
-+++ b/arch/x86_64/kernel/pci-swiotlb-xen.c
-@@ -0,0 +1,54 @@
-+/* Glue code to lib/swiotlb.c */
-+
-+#include <linux/pci.h>
-+#include <linux/cache.h>
-+#include <linux/module.h>
-+#include <asm/dma-mapping.h>
-+#include <asm/proto.h>
-+#include <asm/swiotlb.h>
-+#include <asm/dma.h>
-+
-+#if 0
-+int swiotlb __read_mostly;
-+EXPORT_SYMBOL(swiotlb);
-+#endif
-+
-+struct dma_mapping_ops swiotlb_dma_ops = {
-+#if 0
-+ .mapping_error = swiotlb_dma_mapping_error,
-+ .alloc_coherent = swiotlb_alloc_coherent,
-+ .free_coherent = swiotlb_free_coherent,
-+ .map_single = swiotlb_map_single,
-+ .unmap_single = swiotlb_unmap_single,
-+ .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
-+ .sync_single_for_device = swiotlb_sync_single_for_device,
-+ .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
-+ .sync_single_range_for_device = swiotlb_sync_single_range_for_device,
-+ .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
-+ .sync_sg_for_device = swiotlb_sync_sg_for_device,
-+ .map_sg = swiotlb_map_sg,
-+ .unmap_sg = swiotlb_unmap_sg,
-+ .dma_supported = NULL,
-+#endif
-+};
-+
-+void pci_swiotlb_init(void)
-+{
-+#if 0
-+ /* don't initialize swiotlb if iommu=off (no_iommu=1) */
-+ if (!iommu_aperture && !no_iommu &&
-+ (end_pfn > MAX_DMA32_PFN || force_iommu))
-+ swiotlb = 1;
-+ if (swiotlb) {
-+ printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
-+ swiotlb_init();
-+ dma_ops = &swiotlb_dma_ops;
-+ }
-+#else
-+ swiotlb_init();
-+ if (swiotlb) {
-+ printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
-+ dma_ops = &swiotlb_dma_ops;
-+ }
-+#endif
-+}
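pci-swiotlb-xen.c selects the bounce-buffer implementation by pointing a global operations table at the swiotlb routines. The function-pointer dispatch it relies on, reduced to a toy example; dma_mapping_ops here is a simplified shape, not the kernel structure.

    #include <stdio.h>

    struct dma_mapping_ops {
        unsigned long (*map_single)(void *addr, unsigned long size);
    };

    static unsigned long swiotlb_map(void *addr, unsigned long size)
    {
        (void)size;
        return (unsigned long)addr;     /* a real swiotlb would bounce here */
    }

    static struct dma_mapping_ops swiotlb_ops = { .map_single = swiotlb_map };
    static struct dma_mapping_ops *dma_ops;

    int main(void)
    {
        int buf;
        dma_ops = &swiotlb_ops;         /* what pci_swiotlb_init() decides */
        printf("%lx\n", dma_ops->map_single(&buf, sizeof buf));
        return 0;
    }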
-diff --git a/arch/x86_64/kernel/process-xen.c b/arch/x86_64/kernel/process-xen.c
-new file mode 100644
-index 0000000..4c40063
---- /dev/null
-+++ b/arch/x86_64/kernel/process-xen.c
-@@ -0,0 +1,803 @@
-+/*
-+ * linux/arch/x86-64/kernel/process.c
-+ *
-+ * Copyright (C) 1995 Linus Torvalds
-+ *
-+ * Pentium III FXSR, SSE support
-+ * Gareth Hughes <gareth@valinux.com>, May 2000
-+ *
-+ * X86-64 port
-+ * Andi Kleen.
-+ *
-+ * CPU hotplug support - ashok.raj@intel.com
-+ * $Id: process.c,v 1.38 2002/01/15 10:08:03 ak Exp $
-+ *
-+ * Jun Nakajima <jun.nakajima@intel.com>
-+ * Modified for Xen
-+ */
-+
-+/*
-+ * This file handles the architecture-dependent parts of process handling..
-+ */
-+
-+#include <stdarg.h>
-+
-+#include <linux/cpu.h>
-+#include <linux/errno.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/mm.h>
-+#include <linux/elfcore.h>
-+#include <linux/smp.h>
-+#include <linux/slab.h>
-+#include <linux/user.h>
-+#include <linux/module.h>
-+#include <linux/a.out.h>
-+#include <linux/interrupt.h>
-+#include <linux/delay.h>
-+#include <linux/ptrace.h>
-+#include <linux/utsname.h>
-+#include <linux/random.h>
-+#include <linux/kprobes.h>
-+#include <linux/notifier.h>
-+
-+#include <asm/uaccess.h>
-+#include <asm/pgtable.h>
-+#include <asm/system.h>
-+#include <asm/io.h>
-+#include <asm/processor.h>
-+#include <asm/i387.h>
-+#include <asm/mmu_context.h>
-+#include <asm/pda.h>
-+#include <asm/prctl.h>
-+#include <asm/kdebug.h>
-+#include <xen/interface/dom0_ops.h>
-+#include <xen/interface/physdev.h>
-+#include <xen/interface/vcpu.h>
-+#include <asm/desc.h>
-+#include <asm/proto.h>
-+#include <asm/hardirq.h>
-+#include <asm/ia32.h>
-+#include <asm/idle.h>
-+
-+asmlinkage extern void ret_from_fork(void);
-+
-+unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
-+
-+unsigned long boot_option_idle_override = 0;
-+EXPORT_SYMBOL(boot_option_idle_override);
-+
-+/*
-+ * Power management idle function, if any.
-+ */
-+void (*pm_idle)(void);
-+static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
-+
-+static struct notifier_block *idle_notifier;
-+static DEFINE_SPINLOCK(idle_notifier_lock);
-+
-+void idle_notifier_register(struct notifier_block *n)
-+{
-+ unsigned long flags;
-+ spin_lock_irqsave(&idle_notifier_lock, flags);
-+ notifier_chain_register(&idle_notifier, n);
-+ spin_unlock_irqrestore(&idle_notifier_lock, flags);
-+}
-+EXPORT_SYMBOL_GPL(idle_notifier_register);
-+
-+void idle_notifier_unregister(struct notifier_block *n)
-+{
-+ unsigned long flags;
-+ spin_lock_irqsave(&idle_notifier_lock, flags);
-+ notifier_chain_unregister(&idle_notifier, n);
-+ spin_unlock_irqrestore(&idle_notifier_lock, flags);
-+}
-+EXPORT_SYMBOL(idle_notifier_unregister);
-+
-+enum idle_state { CPU_IDLE, CPU_NOT_IDLE };
-+static DEFINE_PER_CPU(enum idle_state, idle_state) = CPU_NOT_IDLE;
-+
-+void enter_idle(void)
-+{
-+ __get_cpu_var(idle_state) = CPU_IDLE;
-+ notifier_call_chain(&idle_notifier, IDLE_START, NULL);
-+}
-+
-+static void __exit_idle(void)
-+{
-+ __get_cpu_var(idle_state) = CPU_NOT_IDLE;
-+ notifier_call_chain(&idle_notifier, IDLE_END, NULL);
-+}
-+
-+/* Called from interrupts to signify idle end */
-+void exit_idle(void)
-+{
-+ if (current->pid | read_pda(irqcount))
-+ return;
-+ __exit_idle();
-+}
-+
-+/* XXX XEN doesn't use default_idle(), poll_idle(). Use xen_idle() instead. */
-+extern void stop_hz_timer(void);
-+extern void start_hz_timer(void);
-+void xen_idle(void)
-+{
-+ local_irq_disable();
-+
-+ if (need_resched())
-+ local_irq_enable();
-+ else {
-+ clear_thread_flag(TIF_POLLING_NRFLAG);
-+ smp_mb__after_clear_bit();
-+ stop_hz_timer();
-+ /* Blocking includes an implicit local_irq_enable(). */
-+ HYPERVISOR_sched_op(SCHEDOP_block, 0);
-+ start_hz_timer();
-+ set_thread_flag(TIF_POLLING_NRFLAG);
-+ }
-+}
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+static inline void play_dead(void)
-+{
-+ idle_task_exit();
-+ local_irq_disable();
-+ cpu_clear(smp_processor_id(), cpu_initialized);
-+ preempt_enable_no_resched();
-+ HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
-+ /* Same as drivers/xen/core/smpboot.c:cpu_bringup(). */
-+ cpu_init();
-+ touch_softlockup_watchdog();
-+ preempt_disable();
-+ local_irq_enable();
-+}
-+#else
-+static inline void play_dead(void)
-+{
-+ BUG();
-+}
-+#endif /* CONFIG_HOTPLUG_CPU */
-+
-+/*
-+ * The idle thread. There's no useful work to be
-+ * done, so just try to conserve power and have a
-+ * low exit latency (ie sit in a loop waiting for
-+ * somebody to say that they'd like to reschedule)
-+ */
-+void cpu_idle (void)
-+{
-+ set_thread_flag(TIF_POLLING_NRFLAG);
-+
-+ /* endless idle loop with no priority at all */
-+ while (1) {
-+ while (!need_resched()) {
-+ if (__get_cpu_var(cpu_idle_state))
-+ __get_cpu_var(cpu_idle_state) = 0;
-+ rmb();
-+
-+ if (cpu_is_offline(smp_processor_id()))
-+ play_dead();
-+ enter_idle();
-+ xen_idle();
-+ __exit_idle();
-+ }
-+
-+ preempt_enable_no_resched();
-+ schedule();
-+ preempt_disable();
-+ }
-+}
-+
-+void cpu_idle_wait(void)
-+{
-+ unsigned int cpu, this_cpu = get_cpu();
-+ cpumask_t map;
-+
-+ set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
-+ put_cpu();
-+
-+ cpus_clear(map);
-+ for_each_online_cpu(cpu) {
-+ per_cpu(cpu_idle_state, cpu) = 1;
-+ cpu_set(cpu, map);
-+ }
-+
-+ __get_cpu_var(cpu_idle_state) = 0;
-+
-+ wmb();
-+ do {
-+ ssleep(1);
-+ for_each_online_cpu(cpu) {
-+ if (cpu_isset(cpu, map) &&
-+ !per_cpu(cpu_idle_state, cpu))
-+ cpu_clear(cpu, map);
-+ }
-+ cpus_and(map, map, cpu_online_map);
-+ } while (!cpus_empty(map));
-+}
-+EXPORT_SYMBOL_GPL(cpu_idle_wait);
-+
-+/* XXX XEN doesn't use mwait_idle(), select_idle_routine(), idle_setup(). */
-+/* Always use xen_idle() instead. */
-+void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) {}
-+
-+/* Prints also some state that isn't saved in the pt_regs */
-+void __show_regs(struct pt_regs * regs)
-+{
-+ unsigned long fs, gs, shadowgs;
-+ unsigned int fsindex,gsindex;
-+ unsigned int ds,cs,es;
-+
-+ printk("\n");
-+ print_modules();
-+ printk("Pid: %d, comm: %.20s %s %s %.*s\n",
-+ current->pid, current->comm, print_tainted(),
-+ system_utsname.release,
-+ (int)strcspn(system_utsname.version, " "),
-+ system_utsname.version);
-+ printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
-+ printk_address(regs->rip);
-+ printk("\nRSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp,
-+ regs->eflags);
-+ printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
-+ regs->rax, regs->rbx, regs->rcx);
-+ printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
-+ regs->rdx, regs->rsi, regs->rdi);
-+ printk("RBP: %016lx R08: %016lx R09: %016lx\n",
-+ regs->rbp, regs->r8, regs->r9);
-+ printk("R10: %016lx R11: %016lx R12: %016lx\n",
-+ regs->r10, regs->r11, regs->r12);
-+ printk("R13: %016lx R14: %016lx R15: %016lx\n",
-+ regs->r13, regs->r14, regs->r15);
-+
-+ asm("mov %%ds,%0" : "=r" (ds));
-+ asm("mov %%cs,%0" : "=r" (cs));
-+ asm("mov %%es,%0" : "=r" (es));
-+ asm("mov %%fs,%0" : "=r" (fsindex));
-+ asm("mov %%gs,%0" : "=r" (gsindex));
-+
-+ rdmsrl(MSR_FS_BASE, fs);
-+ rdmsrl(MSR_GS_BASE, gs);
-+ rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
-+
-+ printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
-+ fs,fsindex,gs,gsindex,shadowgs);
-+ printk("CS: %04x DS: %04x ES: %04x\n", cs, ds, es);
-+
-+}
-+
-+void show_regs(struct pt_regs *regs)
-+{
-+ printk("CPU %d:", smp_processor_id());
-+ __show_regs(regs);
-+ show_trace(&regs->rsp);
-+}
-+
-+/*
-+ * Free current thread data structures, etc.
-+ */
-+void exit_thread(void)
-+{
-+ struct task_struct *me = current;
-+ struct thread_struct *t = &me->thread;
-+
-+ /*
-+ * Remove function-return probe instances associated with this task
-+ * and put them back on the free list. Do not insert an exit probe for
-+ * this function, it will be disabled by kprobe_flush_task if you do.
-+ */
-+ kprobe_flush_task(me);
-+
-+ if (me->thread.io_bitmap_ptr) {
-+#ifndef CONFIG_X86_NO_TSS
-+ struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
-+#endif
-+#ifdef CONFIG_XEN
-+ static physdev_op_t iobmp_op = {
-+ .cmd = PHYSDEVOP_SET_IOBITMAP
-+ };
-+#endif
-+
-+ kfree(t->io_bitmap_ptr);
-+ t->io_bitmap_ptr = NULL;
-+ /*
-+ * Careful, clear this in the TSS too:
-+ */
-+#ifndef CONFIG_X86_NO_TSS
-+ memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
-+ put_cpu();
-+#endif
-+#ifdef CONFIG_XEN
-+ HYPERVISOR_physdev_op(&iobmp_op);
-+#endif
-+ t->io_bitmap_max = 0;
-+ }
-+}
-+
-+void load_gs_index(unsigned gs)
-+{
-+ HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, gs);
-+}
-+
-+void flush_thread(void)
-+{
-+ struct task_struct *tsk = current;
-+ struct thread_info *t = current_thread_info();
-+
-+ if (t->flags & _TIF_ABI_PENDING)
-+ t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);
-+
-+ tsk->thread.debugreg0 = 0;
-+ tsk->thread.debugreg1 = 0;
-+ tsk->thread.debugreg2 = 0;
-+ tsk->thread.debugreg3 = 0;
-+ tsk->thread.debugreg6 = 0;
-+ tsk->thread.debugreg7 = 0;
-+ memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
-+ /*
-+ * Forget coprocessor state..
-+ */
-+ clear_fpu(tsk);
-+ clear_used_math();
-+}
-+
-+void release_thread(struct task_struct *dead_task)
-+{
-+ if (dead_task->mm) {
-+ if (dead_task->mm->context.size) {
-+ printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
-+ dead_task->comm,
-+ dead_task->mm->context.ldt,
-+ dead_task->mm->context.size);
-+ BUG();
-+ }
-+ }
-+}
-+
-+static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
-+{
-+ struct user_desc ud = {
-+ .base_addr = addr,
-+ .limit = 0xfffff,
-+ .contents = (3 << 3), /* user */
-+ .seg_32bit = 1,
-+ .limit_in_pages = 1,
-+ .useable = 1,
-+ };
-+ struct n_desc_struct *desc = (void *)t->thread.tls_array;
-+ desc += tls;
-+ desc->a = LDT_entry_a(&ud);
-+ desc->b = LDT_entry_b(&ud);
-+}
-+
-+static inline u32 read_32bit_tls(struct task_struct *t, int tls)
-+{
-+ struct desc_struct *desc = (void *)t->thread.tls_array;
-+ desc += tls;
-+ return desc->base0 |
-+ (((u32)desc->base1) << 16) |
-+ (((u32)desc->base2) << 24);
-+}
-+
-+/*
-+ * This gets called before we allocate a new thread and copy
-+ * the current task into it.
-+ */
-+void prepare_to_copy(struct task_struct *tsk)
-+{
-+ unlazy_fpu(tsk);
-+}
-+
-+int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
-+ unsigned long unused,
-+ struct task_struct * p, struct pt_regs * regs)
-+{
-+ int err;
-+ struct pt_regs * childregs;
-+ struct task_struct *me = current;
-+
-+ childregs = ((struct pt_regs *)
-+ (THREAD_SIZE + task_stack_page(p))) - 1;
-+ *childregs = *regs;
-+
-+ childregs->rax = 0;
-+ childregs->rsp = rsp;
-+ if (rsp == ~0UL)
-+ childregs->rsp = (unsigned long)childregs;
-+
-+ p->thread.rsp = (unsigned long) childregs;
-+ p->thread.rsp0 = (unsigned long) (childregs+1);
-+ p->thread.userrsp = me->thread.userrsp;
-+
-+ set_tsk_thread_flag(p, TIF_FORK);
-+
-+ p->thread.fs = me->thread.fs;
-+ p->thread.gs = me->thread.gs;
-+
-+ asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
-+ asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
-+ asm("mov %%es,%0" : "=m" (p->thread.es));
-+ asm("mov %%ds,%0" : "=m" (p->thread.ds));
-+
-+ if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
-+ p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
-+ if (!p->thread.io_bitmap_ptr) {
-+ p->thread.io_bitmap_max = 0;
-+ return -ENOMEM;
-+ }
-+ memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
-+ IO_BITMAP_BYTES);
-+ }
-+
-+ /*
-+ * Set a new TLS for the child thread?
-+ */
-+ if (clone_flags & CLONE_SETTLS) {
-+#ifdef CONFIG_IA32_EMULATION
-+ if (test_thread_flag(TIF_IA32))
-+ err = ia32_child_tls(p, childregs);
-+ else
-+#endif
-+ err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
-+ if (err)
-+ goto out;
-+ }
-+ p->thread.iopl = current->thread.iopl;
-+
-+ err = 0;
-+out:
-+ if (err && p->thread.io_bitmap_ptr) {
-+ kfree(p->thread.io_bitmap_ptr);
-+ p->thread.io_bitmap_max = 0;
-+ }
-+ return err;
-+}
-+
-+/*
-+ * switch_to(x,y) should switch tasks from x to y.
-+ *
-+ * This could still be optimized:
-+ * - fold all the options into a flag word and test it with a single test.
-+ * - could test fs/gs bitsliced
-+ *
-+ * Kprobes not supported here. Set the probe on schedule instead.
-+ */
-+__kprobes struct task_struct *
-+__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
-+{
-+ struct thread_struct *prev = &prev_p->thread,
-+ *next = &next_p->thread;
-+ int cpu = smp_processor_id();
-+#ifndef CONFIG_X86_NO_TSS
-+ struct tss_struct *tss = &per_cpu(init_tss, cpu);
-+#endif
-+ physdev_op_t iopl_op, iobmp_op;
-+ multicall_entry_t _mcl[8], *mcl = _mcl;
-+
-+ /*
-+ * This is basically '__unlazy_fpu', except that we queue a
-+ * multicall to indicate FPU task switch, rather than
-+ * synchronously trapping to Xen.
-+ */
-+ if (prev_p->thread_info->status & TS_USEDFPU) {
-+ __save_init_fpu(prev_p); /* _not_ save_init_fpu() */
-+ mcl->op = __HYPERVISOR_fpu_taskswitch;
-+ mcl->args[0] = 1;
-+ mcl++;
-+ }
-+
-+ /*
-+ * Reload esp0, LDT and the page table pointer:
-+ */
-+ mcl->op = __HYPERVISOR_stack_switch;
-+ mcl->args[0] = __KERNEL_DS;
-+ mcl->args[1] = next->rsp0;
-+ mcl++;
-+
-+ /*
-+ * Load the per-thread Thread-Local Storage descriptor.
-+ * This is load_TLS(next, cpu) with multicalls.
-+ */
-+#define C(i) do { \
-+ if (unlikely(next->tls_array[i] != prev->tls_array[i])) { \
-+ mcl->op = __HYPERVISOR_update_descriptor; \
-+ mcl->args[0] = virt_to_machine( \
-+ &cpu_gdt(cpu)[GDT_ENTRY_TLS_MIN + i]); \
-+ mcl->args[1] = next->tls_array[i]; \
-+ mcl++; \
-+ } \
-+} while (0)
-+ C(0); C(1); C(2);
-+#undef C
-+
-+ if (unlikely(prev->iopl != next->iopl)) {
-+ iopl_op.cmd = PHYSDEVOP_SET_IOPL;
-+ iopl_op.u.set_iopl.iopl = (next->iopl == 0) ? 1 : next->iopl;
-+ mcl->op = __HYPERVISOR_physdev_op;
-+ mcl->args[0] = (unsigned long)&iopl_op;
-+ mcl++;
-+ }
-+
-+ if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
-+ iobmp_op.cmd =
-+ PHYSDEVOP_SET_IOBITMAP;
-+ iobmp_op.u.set_iobitmap.bitmap =
-+ (char *)next->io_bitmap_ptr;
-+ iobmp_op.u.set_iobitmap.nr_ports =
-+ next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
-+ mcl->op = __HYPERVISOR_physdev_op;
-+ mcl->args[0] = (unsigned long)&iobmp_op;
-+ mcl++;
-+ }
-+
-+ (void)HYPERVISOR_multicall(_mcl, mcl - _mcl);
-+ /*
-+ * Switch DS and ES.
-+ * This won't pick up thread selector changes, but I guess that is ok.
-+ */
-+ if (unlikely(next->es))
-+ loadsegment(es, next->es);
-+
-+ if (unlikely(next->ds))
-+ loadsegment(ds, next->ds);
-+
-+ /*
-+ * Switch FS and GS.
-+ */
-+ if (unlikely(next->fsindex))
-+ loadsegment(fs, next->fsindex);
-+
-+ if (next->fs)
-+ HYPERVISOR_set_segment_base(SEGBASE_FS, next->fs);
-+
-+ if (unlikely(next->gsindex))
-+ load_gs_index(next->gsindex);
-+
-+ if (next->gs)
-+ HYPERVISOR_set_segment_base(SEGBASE_GS_USER, next->gs);
-+
-+ /*
-+ * Switch the PDA context.
-+ */
-+ prev->userrsp = read_pda(oldrsp);
-+ write_pda(oldrsp, next->userrsp);
-+ write_pda(pcurrent, next_p);
-+ write_pda(kernelstack,
-+ task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET);
-+
-+ /*
-+ * Now maybe reload the debug registers
-+ */
-+ if (unlikely(next->debugreg7)) {
-+ set_debugreg(next->debugreg0, 0);
-+ set_debugreg(next->debugreg1, 1);
-+ set_debugreg(next->debugreg2, 2);
-+ set_debugreg(next->debugreg3, 3);
-+ /* no 4 and 5 */
-+ set_debugreg(next->debugreg6, 6);
-+ set_debugreg(next->debugreg7, 7);
-+ }
-+
-+ return prev_p;
-+}
-+
-+/*
-+ * sys_execve() executes a new program.
-+ */
-+asmlinkage
-+long sys_execve(char __user *name, char __user * __user *argv,
-+ char __user * __user *envp, struct pt_regs regs)
-+{
-+ long error;
-+ char * filename;
-+
-+ filename = getname(name);
-+ error = PTR_ERR(filename);
-+ if (IS_ERR(filename))
-+ return error;
-+ error = do_execve(filename, argv, envp, &regs);
-+ if (error == 0) {
-+ task_lock(current);
-+ current->ptrace &= ~PT_DTRACE;
-+ task_unlock(current);
-+ }
-+ putname(filename);
-+ return error;
-+}
-+
-+void set_personality_64bit(void)
-+{
-+ /* inherit personality from parent */
-+
-+ /* Make sure to be in 64bit mode */
-+ clear_thread_flag(TIF_IA32);
-+
-+ /* TBD: overwrites user setup. Should have two bits.
-+	   But 64-bit processes have always behaved this way,
-+	   so it's not too bad. The main problem is just that
-+	   32-bit children are affected again. */
-+ current->personality &= ~READ_IMPLIES_EXEC;
-+}
-+
-+asmlinkage long sys_fork(struct pt_regs *regs)
-+{
-+ return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
-+}
-+
-+asmlinkage long
-+sys_clone(unsigned long clone_flags, unsigned long newsp,
-+ void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
-+{
-+ if (!newsp)
-+ newsp = regs->rsp;
-+ return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
-+}
-+
-+/*
-+ * This is trivial, and on the face of it looks like it
-+ * could equally well be done in user mode.
-+ *
-+ * Not so, for quite unobvious reasons - register pressure.
-+ * In user mode vfork() cannot have a stack frame, and if
-+ * done by calling the "clone()" system call directly, you
-+ * do not have enough call-clobbered registers to hold all
-+ * the information you need.
-+ */
-+asmlinkage long sys_vfork(struct pt_regs *regs)
-+{
-+ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0,
-+ NULL, NULL);
-+}
-+
-+unsigned long get_wchan(struct task_struct *p)
-+{
-+ unsigned long stack;
-+ u64 fp,rip;
-+ int count = 0;
-+
-+ if (!p || p == current || p->state==TASK_RUNNING)
-+ return 0;
-+ stack = (unsigned long)task_stack_page(p);
-+ if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
-+ return 0;
-+ fp = *(u64 *)(p->thread.rsp);
-+ do {
-+ if (fp < (unsigned long)stack ||
-+ fp > (unsigned long)stack+THREAD_SIZE)
-+ return 0;
-+ rip = *(u64 *)(fp+8);
-+ if (!in_sched_functions(rip))
-+ return rip;
-+ fp = *(u64 *)fp;
-+ } while (count++ < 16);
-+ return 0;
-+}
-+
-+long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
-+{
-+ int ret = 0;
-+ int doit = task == current;
-+ int cpu;
-+
-+ switch (code) {
-+ case ARCH_SET_GS:
-+ if (addr >= TASK_SIZE_OF(task))
-+ return -EPERM;
-+ cpu = get_cpu();
-+ /* handle small bases via the GDT because that's faster to
-+ switch. */
-+ if (addr <= 0xffffffff) {
-+ set_32bit_tls(task, GS_TLS, addr);
-+ if (doit) {
-+ load_TLS(&task->thread, cpu);
-+ load_gs_index(GS_TLS_SEL);
-+ }
-+ task->thread.gsindex = GS_TLS_SEL;
-+ task->thread.gs = 0;
-+ } else {
-+ task->thread.gsindex = 0;
-+ task->thread.gs = addr;
-+ if (doit) {
-+ load_gs_index(0);
-+ ret = HYPERVISOR_set_segment_base(
-+ SEGBASE_GS_USER, addr);
-+ }
-+ }
-+ put_cpu();
-+ break;
-+ case ARCH_SET_FS:
-+ /* Not strictly needed for fs, but do it for symmetry
-+ with gs */
-+ if (addr >= TASK_SIZE_OF(task))
-+ return -EPERM;
-+ cpu = get_cpu();
-+ /* handle small bases via the GDT because that's faster to
-+ switch. */
-+ if (addr <= 0xffffffff) {
-+ set_32bit_tls(task, FS_TLS, addr);
-+ if (doit) {
-+ load_TLS(&task->thread, cpu);
-+ asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
-+ }
-+ task->thread.fsindex = FS_TLS_SEL;
-+ task->thread.fs = 0;
-+ } else {
-+ task->thread.fsindex = 0;
-+ task->thread.fs = addr;
-+ if (doit) {
-+ /* set the selector to 0 to not confuse
-+ __switch_to */
-+ asm volatile("movl %0,%%fs" :: "r" (0));
-+ ret = HYPERVISOR_set_segment_base(SEGBASE_FS,
-+ addr);
-+ }
-+ }
-+ put_cpu();
-+ break;
-+ case ARCH_GET_FS: {
-+ unsigned long base;
-+ if (task->thread.fsindex == FS_TLS_SEL)
-+ base = read_32bit_tls(task, FS_TLS);
-+ else if (doit)
-+ rdmsrl(MSR_FS_BASE, base);
-+ else
-+ base = task->thread.fs;
-+ ret = put_user(base, (unsigned long __user *)addr);
-+ break;
-+ }
-+ case ARCH_GET_GS: {
-+ unsigned long base;
-+ if (task->thread.gsindex == GS_TLS_SEL)
-+ base = read_32bit_tls(task, GS_TLS);
-+ else if (doit)
-+ rdmsrl(MSR_KERNEL_GS_BASE, base);
-+ else
-+ base = task->thread.gs;
-+ ret = put_user(base, (unsigned long __user *)addr);
-+ break;
-+ }
-+
-+ default:
-+ ret = -EINVAL;
-+ break;
-+ }
-+
-+ return ret;
-+}
-+
-+long sys_arch_prctl(int code, unsigned long addr)
-+{
-+ return do_arch_prctl(current, code, addr);
-+}
-+
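do_arch_prctl() above takes two paths for ARCH_SET_FS/ARCH_SET_GS: a base that fits in 32 bits goes through a GDT descriptor, which is cheap to reload at context switch, while a larger base needs the MSR or set_segment_base hypercall route. The threshold test in isolation, as a sketch:

    #include <stdio.h>

    /* Bases up to 4 GiB fit the 32-bit base field of a GDT descriptor;
     * anything higher needs MSR_FS_BASE or a hypercall. */
    static const char *fs_base_path(unsigned long addr)
    {
        return (addr <= 0xffffffffUL) ? "GDT descriptor" : "MSR/hypercall";
    }

    int main(void)
    {
        printf("%s\n", fs_base_path(0x12345678UL));     /* GDT descriptor */
        printf("%s\n", fs_base_path(0x7f0000000000UL)); /* MSR/hypercall */
        return 0;
    }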
-+/*
-+ * Capture the user space registers if the task is not running (in user space)
-+ */
-+int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
-+{
-+ struct pt_regs *pp, ptregs;
-+
-+ pp = task_pt_regs(tsk);
-+
-+ ptregs = *pp;
-+ ptregs.cs &= 0xffff;
-+ ptregs.ss &= 0xffff;
-+
-+ elf_core_copy_regs(regs, &ptregs);
-+
-+ boot_option_idle_override = 1;
-+ return 1;
-+}
-+
-+unsigned long arch_align_stack(unsigned long sp)
-+{
-+ if (randomize_va_space)
-+ sp -= get_random_int() % 8192;
-+ return sp & ~0xf;
-+}
-+
-+#ifndef CONFIG_SMP
-+void _restore_vcpu(void)
-+{
-+}
-+#endif
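__switch_to() in process-xen.c batches the stack switch, TLS updates and I/O-bitmap changes into a single multicall, so the guest traps into Xen once per context switch rather than once per operation. A toy sketch of the queue-then-flush pattern; multicall_entry and hypervisor_multicall() are stand-ins for the Xen interface, not its actual definitions.

    #include <stdio.h>

    struct multicall_entry { int op; unsigned long arg; };

    /* Pretend hypercall: executes every queued entry in one trap. */
    static void hypervisor_multicall(struct multicall_entry *mcl, int n)
    {
        for (int i = 0; i < n; i++)
            printf("op=%d arg=%lu\n", mcl[i].op, mcl[i].arg);
    }

    int main(void)
    {
        struct multicall_entry _mcl[8], *mcl = _mcl;

        *mcl++ = (struct multicall_entry){ .op = 1, .arg = 42 }; /* stack switch */
        *mcl++ = (struct multicall_entry){ .op = 2, .arg = 7 };  /* TLS update */

        hypervisor_multicall(_mcl, (int)(mcl - _mcl)); /* one trap, two ops */
        return 0;
    }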
-diff --git a/arch/x86_64/kernel/setup-xen.c b/arch/x86_64/kernel/setup-xen.c
-new file mode 100644
-index 0000000..be0853d
---- /dev/null
-+++ b/arch/x86_64/kernel/setup-xen.c
-@@ -0,0 +1,1619 @@
-+/*
-+ * linux/arch/x86-64/kernel/setup.c
-+ *
-+ * Copyright (C) 1995 Linus Torvalds
-+ *
-+ * Nov 2001 Dave Jones <davej@suse.de>
-+ * Forked from i386 setup code.
-+ *
-+ * $Id$
-+ */
-+
-+/*
-+ * This file handles the architecture-dependent parts of initialization
-+ */
-+
-+#include <linux/errno.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/mm.h>
-+#include <linux/stddef.h>
-+#include <linux/unistd.h>
-+#include <linux/ptrace.h>
-+#include <linux/slab.h>
-+#include <linux/user.h>
-+#include <linux/a.out.h>
-+#include <linux/tty.h>
-+#include <linux/ioport.h>
-+#include <linux/delay.h>
-+#include <linux/config.h>
-+#include <linux/init.h>
-+#include <linux/initrd.h>
-+#include <linux/highmem.h>
-+#include <linux/bootmem.h>
-+#include <linux/module.h>
-+#include <asm/processor.h>
-+#include <linux/console.h>
-+#include <linux/seq_file.h>
-+#include <linux/crash_dump.h>
-+#include <linux/root_dev.h>
-+#include <linux/pci.h>
-+#include <linux/acpi.h>
-+#include <linux/kallsyms.h>
-+#include <linux/edd.h>
-+#include <linux/mmzone.h>
-+#include <linux/kexec.h>
-+#include <linux/cpufreq.h>
-+#include <linux/dmi.h>
-+#include <linux/dma-mapping.h>
-+
-+#include <asm/mtrr.h>
-+#include <asm/uaccess.h>
-+#include <asm/system.h>
-+#include <asm/io.h>
-+#include <asm/smp.h>
-+#include <asm/msr.h>
-+#include <asm/desc.h>
-+#include <video/edid.h>
-+#include <asm/e820.h>
-+#include <asm/dma.h>
-+#include <asm/mpspec.h>
-+#include <asm/mmu_context.h>
-+#include <asm/bootsetup.h>
-+#include <asm/proto.h>
-+#include <asm/setup.h>
-+#include <asm/mach_apic.h>
-+#include <asm/numa.h>
-+#include <asm/swiotlb.h>
-+#include <asm/sections.h>
-+#include <asm/gart-mapping.h>
-+#ifdef CONFIG_XEN
-+#include <linux/percpu.h>
-+#include <xen/interface/physdev.h>
-+#include <asm/hypervisor.h>
-+#include <xen/interface/nmi.h>
-+#include <xen/features.h>
-+#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
-+#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
-+#define end_pfn_map end_pfn
-+
-+extern void hypervisor_callback(void);
-+extern void failsafe_callback(void);
-+extern void nmi(void);
-+
-+extern unsigned long start_pfn;
-+extern struct edid_info edid_info;
-+
-+shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
-+EXPORT_SYMBOL(HYPERVISOR_shared_info);
-+
-+extern char hypercall_page[PAGE_SIZE];
-+EXPORT_SYMBOL(hypercall_page);
-+
-+/* Allows setting of maximum possible memory size */
-+unsigned long xen_override_max_pfn;
-+
-+unsigned long *phys_to_machine_mapping;
-+unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[512];
-+
-+EXPORT_SYMBOL(phys_to_machine_mapping);
-+
-+DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
-+DEFINE_PER_CPU(int, nr_multicall_ents);
-+
-+/* Raw start-of-day parameters from the hypervisor. */
-+start_info_t *xen_start_info;
-+EXPORT_SYMBOL(xen_start_info);
-+#endif
-+
-+/*
-+ * Machine setup..
-+ */
-+
-+struct cpuinfo_x86 boot_cpu_data __read_mostly;
-+
-+unsigned long mmu_cr4_features;
-+
-+int acpi_disabled;
-+EXPORT_SYMBOL(acpi_disabled);
-+#ifdef CONFIG_ACPI
-+extern int __initdata acpi_ht;
-+extern acpi_interrupt_flags acpi_sci_flags;
-+int __initdata acpi_force = 0;
-+#endif
-+
-+int acpi_numa __initdata;
-+
-+/* Boot loader ID as an integer, for the benefit of proc_dointvec */
-+int bootloader_type = 0x4e4558; /* XXX get proper XEN boot loader ID */
-+
-+/*
-+ * Setup options
-+ */
-+struct screen_info screen_info;
-+struct sys_desc_table_struct {
-+ unsigned short length;
-+ unsigned char table[0];
-+};
-+
-+#ifdef EDID_INFO
-+struct edid_info edid_info;
-+#endif
-+struct e820map e820;
-+
-+extern int root_mountflags;
-+#ifndef MOUNT_ROOT_RDONLY
-+#define MOUNT_ROOT_RDONLY 1
-+#endif
-+
-+char command_line[COMMAND_LINE_SIZE];
-+
-+struct resource standard_io_resources[] = {
-+ { .name = "dma1", .start = 0x00, .end = 0x1f,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+ { .name = "pic1", .start = 0x20, .end = 0x21,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+ { .name = "timer0", .start = 0x40, .end = 0x43,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+ { .name = "timer1", .start = 0x50, .end = 0x53,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+ { .name = "keyboard", .start = 0x60, .end = 0x6f,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+ { .name = "dma page reg", .start = 0x80, .end = 0x8f,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+ { .name = "pic2", .start = 0xa0, .end = 0xa1,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+ { .name = "dma2", .start = 0xc0, .end = 0xdf,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+ { .name = "fpu", .start = 0xf0, .end = 0xff,
-+ .flags = IORESOURCE_BUSY | IORESOURCE_IO }
-+};
-+
-+#define STANDARD_IO_RESOURCES \
-+ (sizeof standard_io_resources / sizeof standard_io_resources[0])
-+
-+#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
-+
-+struct resource data_resource = {
-+ .name = "Kernel data",
-+ .start = 0,
-+ .end = 0,
-+ .flags = IORESOURCE_RAM,
-+};
-+struct resource code_resource = {
-+ .name = "Kernel code",
-+ .start = 0,
-+ .end = 0,
-+ .flags = IORESOURCE_RAM,
-+};
-+
-+#define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)
-+
-+#if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
-+static struct resource system_rom_resource = {
-+ .name = "System ROM",
-+ .start = 0xf0000,
-+ .end = 0xfffff,
-+ .flags = IORESOURCE_ROM,
-+};
-+
-+static struct resource extension_rom_resource = {
-+ .name = "Extension ROM",
-+ .start = 0xe0000,
-+ .end = 0xeffff,
-+ .flags = IORESOURCE_ROM,
-+};
-+
-+static struct resource adapter_rom_resources[] = {
-+ { .name = "Adapter ROM", .start = 0xc8000, .end = 0,
-+ .flags = IORESOURCE_ROM },
-+ { .name = "Adapter ROM", .start = 0, .end = 0,
-+ .flags = IORESOURCE_ROM },
-+ { .name = "Adapter ROM", .start = 0, .end = 0,
-+ .flags = IORESOURCE_ROM },
-+ { .name = "Adapter ROM", .start = 0, .end = 0,
-+ .flags = IORESOURCE_ROM },
-+ { .name = "Adapter ROM", .start = 0, .end = 0,
-+ .flags = IORESOURCE_ROM },
-+ { .name = "Adapter ROM", .start = 0, .end = 0,
-+ .flags = IORESOURCE_ROM }
-+};
-+#endif
-+
-+#define ADAPTER_ROM_RESOURCES \
-+ (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
-+
-+#if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
-+static struct resource video_rom_resource = {
-+ .name = "Video ROM",
-+ .start = 0xc0000,
-+ .end = 0xc7fff,
-+ .flags = IORESOURCE_ROM,
-+};
-+#endif
-+
-+static struct resource video_ram_resource = {
-+ .name = "Video RAM area",
-+ .start = 0xa0000,
-+ .end = 0xbffff,
-+ .flags = IORESOURCE_RAM,
-+};
-+
-+#if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
-+#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
-+
-+static int __init romchecksum(unsigned char *rom, unsigned long length)
-+{
-+ unsigned char *p, sum = 0;
-+
-+ for (p = rom; p < rom + length; p++)
-+ sum += *p;
-+ return sum == 0;
-+}
-+
-+static void __init probe_roms(void)
-+{
-+ unsigned long start, length, upper;
-+ unsigned char *rom;
-+ int i;
-+
-+ /* video rom */
-+ upper = adapter_rom_resources[0].start;
-+ for (start = video_rom_resource.start; start < upper; start += 2048) {
-+ rom = isa_bus_to_virt(start);
-+ if (!romsignature(rom))
-+ continue;
-+
-+ video_rom_resource.start = start;
-+
-+ /* 0 < length <= 0x7f * 512, historically */
-+ length = rom[2] * 512;
-+
-+ /* if checksum okay, trust length byte */
-+ if (length && romchecksum(rom, length))
-+ video_rom_resource.end = start + length - 1;
-+
-+ request_resource(&iomem_resource, &video_rom_resource);
-+ break;
-+ }
-+
-+ start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
-+ if (start < upper)
-+ start = upper;
-+
-+ /* system rom */
-+ request_resource(&iomem_resource, &system_rom_resource);
-+ upper = system_rom_resource.start;
-+
-+ /* check for extension rom (ignore length byte!) */
-+ rom = isa_bus_to_virt(extension_rom_resource.start);
-+ if (romsignature(rom)) {
-+ length = extension_rom_resource.end - extension_rom_resource.start + 1;
-+ if (romchecksum(rom, length)) {
-+ request_resource(&iomem_resource, &extension_rom_resource);
-+ upper = extension_rom_resource.start;
-+ }
-+ }
-+
-+ /* check for adapter roms on 2k boundaries */
-+ for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
-+ rom = isa_bus_to_virt(start);
-+ if (!romsignature(rom))
-+ continue;
-+
-+ /* 0 < length <= 0x7f * 512, historically */
-+ length = rom[2] * 512;
-+
-+ /* but accept any length that fits if checksum okay */
-+ if (!length || start + length > upper || !romchecksum(rom, length))
-+ continue;
-+
-+ adapter_rom_resources[i].start = start;
-+ adapter_rom_resources[i].end = start + length - 1;
-+ request_resource(&iomem_resource, &adapter_rom_resources[i]);
-+
-+ start = adapter_rom_resources[i++].end & ~2047UL;
-+ }
-+}
-+#endif
-+
-+static __init void parse_cmdline_early (char ** cmdline_p)
-+{
-+ char c = ' ', *to = command_line, *from = COMMAND_LINE;
-+ int len = 0;
-+ int userdef = 0;
-+
-+ for (;;) {
-+ if (c != ' ')
-+ goto next_char;
-+
-+#ifdef CONFIG_SMP
-+ /*
-+ * If the BIOS enumerates physical processors before logical,
-+ * maxcpus=N at enumeration-time can be used to disable HT.
-+ */
-+ else if (!memcmp(from, "maxcpus=", 8)) {
-+ extern unsigned int maxcpus;
-+
-+ maxcpus = simple_strtoul(from + 8, NULL, 0);
-+ }
-+#endif
-+#ifdef CONFIG_ACPI
-+ /* "acpi=off" disables both ACPI table parsing and interpreter init */
-+ if (!memcmp(from, "acpi=off", 8))
-+ disable_acpi();
-+
-+ if (!memcmp(from, "acpi=force", 10)) {
-+ /* add later when we do DMI horrors: */
-+ acpi_force = 1;
-+ acpi_disabled = 0;
-+ }
-+
-+ /* acpi=ht just means: do ACPI MADT parsing
-+ at bootup, but don't enable the full ACPI interpreter */
-+ if (!memcmp(from, "acpi=ht", 7)) {
-+ if (!acpi_force)
-+ disable_acpi();
-+ acpi_ht = 1;
-+ }
-+ else if (!memcmp(from, "pci=noacpi", 10))
-+ acpi_disable_pci();
-+ else if (!memcmp(from, "acpi=noirq", 10))
-+ acpi_noirq_set();
-+
-+ else if (!memcmp(from, "acpi_sci=edge", 13))
-+ acpi_sci_flags.trigger = 1;
-+ else if (!memcmp(from, "acpi_sci=level", 14))
-+ acpi_sci_flags.trigger = 3;
-+ else if (!memcmp(from, "acpi_sci=high", 13))
-+ acpi_sci_flags.polarity = 1;
-+ else if (!memcmp(from, "acpi_sci=low", 12))
-+ acpi_sci_flags.polarity = 3;
-+
-+ /* acpi=strict disables out-of-spec workarounds */
-+ else if (!memcmp(from, "acpi=strict", 11)) {
-+ acpi_strict = 1;
-+ }
-+#ifdef CONFIG_X86_IO_APIC
-+ else if (!memcmp(from, "acpi_skip_timer_override", 24))
-+ acpi_skip_timer_override = 1;
-+#endif
-+#endif
-+
-+#ifndef CONFIG_XEN
-+ if (!memcmp(from, "nolapic", 7) ||
-+ !memcmp(from, "disableapic", 11))
-+ disable_apic = 1;
-+
-+ /* Don't confuse with noapictimer */
-+ if (!memcmp(from, "noapic", 6) &&
-+ (from[6] == ' ' || from[6] == 0))
-+ skip_ioapic_setup = 1;
-+
-+ /* Make sure to not confuse with apic= */
-+ if (!memcmp(from, "apic", 4) &&
-+ (from[4] == ' ' || from[4] == 0)) {
-+ skip_ioapic_setup = 0;
-+ ioapic_force = 1;
-+ }
-+#endif
-+
-+ if (!memcmp(from, "mem=", 4))
-+ parse_memopt(from+4, &from);
-+
-+ if (!memcmp(from, "memmap=", 7)) {
-+		/* The exactmap option is for user-defined memory */
-+ if (!memcmp(from+7, "exactmap", 8)) {
-+#ifdef CONFIG_CRASH_DUMP
-+ /* If we are doing a crash dump, we
-+ * still need to know the real mem
-+ * size before original memory map is
-+ * reset.
-+ */
-+ saved_max_pfn = e820_end_of_ram();
-+#endif
-+ from += 8+7;
-+ end_pfn_map = 0;
-+ e820.nr_map = 0;
-+ userdef = 1;
-+ }
-+ else {
-+ parse_memmapopt(from+7, &from);
-+ userdef = 1;
-+ }
-+ }
-+
-+#ifdef CONFIG_NUMA
-+ if (!memcmp(from, "numa=", 5))
-+ numa_setup(from+5);
-+#endif
-+
-+ if (!memcmp(from,"iommu=",6)) {
-+ iommu_setup(from+6);
-+ }
-+
-+ if (!memcmp(from,"oops=panic", 10))
-+ panic_on_oops = 1;
-+
-+ if (!memcmp(from, "noexec=", 7))
-+ nonx_setup(from + 7);
-+
-+#ifdef CONFIG_KEXEC
-+ /* crashkernel=size@addr specifies the location to reserve for
-+ * a crash kernel. By reserving this memory we guarantee
-+	 * that Linux never sets it up as a DMA target.
-+ * Useful for holding code to do something appropriate
-+ * after a kernel panic.
-+ */
-+ else if (!memcmp(from, "crashkernel=", 12)) {
-+ unsigned long size, base;
-+ size = memparse(from+12, &from);
-+ if (*from == '@') {
-+ base = memparse(from+1, &from);
-+ /* FIXME: Do I want a sanity check
-+ * to validate the memory range?
-+ */
-+ crashk_res.start = base;
-+ crashk_res.end = base + size - 1;
-+ }
-+ }
-+#endif
-+
-+#ifdef CONFIG_PROC_VMCORE
-+ /* elfcorehdr= specifies the location of elf core header
-+ * stored by the crashed kernel. This option will be passed
-+ * by kexec loader to the capture kernel.
-+ */
-+ else if(!memcmp(from, "elfcorehdr=", 11))
-+ elfcorehdr_addr = memparse(from+11, &from);
-+#endif
-+
-+#if defined(CONFIG_HOTPLUG_CPU) && !defined(CONFIG_XEN)
-+ else if (!memcmp(from, "additional_cpus=", 16))
-+ setup_additional_cpus(from+16);
-+#endif
-+
-+ next_char:
-+ c = *(from++);
-+ if (!c)
-+ break;
-+ if (COMMAND_LINE_SIZE <= ++len)
-+ break;
-+ *(to++) = c;
-+ }
-+ if (userdef) {
-+ printk(KERN_INFO "user-defined physical RAM map:\n");
-+ e820_print_map("user");
-+ }
-+ *to = '\0';
-+ *cmdline_p = command_line;
-+}
-+
-+#ifndef CONFIG_NUMA
-+#ifdef CONFIG_XEN
-+static void __init
-+contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
-+{
-+ unsigned long bootmap_size;
-+
-+ bootmap_size = init_bootmem(start_pfn, end_pfn);
-+ free_bootmem(0, xen_start_info->nr_pages << PAGE_SHIFT);
-+ reserve_bootmem(HIGH_MEMORY,
-+ (PFN_PHYS(start_pfn) + bootmap_size + PAGE_SIZE-1)
-+ - HIGH_MEMORY);
-+}
-+#else
-+static void __init
-+contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
-+{
-+ unsigned long bootmap_size, bootmap;
-+
-+ bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
-+ bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
-+ if (bootmap == -1L)
-+ panic("Cannot find bootmem map of size %ld\n",bootmap_size);
-+ bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
-+ e820_bootmem_free(NODE_DATA(0), 0, end_pfn << PAGE_SHIFT);
-+ reserve_bootmem(bootmap, bootmap_size);
-+}
-+#endif /* !CONFIG_XEN */
-+#endif
-+
-+/* Use inline assembly to define this because the nops are defined
-+ as inline assembly strings in the include files and we cannot
-+ get them easily into strings. */
-+asm("\t.data\nk8nops: "
-+ K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
-+ K8_NOP7 K8_NOP8);
-+
-+extern unsigned char k8nops[];
-+static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
-+ NULL,
-+ k8nops,
-+ k8nops + 1,
-+ k8nops + 1 + 2,
-+ k8nops + 1 + 2 + 3,
-+ k8nops + 1 + 2 + 3 + 4,
-+ k8nops + 1 + 2 + 3 + 4 + 5,
-+ k8nops + 1 + 2 + 3 + 4 + 5 + 6,
-+ k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
-+};
-+
-+extern char __vsyscall_0;
-+
-+/* Replace instructions with better alternatives for this CPU type.
-+
-+ This runs before SMP is initialized to avoid SMP problems with
-+   self-modifying code. This implies that asymmetric systems where
-+   APs have fewer capabilities than the boot processor are not handled.
-+ In this case boot with "noreplacement". */
-+void apply_alternatives(void *start, void *end)
-+{
-+ struct alt_instr *a;
-+ int diff, i, k;
-+ for (a = start; (void *)a < end; a++) {
-+ u8 *instr;
-+
-+ if (!boot_cpu_has(a->cpuid))
-+ continue;
-+
-+ BUG_ON(a->replacementlen > a->instrlen);
-+ instr = a->instr;
-+ /* vsyscall code is not mapped yet. resolve it manually. */
-+ if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END)
-+ instr -= VSYSCALL_START - (unsigned long)&__vsyscall_0;
-+ __inline_memcpy(instr, a->replacement, a->replacementlen);
-+ diff = a->instrlen - a->replacementlen;
-+
-+ /* Pad the rest with nops */
-+ for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
-+ k = diff;
-+ if (k > ASM_NOP_MAX)
-+ k = ASM_NOP_MAX;
-+ __inline_memcpy(instr + i, k8_nops[k], k);
-+ }
-+ }
-+}
-+
-+static int no_replacement __initdata = 0;
-+
-+void __init alternative_instructions(void)
-+{
-+ extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
-+ if (no_replacement)
-+ return;
-+ apply_alternatives(__alt_instructions, __alt_instructions_end);
-+}
-+
-+static int __init noreplacement_setup(char *s)
-+{
-+ no_replacement = 1;
-+ return 0;
-+}
-+
-+__setup("noreplacement", noreplacement_setup);
-+
-+#ifndef CONFIG_XEN
-+#define EBDA_ADDR_POINTER 0x40E
-+static void __init reserve_ebda_region(void)
-+{
-+ unsigned int addr;
-+	/*
-+ * there is a real-mode segmented pointer pointing to the
-+ * 4K EBDA area at 0x40E
-+ */
-+ addr = *(unsigned short *)phys_to_virt(EBDA_ADDR_POINTER);
-+ addr <<= 4;
-+ if (addr)
-+ reserve_bootmem_generic(addr, PAGE_SIZE);
-+}
-+#endif
-+
-+void __init setup_arch(char **cmdline_p)
-+{
-+ unsigned long kernel_end;
-+
-+ ROOT_DEV = MKDEV(RAMDISK_MAJOR,0);
-+ kernel_end = 0; /* dummy */
-+#ifdef SCREEN_INFO
-+ screen_info = SCREEN_INFO;
-+#endif
-+
-+ if (xen_start_info->flags & SIF_INITDOMAIN) {
-+ /* This is drawn from a dump from vgacon:startup in
-+ * standard Linux. */
-+ screen_info.orig_video_mode = 3;
-+ screen_info.orig_video_isVGA = 1;
-+ screen_info.orig_video_lines = 25;
-+ screen_info.orig_video_cols = 80;
-+ screen_info.orig_video_ega_bx = 3;
-+ screen_info.orig_video_points = 16;
-+ }
-+
-+#ifdef EDID_INFO
-+ edid_info = EDID_INFO;
-+#endif
-+#ifdef SAVED_VIDEO_MODE
-+ saved_video_mode = SAVED_VIDEO_MODE;
-+#endif
-+#ifdef LOADER_TYPE
-+ bootloader_type = LOADER_TYPE;
-+#endif
-+
-+#if defined(CONFIG_BLK_DEV_RAM) && defined(RAMDISK_FLAGS)
-+ rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
-+ rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
-+ rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
-+#endif
-+
-+#ifdef CONFIG_XEN
-+ setup_xen_features();
-+
-+ HYPERVISOR_vm_assist(VMASST_CMD_enable,
-+ VMASST_TYPE_writable_pagetables);
-+
-+ HYPERVISOR_set_callbacks(
-+ (unsigned long) hypervisor_callback,
-+ (unsigned long) failsafe_callback,
-+ (unsigned long) system_call);
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+ HYPERVISOR_nmi_op(XENNMI_register_callback, (unsigned long)&nmi);
-+#endif
-+
-+ modify_cpu_capabilities(&boot_cpu_data);
-+#endif
-+
-+ if (!MOUNT_ROOT_RDONLY)
-+ root_mountflags &= ~MS_RDONLY;
-+ init_mm.start_code = (unsigned long) &_text;
-+ init_mm.end_code = (unsigned long) &_etext;
-+ init_mm.end_data = (unsigned long) &_edata;
-+#ifdef CONFIG_XEN
-+ init_mm.brk = start_pfn << PAGE_SHIFT;
-+#else
-+ init_mm.brk = (unsigned long) &_end;
-+
-+ code_resource.start = virt_to_phys(&_text);
-+ code_resource.end = virt_to_phys(&_etext)-1;
-+ data_resource.start = virt_to_phys(&_etext);
-+ data_resource.end = virt_to_phys(&_edata)-1;
-+#endif
-+
-+ parse_cmdline_early(cmdline_p);
-+
-+ early_identify_cpu(&boot_cpu_data);
-+
-+ /*
-+ * partially used pages are not usable - thus
-+ * we are rounding upwards:
-+ */
-+ end_pfn = e820_end_of_ram();
-+
-+ check_efer();
-+
-+ init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
-+
-+#ifdef CONFIG_ACPI_NUMA
-+ /*
-+ * Parse SRAT to discover nodes.
-+ */
-+ acpi_numa_init();
-+#endif
-+
-+#ifdef CONFIG_NUMA
-+ numa_initmem_init(start_pfn, end_pfn);
-+#else
-+ contig_initmem_init(start_pfn, end_pfn);
-+#endif
-+
-+#ifndef CONFIG_XEN
-+ /* Reserve direct mapping */
-+ reserve_bootmem_generic(table_start << PAGE_SHIFT,
-+ (table_end - table_start) << PAGE_SHIFT);
-+
-+ /* reserve kernel */
-+ kernel_end = round_up(__pa_symbol(&_end),PAGE_SIZE);
-+ reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);
-+
-+ /*
-+ * reserve physical page 0 - it's a special BIOS page on many boxes,
-+ * enabling clean reboots, SMP operation, laptop functions.
-+ */
-+ reserve_bootmem_generic(0, PAGE_SIZE);
-+
-+ /* reserve ebda region */
-+ reserve_ebda_region();
-+#endif
-+
-+#ifdef CONFIG_SMP
-+ /*
-+ * But first pinch a few for the stack/trampoline stuff
-+ * FIXME: Don't need the extra page at 4K, but need to fix
-+ * trampoline before removing it. (see the GDT stuff)
-+ */
-+ reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);
-+
-+ /* Reserve SMP trampoline */
-+ reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
-+#endif
-+
-+#ifdef CONFIG_ACPI_SLEEP
-+ /*
-+ * Reserve low memory region for sleep support.
-+ */
-+ acpi_reserve_bootmem();
-+#endif
-+#ifdef CONFIG_XEN
-+#ifdef CONFIG_BLK_DEV_INITRD
-+ if (xen_start_info->mod_start) {
-+ if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
-+ /*reserve_bootmem_generic(INITRD_START, INITRD_SIZE);*/
-+ initrd_start = INITRD_START + PAGE_OFFSET;
-+ initrd_end = initrd_start+INITRD_SIZE;
-+ initrd_below_start_ok = 1;
-+ } else {
-+ printk(KERN_ERR "initrd extends beyond end of memory "
-+ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
-+ (unsigned long)(INITRD_START + INITRD_SIZE),
-+ (unsigned long)(end_pfn << PAGE_SHIFT));
-+ initrd_start = 0;
-+ }
-+ }
-+#endif
-+#else /* CONFIG_XEN */
-+#ifdef CONFIG_BLK_DEV_INITRD
-+ if (LOADER_TYPE && INITRD_START) {
-+ if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
-+ reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
-+ initrd_start =
-+ INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
-+ initrd_end = initrd_start+INITRD_SIZE;
-+ }
-+ else {
-+ printk(KERN_ERR "initrd extends beyond end of memory "
-+ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
-+ (unsigned long)(INITRD_START + INITRD_SIZE),
-+ (unsigned long)(end_pfn << PAGE_SHIFT));
-+ initrd_start = 0;
-+ }
-+ }
-+#endif
-+#endif /* !CONFIG_XEN */
-+#ifdef CONFIG_KEXEC
-+ if (crashk_res.start != crashk_res.end) {
-+ reserve_bootmem(crashk_res.start,
-+ crashk_res.end - crashk_res.start + 1);
-+ }
-+#endif
-+
-+ paging_init();
-+#ifdef CONFIG_X86_LOCAL_APIC
-+ /*
-+ * Find and reserve possible boot-time SMP configuration:
-+ */
-+ find_smp_config();
-+#endif
-+#ifdef CONFIG_XEN
-+ {
-+ int i, j, k, fpp;
-+ unsigned long va;
-+
-+ /* 'Initial mapping' of initrd must be destroyed. */
-+ for (va = xen_start_info->mod_start;
-+ va < (xen_start_info->mod_start+xen_start_info->mod_len);
-+ va += PAGE_SIZE) {
-+ HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
-+ }
-+
-+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+ /* Make sure we have a large enough P->M table. */
-+ phys_to_machine_mapping = alloc_bootmem(
-+ end_pfn * sizeof(unsigned long));
-+ memset(phys_to_machine_mapping, ~0,
-+ end_pfn * sizeof(unsigned long));
-+ memcpy(phys_to_machine_mapping,
-+ (unsigned long *)xen_start_info->mfn_list,
-+ xen_start_info->nr_pages * sizeof(unsigned long));
-+ free_bootmem(
-+ __pa(xen_start_info->mfn_list),
-+ PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
-+ sizeof(unsigned long))));
-+
-+		/* Destroy the 'initial mapping' of the old p2m table. */
-+ for (va = xen_start_info->mfn_list;
-+ va < (xen_start_info->mfn_list +
-+ (xen_start_info->nr_pages*sizeof(unsigned long)));
-+ va += PAGE_SIZE) {
-+ HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
-+ }
-+
-+ /*
-+ * Initialise the list of the frames that specify the
-+ * list of frames that make up the p2m table. Used by
-+ * save/restore.
-+ */
-+ pfn_to_mfn_frame_list_list = alloc_bootmem(PAGE_SIZE);
-+ HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
-+ virt_to_mfn(pfn_to_mfn_frame_list_list);
-+
-+ fpp = PAGE_SIZE/sizeof(unsigned long);
-+ for (i=0, j=0, k=-1; i< end_pfn; i+=fpp, j++) {
-+ if ((j % fpp) == 0) {
-+ k++;
-+ BUG_ON(k>=fpp);
-+ pfn_to_mfn_frame_list[k] =
-+ alloc_bootmem(PAGE_SIZE);
-+ pfn_to_mfn_frame_list_list[k] =
-+ virt_to_mfn(pfn_to_mfn_frame_list[k]);
-+ j=0;
-+ }
-+ pfn_to_mfn_frame_list[k][j] =
-+ virt_to_mfn(&phys_to_machine_mapping[i]);
-+ }
-+ HYPERVISOR_shared_info->arch.max_pfn = end_pfn;
-+ }
-+
-+ }
-+
-+ if ( ! (xen_start_info->flags & SIF_INITDOMAIN))
-+ {
-+ acpi_disabled = 1;
-+#ifdef CONFIG_ACPI
-+ acpi_ht = 0;
-+#endif
-+ }
-+#endif
-+
-+#ifndef CONFIG_XEN
-+ check_ioapic();
-+#endif
-+
-+ zap_low_mappings(0);
-+
-+#ifdef CONFIG_ACPI
-+ /*
-+ * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
-+ * Call this early for SRAT node setup.
-+ */
-+ acpi_boot_table_init();
-+
-+ /*
-+ * Read APIC and some other early information from ACPI tables.
-+ */
-+ acpi_boot_init();
-+#endif
-+
-+ init_cpu_to_node();
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+ /*
-+ * get boot-time SMP configuration:
-+ */
-+ if (smp_found_config)
-+ get_smp_config();
-+#ifndef CONFIG_XEN
-+ init_apic_mappings();
-+#endif
-+#endif
-+#if defined(CONFIG_XEN) && defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
-+ prefill_possible_map();
-+#endif
-+
-+#if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
-+ /*
-+ * Request address space for all standard RAM and ROM resources
-+ * and also for regions reported as reserved by the e820.
-+ */
-+ probe_roms();
-+ e820_reserve_resources();
-+#endif
-+
-+ request_resource(&iomem_resource, &video_ram_resource);
-+
-+ {
-+ unsigned i;
-+ /* request I/O space for devices used on all i[345]86 PCs */
-+ for (i = 0; i < STANDARD_IO_RESOURCES; i++)
-+ request_resource(&ioport_resource, &standard_io_resources[i]);
-+ }
-+
-+ e820_setup_gap();
-+
-+#ifdef CONFIG_GART_IOMMU
-+ iommu_hole_init();
-+#endif
-+
-+#ifdef CONFIG_XEN
-+ {
-+ physdev_op_t op;
-+
-+ op.cmd = PHYSDEVOP_SET_IOPL;
-+ op.u.set_iopl.iopl = 1;
-+ HYPERVISOR_physdev_op(&op);
-+
-+ if (xen_start_info->flags & SIF_INITDOMAIN) {
-+ if (!(xen_start_info->flags & SIF_PRIVILEGED))
-+ panic("Xen granted us console access "
-+ "but not privileged status");
-+
-+#ifdef CONFIG_VT
-+#if defined(CONFIG_VGA_CONSOLE)
-+ conswitchp = &vga_con;
-+#elif defined(CONFIG_DUMMY_CONSOLE)
-+ conswitchp = &dummy_con;
-+#endif
-+#endif
-+ } else {
-+ extern int console_use_vt;
-+ console_use_vt = 0;
-+ }
-+ }
-+#else /* CONFIG_XEN */
-+
-+#ifdef CONFIG_VT
-+#if defined(CONFIG_VGA_CONSOLE)
-+ conswitchp = &vga_con;
-+#elif defined(CONFIG_DUMMY_CONSOLE)
-+ conswitchp = &dummy_con;
-+#endif
-+#endif
-+
-+#endif /* !CONFIG_XEN */
-+}
-+
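The Xen block in setup_arch() builds a two-level frame list for the p2m table: each page of pfn_to_mfn_frame_list[] holds fpp = PAGE_SIZE/sizeof(long) frame numbers, and pfn_to_mfn_frame_list_list indexes those pages. The capacity arithmetic, assuming a 4 KiB page and 64-bit longs:

    #include <stdio.h>

    int main(void)
    {
        unsigned long page_size = 4096;                          /* assumed */
        unsigned long fpp = page_size / sizeof(unsigned long);   /* 512 */

        /* One list-of-lists page indexes fpp frame-list pages, each of
         * which indexes fpp p2m pages, each covering fpp pfns. */
        unsigned long max_pfn = fpp * fpp * fpp;
        printf("fpp=%lu, max pfns=%lu (%lu GiB)\n",
               fpp, max_pfn, max_pfn * page_size >> 30);
        return 0;
    }

With these numbers the scheme addresses 512 GiB of guest memory, comfortably above the table sizes allocated above.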
-+static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
-+{
-+ unsigned int *v;
-+
-+ if (c->extended_cpuid_level < 0x80000004)
-+ return 0;
-+
-+ v = (unsigned int *) c->x86_model_id;
-+ cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
-+ cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
-+ cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
-+ c->x86_model_id[48] = 0;
-+ return 1;
-+}
-+
-+
-+static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
-+{
-+ unsigned int n, dummy, eax, ebx, ecx, edx;
-+
-+ n = c->extended_cpuid_level;
-+
-+ if (n >= 0x80000005) {
-+ cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
-+ printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
-+ edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
-+ c->x86_cache_size=(ecx>>24)+(edx>>24);
-+ /* On K8 L1 TLB is inclusive, so don't count it */
-+ c->x86_tlbsize = 0;
-+ }
-+
-+ if (n >= 0x80000006) {
-+ cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
-+ ecx = cpuid_ecx(0x80000006);
-+ c->x86_cache_size = ecx >> 16;
-+ c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
-+
-+ printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
-+ c->x86_cache_size, ecx & 0xFF);
-+ }
-+
-+ if (n >= 0x80000007)
-+ cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
-+ if (n >= 0x80000008) {
-+ cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
-+ c->x86_virt_bits = (eax >> 8) & 0xff;
-+ c->x86_phys_bits = eax & 0xff;
-+ }
-+}
-+
-+#ifdef CONFIG_NUMA
-+static int nearby_node(int apicid)
-+{
-+ int i;
-+ for (i = apicid - 1; i >= 0; i--) {
-+ int node = apicid_to_node[i];
-+ if (node != NUMA_NO_NODE && node_online(node))
-+ return node;
-+ }
-+ for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
-+ int node = apicid_to_node[i];
-+ if (node != NUMA_NO_NODE && node_online(node))
-+ return node;
-+ }
-+ return first_node(node_online_map); /* Shouldn't happen */
-+}
-+#endif
-+
-+/*
-+ * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
-+ * Assumes number of cores is a power of two.
-+ */
-+static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
-+{
-+#ifdef CONFIG_SMP
-+ int cpu = smp_processor_id();
-+ unsigned bits;
-+#ifdef CONFIG_NUMA
-+ int node = 0;
-+ unsigned apicid = phys_proc_id[cpu];
-+#endif
-+
-+ bits = 0;
-+ while ((1 << bits) < c->x86_max_cores)
-+ bits++;
-+
-+ /* Low order bits define the core id (index of core in socket) */
-+ cpu_core_id[cpu] = phys_proc_id[cpu] & ((1 << bits)-1);
-+ /* Convert the APIC ID into the socket ID */
-+ phys_proc_id[cpu] >>= bits;
-+
-+#ifdef CONFIG_NUMA
-+ node = phys_proc_id[cpu];
-+ if (apicid_to_node[apicid] != NUMA_NO_NODE)
-+ node = apicid_to_node[apicid];
-+ if (!node_online(node)) {
-+ /* Two possibilities here:
-+ - The CPU is missing memory and no node was created.
-+ In that case try picking one from a nearby CPU
-+ - The APIC IDs differ from the HyperTransport node IDs
-+ which the K8 northbridge parsing fills in.
-+ Assume they are all increased by a constant offset,
-+ but in the same order as the HT nodeids.
-+ If that doesn't result in a usable node fall back to the
-+ path for the previous case. */
-+ int ht_nodeid = apicid - (phys_proc_id[0] << bits);
-+ if (ht_nodeid >= 0 &&
-+ apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
-+ node = apicid_to_node[ht_nodeid];
-+ /* Pick a nearby node */
-+ if (!node_online(node))
-+ node = nearby_node(apicid);
-+ }
-+ numa_set_node(cpu, node);
-+
-+ printk(KERN_INFO "CPU %d(%d) -> Node %d -> Core %d\n",
-+ cpu, c->x86_max_cores, node, cpu_core_id[cpu]);
-+#endif
-+#endif
-+}
-+
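amd_detect_cmp() above splits the initial APIC ID into core and socket: with 2^bits cores per package, the low bits index the core and the remaining bits identify the socket. Worked on an illustrative value:

    #include <stdio.h>

    int main(void)
    {
        unsigned max_cores = 2, bits = 0, apicid = 5;

        while ((1u << bits) < max_cores)    /* bits = log2(cores) = 1 */
            bits++;

        unsigned core   = apicid & ((1u << bits) - 1);  /* low bits  -> 1 */
        unsigned socket = apicid >> bits;               /* high bits -> 2 */
        printf("apicid %u -> socket %u, core %u\n", apicid, socket, core);
        return 0;
    }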
-+static int __init init_amd(struct cpuinfo_x86 *c)
-+{
-+ int r;
-+ unsigned level;
-+
-+#ifdef CONFIG_SMP
-+ unsigned long value;
-+
-+ /*
-+ * Disable TLB flush filter by setting HWCR.FFDIS on K8
-+ * bit 6 of msr C001_0015
-+ *
-+ * Errata 63 for SH-B3 steppings
-+ * Errata 122 for all steppings (F+ have it disabled by default)
-+ */
-+ if (c->x86 == 15) {
-+ rdmsrl(MSR_K8_HWCR, value);
-+ value |= 1 << 6;
-+ wrmsrl(MSR_K8_HWCR, value);
-+ }
-+#endif
-+
-+	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
-+	   3DNow is identified by bit 31 in extended CPUID (1*32+31) anyway */
-+ clear_bit(0*32+31, &c->x86_capability);
-+
-+ /* On C+ stepping K8 rep microcode works well for copy/memset */
-+ level = cpuid_eax(1);
-+ if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
-+ set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
-+
-+ r = get_model_name(c);
-+ if (!r) {
-+ switch (c->x86) {
-+ case 15:
-+			/* Should distinguish models here, but this is only
-+			   a fallback anyway. */
-+ strcpy(c->x86_model_id, "Hammer");
-+ break;
-+ }
-+ }
-+ display_cacheinfo(c);
-+
-+ /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
-+ if (c->x86_power & (1<<8))
-+ set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
-+
-+ if (c->extended_cpuid_level >= 0x80000008) {
-+ c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
-+ if (c->x86_max_cores & (c->x86_max_cores - 1))
-+ c->x86_max_cores = 1;
-+
-+ amd_detect_cmp(c);
-+ }
-+
-+ return r;
-+}
-+
-+static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
-+{
-+#ifdef CONFIG_SMP
-+ u32 eax, ebx, ecx, edx;
-+ int index_msb, core_bits;
-+ int cpu = smp_processor_id();
-+
-+ cpuid(1, &eax, &ebx, &ecx, &edx);
-+
-+ c->apicid = phys_pkg_id(0);
-+
-+ if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
-+ return;
-+
-+ smp_num_siblings = (ebx & 0xff0000) >> 16;
-+
-+ if (smp_num_siblings == 1) {
-+ printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
-+ } else if (smp_num_siblings > 1 ) {
-+
-+ if (smp_num_siblings > NR_CPUS) {
-+			printk(KERN_WARNING "CPU: Unsupported number of siblings %d", smp_num_siblings);
-+ smp_num_siblings = 1;
-+ return;
-+ }
-+
-+ index_msb = get_count_order(smp_num_siblings);
-+ phys_proc_id[cpu] = phys_pkg_id(index_msb);
-+
-+ printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
-+ phys_proc_id[cpu]);
-+
-+ smp_num_siblings = smp_num_siblings / c->x86_max_cores;
-+
-+ index_msb = get_count_order(smp_num_siblings) ;
-+
-+ core_bits = get_count_order(c->x86_max_cores);
-+
-+ cpu_core_id[cpu] = phys_pkg_id(index_msb) &
-+ ((1 << core_bits) - 1);
-+
-+ if (c->x86_max_cores > 1)
-+ printk(KERN_INFO "CPU: Processor Core ID: %d\n",
-+ cpu_core_id[cpu]);
-+ }
-+#endif
-+}
-+
-+/*
-+ * find out the number of processor cores on the die
-+ */
-+static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
-+{
-+ unsigned int eax;
-+
-+ if (c->cpuid_level < 4)
-+ return 1;
-+
-+ __asm__("cpuid"
-+ : "=a" (eax)
-+ : "0" (4), "c" (0)
-+ : "bx", "dx");
-+
-+ if (eax & 0x1f)
-+ return ((eax >> 26) + 1);
-+ else
-+ return 1;
-+}
-+
-+static void srat_detect_node(void)
-+{
-+#ifdef CONFIG_NUMA
-+ unsigned node;
-+ int cpu = smp_processor_id();
-+
-+ /* Don't do the funky fallback heuristics the AMD version employs
-+ for now. */
-+ node = apicid_to_node[hard_smp_processor_id()];
-+ if (node == NUMA_NO_NODE)
-+ node = 0;
-+ numa_set_node(cpu, node);
-+
-+ if (acpi_numa > 0)
-+ printk(KERN_INFO "CPU %d -> Node %d\n", cpu, node);
-+#endif
-+}
-+
-+static void __cpuinit init_intel(struct cpuinfo_x86 *c)
-+{
-+ /* Cache sizes */
-+ unsigned n;
-+
-+ init_intel_cacheinfo(c);
-+ n = c->extended_cpuid_level;
-+ if (n >= 0x80000008) {
-+ unsigned eax = cpuid_eax(0x80000008);
-+ c->x86_virt_bits = (eax >> 8) & 0xff;
-+ c->x86_phys_bits = eax & 0xff;
-+ /* CPUID workaround for Intel 0F34 CPU */
-+ if (c->x86_vendor == X86_VENDOR_INTEL &&
-+ c->x86 == 0xF && c->x86_model == 0x3 &&
-+ c->x86_mask == 0x4)
-+ c->x86_phys_bits = 36;
-+ }
-+
-+ if (c->x86 == 15)
-+ c->x86_cache_alignment = c->x86_clflush_size * 2;
-+ if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
-+ (c->x86 == 0x6 && c->x86_model >= 0x0e))
-+ set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
-+ set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
-+ c->x86_max_cores = intel_num_cpu_cores(c);
-+
-+ srat_detect_node();
-+}
-+
-+static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
-+{
-+ char *v = c->x86_vendor_id;
-+
-+ if (!strcmp(v, "AuthenticAMD"))
-+ c->x86_vendor = X86_VENDOR_AMD;
-+ else if (!strcmp(v, "GenuineIntel"))
-+ c->x86_vendor = X86_VENDOR_INTEL;
-+ else
-+ c->x86_vendor = X86_VENDOR_UNKNOWN;
-+}
-+
-+struct cpu_model_info {
-+ int vendor;
-+ int family;
-+ char *model_names[16];
-+};
-+
-+/* Do some early cpuid on the boot CPU to get some parameters that are
-+ needed before check_bugs. Everything advanced is in identify_cpu
-+ below. */
-+void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
-+{
-+ u32 tfms;
-+
-+ c->loops_per_jiffy = loops_per_jiffy;
-+ c->x86_cache_size = -1;
-+ c->x86_vendor = X86_VENDOR_UNKNOWN;
-+ c->x86_model = c->x86_mask = 0; /* So far unknown... */
-+ c->x86_vendor_id[0] = '\0'; /* Unset */
-+ c->x86_model_id[0] = '\0'; /* Unset */
-+ c->x86_clflush_size = 64;
-+ c->x86_cache_alignment = c->x86_clflush_size;
-+ c->x86_max_cores = 1;
-+ c->extended_cpuid_level = 0;
-+ memset(&c->x86_capability, 0, sizeof c->x86_capability);
-+
-+ /* Get vendor name */
-+ cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
-+ (unsigned int *)&c->x86_vendor_id[0],
-+ (unsigned int *)&c->x86_vendor_id[8],
-+ (unsigned int *)&c->x86_vendor_id[4]);
-+
-+ get_cpu_vendor(c);
-+
-+ /* Initialize the standard set of capabilities */
-+ /* Note that the vendor-specific code below might override */
-+
-+ /* Intel-defined flags: level 0x00000001 */
-+ if (c->cpuid_level >= 0x00000001) {
-+ __u32 misc;
-+ cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
-+ &c->x86_capability[0]);
-+ c->x86 = (tfms >> 8) & 0xf;
-+ c->x86_model = (tfms >> 4) & 0xf;
-+ c->x86_mask = tfms & 0xf;
-+ if (c->x86 == 0xf)
-+ c->x86 += (tfms >> 20) & 0xff;
-+ if (c->x86 >= 0x6)
-+ c->x86_model += ((tfms >> 16) & 0xF) << 4;
-+ if (c->x86_capability[0] & (1<<19))
-+ c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
-+ } else {
-+ /* Have CPUID level 0 only - unheard of */
-+ c->x86 = 4;
-+ }
-+
-+#ifdef CONFIG_SMP
-+ phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
-+#endif
-+}
-+
-+/*
-+ * This does the hard work of actually picking apart the CPU stuff...
-+ */
-+void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
-+{
-+ int i;
-+ u32 xlvl;
-+
-+ early_identify_cpu(c);
-+
-+ /* AMD-defined flags: level 0x80000001 */
-+ xlvl = cpuid_eax(0x80000000);
-+ c->extended_cpuid_level = xlvl;
-+ if ((xlvl & 0xffff0000) == 0x80000000) {
-+ if (xlvl >= 0x80000001) {
-+ c->x86_capability[1] = cpuid_edx(0x80000001);
-+ c->x86_capability[6] = cpuid_ecx(0x80000001);
-+ }
-+ if (xlvl >= 0x80000004)
-+ get_model_name(c); /* Default name */
-+ }
-+
-+ /* Transmeta-defined flags: level 0x80860001 */
-+ xlvl = cpuid_eax(0x80860000);
-+ if ((xlvl & 0xffff0000) == 0x80860000) {
-+ /* Don't set x86_cpuid_level here for now to not confuse. */
-+ if (xlvl >= 0x80860001)
-+ c->x86_capability[2] = cpuid_edx(0x80860001);
-+ }
-+
-+ /*
-+ * Vendor-specific initialization. In this section we
-+ * canonicalize the feature flags, meaning if there are
-+ * features a certain CPU supports which CPUID doesn't
-+ * tell us, CPUID claiming incorrect flags, or other bugs,
-+ * we handle them here.
-+ *
-+ * At the end of this section, c->x86_capability better
-+ * indicate the features this CPU genuinely supports!
-+ */
-+ switch (c->x86_vendor) {
-+ case X86_VENDOR_AMD:
-+ init_amd(c);
-+ break;
-+
-+ case X86_VENDOR_INTEL:
-+ init_intel(c);
-+ break;
-+
-+ case X86_VENDOR_UNKNOWN:
-+ default:
-+ display_cacheinfo(c);
-+ break;
-+ }
-+
-+ select_idle_routine(c);
-+ detect_ht(c);
-+
-+ modify_cpu_capabilities(c);
-+
-+ /*
-+ * On SMP, boot_cpu_data holds the common feature set between
-+ * all CPUs; so make sure that we indicate which features are
-+ * common between the CPUs. The first time this routine gets
-+ * executed, c == &boot_cpu_data.
-+ */
-+ if (c != &boot_cpu_data) {
-+ /* AND the already accumulated flags with these */
-+ for (i = 0 ; i < NCAPINTS ; i++)
-+ boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
-+ }
-+
-+#ifdef CONFIG_X86_MCE
-+ mcheck_init(c);
-+#endif
-+ if (c == &boot_cpu_data)
-+ mtrr_bp_init();
-+ else
-+ mtrr_ap_init();
-+#ifdef CONFIG_NUMA
-+ numa_add_cpu(smp_processor_id());
-+#endif
-+}
-+
-+
-+void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
-+{
-+ if (c->x86_model_id[0])
-+ printk("%s", c->x86_model_id);
-+
-+ if (c->x86_mask || c->cpuid_level >= 0)
-+ printk(" stepping %02x\n", c->x86_mask);
-+ else
-+ printk("\n");
-+}
-+
-+/*
-+ * Get CPU information for use by the procfs.
-+ */
-+
-+static int show_cpuinfo(struct seq_file *m, void *v)
-+{
-+ struct cpuinfo_x86 *c = v;
-+
-+ /*
-+ * These flag bits must match the definitions in <asm/cpufeature.h>.
-+ * NULL means this bit is undefined or reserved; either way it doesn't
-+ * have meaning as far as Linux is concerned. Note that it's important
-+ * to realize there is a difference between this table and CPUID -- if
-+ * applications want to get the raw CPUID data, they should access
-+ * /dev/cpu/<cpu_nr>/cpuid instead.
-+ */
-+ static char *x86_cap_flags[] = {
-+ /* Intel-defined */
-+ "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
-+ "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
-+ "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
-+ "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,
-+
-+ /* AMD-defined */
-+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+ NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
-+ NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
-+ NULL, "fxsr_opt", "rdtscp", NULL, NULL, "lm", "3dnowext", "3dnow",
-+
-+ /* Transmeta-defined */
-+ "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
-+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+
-+ /* Other (Linux-defined) */
-+ "cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
-+ "constant_tsc", NULL, NULL,
-+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+
-+ /* Intel-defined (#2) */
-+ "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", NULL, "est",
-+ "tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
-+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+
-+ /* VIA/Cyrix/Centaur-defined */
-+ NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
-+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+
-+ /* AMD-defined (#2) */
-+ "lahf_lm", "cmp_legacy", "svm", NULL, "cr8_legacy", NULL, NULL, NULL,
-+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+ };
-+ static char *x86_power_flags[] = {
-+ "ts", /* temperature sensor */
-+ "fid", /* frequency id control */
-+ "vid", /* voltage id control */
-+ "ttp", /* thermal trip */
-+ "tm",
-+ "stc",
-+ NULL,
-+ /* nothing */ /* constant_tsc - moved to flags */
-+ };
-+
-+
-+#ifdef CONFIG_SMP
-+ if (!cpu_online(c-cpu_data))
-+ return 0;
-+#endif
-+
-+ seq_printf(m,"processor\t: %u\n"
-+ "vendor_id\t: %s\n"
-+ "cpu family\t: %d\n"
-+ "model\t\t: %d\n"
-+ "model name\t: %s\n",
-+ (unsigned)(c-cpu_data),
-+ c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
-+ c->x86,
-+ (int)c->x86_model,
-+ c->x86_model_id[0] ? c->x86_model_id : "unknown");
-+
-+ if (c->x86_mask || c->cpuid_level >= 0)
-+ seq_printf(m, "stepping\t: %d\n", c->x86_mask);
-+ else
-+ seq_printf(m, "stepping\t: unknown\n");
-+
-+ if (cpu_has(c,X86_FEATURE_TSC)) {
-+ unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data));
-+ if (!freq)
-+ freq = cpu_khz;
-+ seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
-+ freq / 1000, (freq % 1000));
-+ }
-+
-+ /* Cache size */
-+ if (c->x86_cache_size >= 0)
-+ seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
-+
-+#ifdef CONFIG_SMP
-+ if (smp_num_siblings * c->x86_max_cores > 1) {
-+ int cpu = c - cpu_data;
-+ seq_printf(m, "physical id\t: %d\n", phys_proc_id[cpu]);
-+ seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
-+ seq_printf(m, "core id\t\t: %d\n", cpu_core_id[cpu]);
-+ seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
-+ }
-+#endif
-+
-+ seq_printf(m,
-+ "fpu\t\t: yes\n"
-+ "fpu_exception\t: yes\n"
-+ "cpuid level\t: %d\n"
-+ "wp\t\t: yes\n"
-+ "flags\t\t:",
-+ c->cpuid_level);
-+
-+ {
-+ int i;
-+ for ( i = 0 ; i < 32*NCAPINTS ; i++ )
-+ if ( test_bit(i, &c->x86_capability) &&
-+ x86_cap_flags[i] != NULL )
-+ seq_printf(m, " %s", x86_cap_flags[i]);
-+ }
-+
-+ seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
-+ c->loops_per_jiffy/(500000/HZ),
-+ (c->loops_per_jiffy/(5000/HZ)) % 100);
-+
-+ if (c->x86_tlbsize > 0)
-+ seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
-+ seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
-+ seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
-+
-+ seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
-+ c->x86_phys_bits, c->x86_virt_bits);
-+
-+ seq_printf(m, "power management:");
-+ {
-+ unsigned i;
-+ for (i = 0; i < 32; i++)
-+ if (c->x86_power & (1 << i)) {
-+ if (i < ARRAY_SIZE(x86_power_flags) &&
-+ x86_power_flags[i])
-+ seq_printf(m, "%s%s",
-+ x86_power_flags[i][0]?" ":"",
-+ x86_power_flags[i]);
-+ else
-+ seq_printf(m, " [%d]", i);
-+ }
-+ }
-+
-+ seq_printf(m, "\n\n");
-+
-+ return 0;
-+}
-+
-+static void *c_start(struct seq_file *m, loff_t *pos)
-+{
-+ return *pos < NR_CPUS ? cpu_data + *pos : NULL;
-+}
-+
-+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
-+{
-+ ++*pos;
-+ return c_start(m, pos);
-+}
-+
-+static void c_stop(struct seq_file *m, void *v)
-+{
-+}
-+
-+struct seq_operations cpuinfo_op = {
-+ .start = c_start,
-+ .next = c_next,
-+ .stop = c_stop,
-+ .show = show_cpuinfo,
-+};
-+
-+static int __init run_dmi_scan(void)
-+{
-+ dmi_scan_machine();
-+ return 0;
-+}
-+core_initcall(run_dmi_scan);
-+
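
[Editor's note: the cpuinfo_op table deleted above wires show_cpuinfo into the kernel's seq_file iterator protocol: start() positions the cursor, next() advances it, show() emits one record, stop() cleans up. A minimal stand-alone sketch of that cycle follows; the names mirror the deleted code but this is illustrative C, not the real seq_file API.]

    #include <stdio.h>

    #define NR_ITEMS 3
    static int items[NR_ITEMS] = { 10, 20, 30 };

    /* start: position the cursor; NULL means iteration is over */
    static void *c_start(long *pos) { return *pos < NR_ITEMS ? &items[*pos] : NULL; }
    /* next: advance and re-position, mirroring c_next() above */
    static void *c_next(void *v, long *pos) { ++*pos; return c_start(pos); }
    /* stop: release anything start() acquired (nothing here) */
    static void c_stop(void *v) { (void)v; }
    /* show: format a single record */
    static int c_show(void *v) { return printf("item\t: %d\n", *(int *)v); }

    int main(void)
    {
        long pos = 0;
        for (void *v = c_start(&pos); v != NULL; v = c_next(v, &pos))
            c_show(v);
        c_stop(NULL);
        return 0;
    }

[stop() exists so the iterator can drop locks or references even when the walk ends early, which is why the kernel calls it unconditionally.]
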
-diff --git a/arch/x86_64/kernel/setup64-xen.c b/arch/x86_64/kernel/setup64-xen.c
-new file mode 100644
-index 0000000..946c593
---- /dev/null
-+++ b/arch/x86_64/kernel/setup64-xen.c
-@@ -0,0 +1,358 @@
-+/*
-+ * X86-64 specific CPU setup.
-+ * Copyright (C) 1995 Linus Torvalds
-+ * Copyright 2001, 2002, 2003 SuSE Labs / Andi Kleen.
-+ * See setup.c for older changelog.
-+ * $Id: setup64.c,v 1.12 2002/03/21 10:09:17 ak Exp $
-+ *
-+ * Jun Nakajima <jun.nakajima@intel.com>
-+ * Modified for Xen
-+ *
-+ */
-+#include <linux/config.h>
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/string.h>
-+#include <linux/bootmem.h>
-+#include <linux/bitops.h>
-+#include <linux/module.h>
-+#include <asm/bootsetup.h>
-+#include <asm/pda.h>
-+#include <asm/pgtable.h>
-+#include <asm/processor.h>
-+#include <asm/desc.h>
-+#include <asm/atomic.h>
-+#include <asm/mmu_context.h>
-+#include <asm/smp.h>
-+#include <asm/i387.h>
-+#include <asm/percpu.h>
-+#include <asm/proto.h>
-+#include <asm/sections.h>
-+#ifdef CONFIG_XEN
-+#include <asm/hypervisor.h>
-+#endif
-+
-+cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
-+
-+struct x8664_pda *_cpu_pda[NR_CPUS] __read_mostly;
-+struct x8664_pda boot_cpu_pda[NR_CPUS] __cacheline_aligned;
-+
-+#ifndef CONFIG_X86_NO_IDT
-+struct desc_ptr idt_descr = { 256 * 16, (unsigned long) idt_table };
-+#endif
-+
-+char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));
-+
-+unsigned long __supported_pte_mask __read_mostly = ~0UL;
-+static int do_not_nx __cpuinitdata = 0;
-+
-+/* noexec=on|off
-+Control non-executable mappings for 64bit processes.
-+
-+on Enable (default)
-+off Disable
-+*/
-+int __init nonx_setup(char *str)
-+{
-+ if (!strncmp(str, "on", 2)) {
-+ __supported_pte_mask |= _PAGE_NX;
-+ do_not_nx = 0;
-+ } else if (!strncmp(str, "off", 3)) {
-+ do_not_nx = 1;
-+ __supported_pte_mask &= ~_PAGE_NX;
-+ }
-+ return 0;
-+}
-+__setup("noexec=", nonx_setup); /* parsed early actually */
-+
-+int force_personality32 = READ_IMPLIES_EXEC;
-+
-+/* noexec32=on|off
-+Control non-executable heap for 32bit processes.
-+To control the stack too, use noexec=off
-+
-+on PROT_READ does not imply PROT_EXEC for 32bit processes
-+off PROT_READ implies PROT_EXEC (default)
-+*/
-+static int __init nonx32_setup(char *str)
-+{
-+ if (!strcmp(str, "on"))
-+ force_personality32 &= ~READ_IMPLIES_EXEC;
-+ else if (!strcmp(str, "off"))
-+ force_personality32 |= READ_IMPLIES_EXEC;
-+ return 0;
-+}
-+__setup("noexec32=", nonx32_setup);
-+
-+/*
-+ * Great future plan:
-+ * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
-+ * Always point %gs to its beginning
-+ */
-+void __init setup_per_cpu_areas(void)
-+{
-+ int i;
-+ unsigned long size;
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+ prefill_possible_map();
-+#endif
-+
-+ /* Copy section for each CPU (we discard the original) */
-+ size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
-+#ifdef CONFIG_MODULES
-+ if (size < PERCPU_ENOUGH_ROOM)
-+ size = PERCPU_ENOUGH_ROOM;
-+#endif
-+
-+ for_each_cpu_mask (i, cpu_possible_map) {
-+ char *ptr;
-+
-+ if (!NODE_DATA(cpu_to_node(i))) {
-+ printk("cpu with no node %d, num_online_nodes %d\n",
-+ i, num_online_nodes());
-+ ptr = alloc_bootmem(size);
-+ } else {
-+ ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size);
-+ }
-+ if (!ptr)
-+ panic("Cannot allocate cpu data for CPU %d\n", i);
-+ cpu_pda(i)->data_offset = ptr - __per_cpu_start;
-+ memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-+ }
-+}
-+
-+#ifdef CONFIG_XEN
-+static void switch_pt(void)
-+{
-+ xen_pt_switch(__pa(init_level4_pgt));
-+ xen_new_user_pt(__pa(init_level4_user_pgt));
-+}
-+
-+void __cpuinit cpu_gdt_init(struct desc_ptr *gdt_descr)
-+{
-+ unsigned long frames[16];
-+ unsigned long va;
-+ int f;
-+
-+ for (va = gdt_descr->address, f = 0;
-+ va < gdt_descr->address + gdt_descr->size;
-+ va += PAGE_SIZE, f++) {
-+ frames[f] = virt_to_mfn(va);
-+ make_page_readonly(
-+ (void *)va, XENFEAT_writable_descriptor_tables);
-+ }
-+ if (HYPERVISOR_set_gdt(frames, gdt_descr->size /
-+ sizeof (struct desc_struct)))
-+ BUG();
-+}
-+#else
-+static void switch_pt(void)
-+{
-+ asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
-+}
-+
-+void __init cpu_gdt_init(struct desc_ptr *gdt_descr)
-+{
-+ asm volatile("lgdt %0" :: "m" (*gdt_descr));
-+ asm volatile("lidt %0" :: "m" (idt_descr));
-+}
-+#endif
-+
-+void pda_init(int cpu)
-+{
-+ struct x8664_pda *pda = cpu_pda(cpu);
-+
-+ /* Set up data that may be needed in __get_free_pages early */
-+ asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0));
-+#ifndef CONFIG_XEN
-+ wrmsrl(MSR_GS_BASE, pda);
-+#else
-+ HYPERVISOR_set_segment_base(SEGBASE_GS_KERNEL, (unsigned long)pda);
-+#endif
-+ pda->cpunumber = cpu;
-+ pda->irqcount = -1;
-+ pda->kernelstack =
-+ (unsigned long)stack_thread_info() - PDA_STACKOFFSET + THREAD_SIZE;
-+
-+ if (cpu == 0) {
-+#ifdef CONFIG_XEN
-+ xen_init_pt();
-+#endif
-+ /* others are initialized in smpboot.c */
-+ pda->pcurrent = &init_task;
-+ pda->irqstackptr = boot_cpu_stack;
-+ } else {
-+ pda->irqstackptr = (char *)
-+ __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
-+ if (!pda->irqstackptr)
-+ panic("cannot allocate irqstack for cpu %d", cpu);
-+ }
-+
-+ switch_pt();
-+
-+ pda->irqstackptr += IRQSTACKSIZE-64;
-+}
-+
-+#ifndef CONFIG_X86_NO_TSS
-+char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]
-+__attribute__((section(".bss.page_aligned")));
-+#endif
-+
-+/* May not be marked __init: used by software suspend */
-+void syscall_init(void)
-+{
-+#ifndef CONFIG_XEN
-+ /*
-+ * LSTAR and STAR live in a somewhat strange symbiosis.
-+ * They both write to the same internal register. STAR allows setting CS/DS,
-+ * but only a 32bit target. LSTAR sets the 64bit rip.
-+ */
-+ wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
-+ wrmsrl(MSR_LSTAR, system_call);
-+
-+ /* Flags to clear on syscall */
-+ wrmsrl(MSR_SYSCALL_MASK, EF_TF|EF_DF|EF_IE|0x3000);
-+#endif
-+#ifdef CONFIG_IA32_EMULATION
-+ syscall32_cpu_init ();
-+#endif
-+}
-+
-+void __cpuinit check_efer(void)
-+{
-+ unsigned long efer;
-+
-+ rdmsrl(MSR_EFER, efer);
-+ if (!(efer & EFER_NX) || do_not_nx) {
-+ __supported_pte_mask &= ~_PAGE_NX;
-+ }
-+}
-+
-+/*
-+ * cpu_init() initializes state that is per-CPU. Some data is already
-+ * initialized (naturally) in the bootstrap process, such as the GDT
-+ * and IDT. We reload them nevertheless, this function acts as a
-+ * 'CPU state barrier', nothing should get across.
-+ * A lot of state is already set up in PDA init.
-+ */
-+void __cpuinit cpu_init (void)
-+{
-+ int cpu = stack_smp_processor_id();
-+#ifndef CONFIG_X86_NO_TSS
-+ struct tss_struct *t = &per_cpu(init_tss, cpu);
-+ unsigned long v;
-+ char *estacks = NULL;
-+ unsigned i;
-+#endif
-+ struct task_struct *me;
-+
-+ /* CPU 0 is initialised in head64.c */
-+ if (cpu != 0) {
-+ pda_init(cpu);
-+ zap_low_mappings(cpu);
-+ }
-+#ifndef CONFIG_X86_NO_TSS
-+ else
-+ estacks = boot_exception_stacks;
-+#endif
-+
-+ me = current;
-+
-+ if (cpu_test_and_set(cpu, cpu_initialized))
-+ panic("CPU#%d already initialized!\n", cpu);
-+
-+ printk("Initializing CPU#%d\n", cpu);
-+
-+#ifndef CONFIG_XEN
-+ clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
-+#endif
-+
-+ /*
-+ * Initialize the per-CPU GDT with the boot GDT,
-+ * and set up the GDT descriptor:
-+ */
-+#ifndef CONFIG_XEN
-+ if (cpu)
-+ memcpy(cpu_gdt(cpu), cpu_gdt_table, GDT_SIZE);
-+#endif
-+
-+ cpu_gdt_descr[cpu].size = GDT_SIZE;
-+ cpu_gdt_init(&cpu_gdt_descr[cpu]);
-+
-+ memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
-+ syscall_init();
-+
-+ wrmsrl(MSR_FS_BASE, 0);
-+ wrmsrl(MSR_KERNEL_GS_BASE, 0);
-+ barrier();
-+
-+ check_efer();
-+
-+#ifndef CONFIG_X86_NO_TSS
-+ /*
-+ * set up and load the per-CPU TSS
-+ */
-+ for (v = 0; v < N_EXCEPTION_STACKS; v++) {
-+ if (cpu) {
-+ static const unsigned int order[N_EXCEPTION_STACKS] = {
-+ [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
-+ [DEBUG_STACK - 1] = DEBUG_STACK_ORDER
-+ };
-+
-+ estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
-+ if (!estacks)
-+ panic("Cannot allocate exception stack %ld %d\n",
-+ v, cpu);
-+ }
-+ switch (v + 1) {
-+#if DEBUG_STKSZ > EXCEPTION_STKSZ
-+ case DEBUG_STACK:
-+ cpu_pda[cpu].debugstack = (unsigned long)estacks;
-+ estacks += DEBUG_STKSZ;
-+ break;
-+#endif
-+ default:
-+ estacks += EXCEPTION_STKSZ;
-+ break;
-+ }
-+ t->ist[v] = (unsigned long)estacks;
-+ }
-+
-+ t->io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
-+ /*
-+ * <= is required because the CPU will access up to
-+ * 8 bits beyond the end of the IO permission bitmap.
-+ */
-+ for (i = 0; i <= IO_BITMAP_LONGS; i++)
-+ t->io_bitmap[i] = ~0UL;
-+#endif
-+
-+ atomic_inc(&init_mm.mm_count);
-+ me->active_mm = &init_mm;
-+ if (me->mm)
-+ BUG();
-+ enter_lazy_tlb(&init_mm, me);
-+
-+#ifndef CONFIG_X86_NO_TSS
-+ set_tss_desc(cpu, t);
-+#endif
-+#ifndef CONFIG_XEN
-+ load_TR_desc();
-+#endif
-+ load_LDT(&init_mm.context);
-+
-+ /*
-+ * Clear all 6 debug registers:
-+ */
-+
-+ set_debug(0UL, 0);
-+ set_debug(0UL, 1);
-+ set_debug(0UL, 2);
-+ set_debug(0UL, 3);
-+ set_debug(0UL, 6);
-+ set_debug(0UL, 7);
-+
-+ fpu_init();
-+}
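
[Editor's note: setup_per_cpu_areas() in the file above replicates the linker-provided per-CPU template once per possible CPU and records each copy's offset in the PDA, so per_cpu() later resolves to base+offset. A rough userspace analogue of that copy-and-record step follows; the names and sizes are made up, and the kernel additionally rounds the size up and prefers node-local bootmem.]

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define NR_CPUS 4

    /* Stands in for the __per_cpu_start..__per_cpu_end template section. */
    static char percpu_template[64] = "per-cpu data template";

    /* Mirrors cpu_pda(i)->data_offset. */
    static ptrdiff_t data_offset[NR_CPUS];

    int main(void)
    {
        size_t size = sizeof(percpu_template);   /* the kernel ALIGNs this up */

        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            char *ptr = malloc(size);             /* alloc_bootmem_node() stand-in */
            if (!ptr) {
                fprintf(stderr, "Cannot allocate cpu data for CPU %d\n", cpu);
                return 1;                         /* the kernel panics here */
            }
            memcpy(ptr, percpu_template, size);   /* template copy is discardable */
            /* The kernel relies on a flat address space for this subtraction. */
            data_offset[cpu] = ptr - percpu_template;
            printf("cpu %d: data_offset %td\n", cpu, data_offset[cpu]);
        }
        return 0;
    }
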
-diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c
-index 70f1bb8..8d48eed 100644
---- a/arch/x86_64/kernel/setup64.c
-+++ b/arch/x86_64/kernel/setup64.c
-@@ -128,8 +128,10 @@ void pda_init(int cpu)
- pda->irqcount = -1;
- pda->kernelstack =
- (unsigned long)stack_thread_info() - PDA_STACKOFFSET + THREAD_SIZE;
-+#ifdef CONFIG_SMP
- pda->active_mm = &init_mm;
- pda->mmu_state = 0;
-+#endif
-
- if (cpu == 0) {
- /* others are initialized in smpboot.c */
-diff --git a/arch/x86_64/kernel/smp-xen.c b/arch/x86_64/kernel/smp-xen.c
-new file mode 100644
-index 0000000..5aef99e
---- /dev/null
-+++ b/arch/x86_64/kernel/smp-xen.c
-@@ -0,0 +1,592 @@
-+/*
-+ * Intel SMP support routines.
-+ *
-+ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
-+ * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
-+ * (c) 2002,2003 Andi Kleen, SuSE Labs.
-+ *
-+ * This code is released under the GNU General Public License version 2 or
-+ * later.
-+ */
-+
-+#include <linux/init.h>
-+
-+#include <linux/mm.h>
-+#include <linux/delay.h>
-+#include <linux/spinlock.h>
-+#include <linux/smp_lock.h>
-+#include <linux/smp.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/interrupt.h>
-+
-+#include <asm/mtrr.h>
-+#include <asm/pgalloc.h>
-+#include <asm/tlbflush.h>
-+#include <asm/mach_apic.h>
-+#include <asm/mmu_context.h>
-+#include <asm/proto.h>
-+#include <asm/apicdef.h>
-+#include <asm/idle.h>
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * Smarter SMP flushing macros.
-+ * c/o Linus Torvalds.
-+ *
-+ * These mean you can really definitely utterly forget about
-+ * writing to user space from interrupts. (It's not allowed anyway.)
-+ *
-+ * Optimizations Manfred Spraul <manfred@colorfullife.com>
-+ *
-+ * More scalable flush, from Andi Kleen
-+ *
-+ * To avoid global state use 8 different call vectors.
-+ * Each CPU uses a specific vector to trigger flushes on other
-+ * CPUs. Depending on the received vector the target CPUs look into
-+ * the right per cpu variable for the flush data.
-+ *
-+ * With more than 8 CPUs they are hashed to the 8 available
-+ * vectors. The limited global vector space forces us to this right now.
-+ * In future when interrupts are split into per CPU domains this could be
-+ * fixed, at the cost of triggering multiple IPIs in some cases.
-+ */
-+
-+union smp_flush_state {
-+ struct {
-+ cpumask_t flush_cpumask;
-+ struct mm_struct *flush_mm;
-+ unsigned long flush_va;
-+#define FLUSH_ALL -1ULL
-+ spinlock_t tlbstate_lock;
-+ };
-+ char pad[SMP_CACHE_BYTES];
-+} ____cacheline_aligned;
-+
-+/* State is put into the per CPU data section, but padded
-+ to a full cache line because other CPUs can access it and we don't
-+ want false sharing in the per cpu data segment. */
-+static DEFINE_PER_CPU(union smp_flush_state, flush_state);
-+
-+/*
-+ * We cannot call mmdrop() because we are in interrupt context,
-+ * instead update mm->cpu_vm_mask.
-+ */
-+static inline void leave_mm(unsigned long cpu)
-+{
-+ if (read_pda(mmu_state) == TLBSTATE_OK)
-+ BUG();
-+ clear_bit(cpu, &read_pda(active_mm)->cpu_vm_mask);
-+ load_cr3(swapper_pg_dir);
-+}
-+
-+/*
-+ *
-+ * The flush IPI assumes that a thread switch happens in this order:
-+ * [cpu0: the cpu that switches]
-+ * 1) switch_mm() either 1a) or 1b)
-+ * 1a) thread switch to a different mm
-+ * 1a1) clear_bit(cpu, &old_mm->cpu_vm_mask);
-+ * Stop ipi delivery for the old mm. This is not synchronized with
-+ * the other cpus, but smp_invalidate_interrupt ignores flush ipis
-+ * for the wrong mm, and in the worst case we perform a superfluous
-+ * tlb flush.
-+ * 1a2) set cpu mmu_state to TLBSTATE_OK
-+ * Now the smp_invalidate_interrupt won't call leave_mm if cpu0
-+ * was in lazy tlb mode.
-+ * 1a3) update cpu active_mm
-+ * Now cpu0 accepts tlb flushes for the new mm.
-+ * 1a4) set_bit(cpu, &new_mm->cpu_vm_mask);
-+ * Now the other cpus will send tlb flush ipis.
-+ * 1a5) change cr3.
-+ * 1b) thread switch without mm change
-+ * cpu active_mm is correct, cpu0 already handles
-+ * flush ipis.
-+ * 1b1) set cpu mmu_state to TLBSTATE_OK
-+ * 1b2) test_and_set the cpu bit in cpu_vm_mask.
-+ * Atomically set the bit [other cpus will start sending flush ipis],
-+ * and test the bit.
-+ * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
-+ * 2) switch %%esp, ie current
-+ *
-+ * The interrupt must handle 2 special cases:
-+ * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
-+ * - the cpu performs speculative tlb reads, i.e. even if the cpu only
-+ * runs in kernel space, the cpu could load tlb entries for user space
-+ * pages.
-+ *
-+ * The good news is that cpu mmu_state is local to each cpu, no
-+ * write/read ordering problems.
-+ */
-+
-+/*
-+ * TLB flush IPI:
-+ *
-+ * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
-+ * 2) Leave the mm if we are in the lazy tlb mode.
-+ *
-+ * Interrupts are disabled.
-+ */
-+
-+asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
-+{
-+ int cpu;
-+ int sender;
-+ union smp_flush_state *f;
-+
-+ cpu = smp_processor_id();
-+ /*
-+ * orig_rax contains the interrupt vector - 256.
-+ * Use that to determine where the sender put the data.
-+ */
-+ sender = regs->orig_rax + 256 - INVALIDATE_TLB_VECTOR_START;
-+ f = &per_cpu(flush_state, sender);
-+
-+ if (!cpu_isset(cpu, f->flush_cpumask))
-+ goto out;
-+ /*
-+ * This was a BUG() but until someone can quote me the
-+ * line from the intel manual that guarantees an IPI to
-+ * multiple CPUs is retried _only_ on the erroring CPUs,
-+ * it's staying as a return
-+ *
-+ * BUG();
-+ */
-+
-+ if (f->flush_mm == read_pda(active_mm)) {
-+ if (read_pda(mmu_state) == TLBSTATE_OK) {
-+ if (f->flush_va == FLUSH_ALL)
-+ local_flush_tlb();
-+ else
-+ __flush_tlb_one(f->flush_va);
-+ } else
-+ leave_mm(cpu);
-+ }
-+out:
-+ ack_APIC_irq();
-+ cpu_clear(cpu, f->flush_cpumask);
-+}
-+
-+static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
-+ unsigned long va)
-+{
-+ int sender;
-+ union smp_flush_state *f;
-+
-+ /* Caller has disabled preemption */
-+ sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
-+ f = &per_cpu(flush_state, sender);
-+
-+ /* Could avoid this lock when
-+ num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
-+ probably not worth checking this for a cache-hot lock. */
-+ spin_lock(&f->tlbstate_lock);
-+
-+ f->flush_mm = mm;
-+ f->flush_va = va;
-+ cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);
-+
-+ /*
-+ * We have to send the IPI only to
-+ * CPUs affected.
-+ */
-+ send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);
-+
-+ while (!cpus_empty(f->flush_cpumask))
-+ cpu_relax();
-+
-+ f->flush_mm = NULL;
-+ f->flush_va = 0;
-+ spin_unlock(&f->tlbstate_lock);
-+}
-+
-+int __cpuinit init_smp_flush(void)
-+{
-+ int i;
-+ for_each_cpu_mask(i, cpu_possible_map) {
-+ spin_lock_init(&per_cpu(flush_state.tlbstate_lock, i));
-+ }
-+ return 0;
-+}
-+
-+core_initcall(init_smp_flush);
-+
-+void flush_tlb_current_task(void)
-+{
-+ struct mm_struct *mm = current->mm;
-+ cpumask_t cpu_mask;
-+
-+ preempt_disable();
-+ cpu_mask = mm->cpu_vm_mask;
-+ cpu_clear(smp_processor_id(), cpu_mask);
-+
-+ local_flush_tlb();
-+ if (!cpus_empty(cpu_mask))
-+ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
-+ preempt_enable();
-+}
-+
-+void flush_tlb_mm (struct mm_struct * mm)
-+{
-+ cpumask_t cpu_mask;
-+
-+ preempt_disable();
-+ cpu_mask = mm->cpu_vm_mask;
-+ cpu_clear(smp_processor_id(), cpu_mask);
-+
-+ if (current->active_mm == mm) {
-+ if (current->mm)
-+ local_flush_tlb();
-+ else
-+ leave_mm(smp_processor_id());
-+ }
-+ if (!cpus_empty(cpu_mask))
-+ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
-+
-+ preempt_enable();
-+}
-+
-+void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
-+{
-+ struct mm_struct *mm = vma->vm_mm;
-+ cpumask_t cpu_mask;
-+
-+ preempt_disable();
-+ cpu_mask = mm->cpu_vm_mask;
-+ cpu_clear(smp_processor_id(), cpu_mask);
-+
-+ if (current->active_mm == mm) {
-+ if(current->mm)
-+ __flush_tlb_one(va);
-+ else
-+ leave_mm(smp_processor_id());
-+ }
-+
-+ if (!cpus_empty(cpu_mask))
-+ flush_tlb_others(cpu_mask, mm, va);
-+
-+ preempt_enable();
-+}
-+
-+static void do_flush_tlb_all(void* info)
-+{
-+ unsigned long cpu = smp_processor_id();
-+
-+ __flush_tlb_all();
-+ if (read_pda(mmu_state) == TLBSTATE_LAZY)
-+ leave_mm(cpu);
-+}
-+
-+void flush_tlb_all(void)
-+{
-+ on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
-+}
-+#else
-+#include <xen/evtchn.h>
-+asmlinkage void smp_invalidate_interrupt (void)
-+{ return; }
-+void flush_tlb_current_task(void)
-+{ xen_tlb_flush_mask(&current->mm->cpu_vm_mask); }
-+void flush_tlb_mm (struct mm_struct * mm)
-+{ xen_tlb_flush_mask(&mm->cpu_vm_mask); }
-+void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
-+{ xen_invlpg_mask(&vma->vm_mm->cpu_vm_mask, va); }
-+void flush_tlb_all(void)
-+{ xen_tlb_flush_all(); }
-+#endif /* Xen */
-+
-+/*
-+ * this function sends a 'reschedule' IPI to another CPU.
-+ * it goes straight through and wastes no time serializing
-+ * anything. Worst case is that we lose a reschedule ...
-+ */
-+
-+void smp_send_reschedule(int cpu)
-+{
-+ send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
-+}
-+
-+/*
-+ * Structure and data for smp_call_function(). This is designed to minimise
-+ * static memory requirements. It also looks cleaner.
-+ */
-+static DEFINE_SPINLOCK(call_lock);
-+
-+struct call_data_struct {
-+ void (*func) (void *info);
-+ void *info;
-+ atomic_t started;
-+ atomic_t finished;
-+ int wait;
-+};
-+
-+static struct call_data_struct * call_data;
-+
-+void lock_ipi_call_lock(void)
-+{
-+ spin_lock_irq(&call_lock);
-+}
-+
-+void unlock_ipi_call_lock(void)
-+{
-+ spin_unlock_irq(&call_lock);
-+}
-+
-+/*
-+ * this function sends a 'generic call function' IPI to one other CPU
-+ * in the system.
-+ *
-+ * cpu is a standard Linux logical CPU number.
-+ */
-+static void
-+__smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-+ int nonatomic, int wait)
-+{
-+ struct call_data_struct data;
-+ int cpus = 1;
-+
-+ data.func = func;
-+ data.info = info;
-+ atomic_set(&data.started, 0);
-+ data.wait = wait;
-+ if (wait)
-+ atomic_set(&data.finished, 0);
-+
-+ call_data = &data;
-+ wmb();
-+ /* Send a message to all other CPUs and wait for them to respond */
-+ send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
-+
-+ /* Wait for response */
-+ while (atomic_read(&data.started) != cpus)
-+ cpu_relax();
-+
-+ if (!wait)
-+ return;
-+
-+ while (atomic_read(&data.finished) != cpus)
-+ cpu_relax();
-+}
-+
-+/*
-+ * smp_call_function_single - Run a function on another CPU
-+ * @cpu: The logical CPU number to run @func on.
-+ * @func: The function to run. This must be fast and non-blocking.
-+ * @info: An arbitrary pointer to pass to the function.
-+ * @nonatomic: Currently unused.
-+ * @wait: If true, wait until the function has completed on the other CPU.
-+ *
-+ * Returns 0 on success, else a negative status code.
-+ *
-+ * Does not return until the remote CPU is nearly ready to execute <func>,
-+ * or is executing or has executed it.
-+ */
-+
-+int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
-+ int nonatomic, int wait)
-+{
-+ /* prevent preemption and reschedule on another processor */
-+ int me = get_cpu();
-+ if (cpu == me) {
-+ WARN_ON(1);
-+ put_cpu();
-+ return -EBUSY;
-+ }
-+ spin_lock_bh(&call_lock);
-+ __smp_call_function_single(cpu, func, info, nonatomic, wait);
-+ spin_unlock_bh(&call_lock);
-+ put_cpu();
-+ return 0;
-+}
-+
-+/*
-+ * this function sends a 'generic call function' IPI to all other CPUs
-+ * in the system.
-+ */
-+static void __smp_call_function (void (*func) (void *info), void *info,
-+ int nonatomic, int wait)
-+{
-+ struct call_data_struct data;
-+ int cpus = num_online_cpus()-1;
-+
-+ if (!cpus)
-+ return;
-+
-+ data.func = func;
-+ data.info = info;
-+ atomic_set(&data.started, 0);
-+ data.wait = wait;
-+ if (wait)
-+ atomic_set(&data.finished, 0);
-+
-+ call_data = &data;
-+ wmb();
-+ /* Send a message to all other CPUs and wait for them to respond */
-+ send_IPI_allbutself(CALL_FUNCTION_VECTOR);
-+
-+ /* Wait for response */
-+ while (atomic_read(&data.started) != cpus)
-+#ifndef CONFIG_XEN
-+ cpu_relax();
-+#else
-+ barrier();
-+#endif
-+
-+ if (!wait)
-+ return;
-+
-+ while (atomic_read(&data.finished) != cpus)
-+#ifndef CONFIG_XEN
-+ cpu_relax();
-+#else
-+ barrier();
-+#endif
-+}
-+
-+/*
-+ * smp_call_function - run a function on all other CPUs.
-+ * @func: The function to run. This must be fast and non-blocking.
-+ * @info: An arbitrary pointer to pass to the function.
-+ * @nonatomic: currently unused.
-+ * @wait: If true, wait (atomically) until function has completed on other
-+ * CPUs.
-+ *
-+ * Returns 0 on success, else a negative status code. Does not return until
-+ * remote CPUs are nearly ready to execute func, or are executing or have executed it.
-+ *
-+ * You must not call this function with disabled interrupts or from a
-+ * hardware interrupt handler or from a bottom half handler.
-+ * Actually there are a few legal cases, like panic.
-+ */
-+int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
-+ int wait)
-+{
-+ spin_lock(&call_lock);
-+ __smp_call_function(func,info,nonatomic,wait);
-+ spin_unlock(&call_lock);
-+ return 0;
-+}
-+
-+void smp_stop_cpu(void)
-+{
-+ unsigned long flags;
-+ /*
-+ * Remove this CPU:
-+ */
-+ cpu_clear(smp_processor_id(), cpu_online_map);
-+ local_irq_save(flags);
-+#ifndef CONFIG_XEN
-+ disable_local_APIC();
-+#endif
-+ local_irq_restore(flags);
-+}
-+
-+static void smp_really_stop_cpu(void *dummy)
-+{
-+ smp_stop_cpu();
-+ for (;;)
-+ asm("hlt");
-+}
-+
-+void smp_send_stop(void)
-+{
-+ int nolock = 0;
-+#ifndef CONFIG_XEN
-+ if (reboot_force)
-+ return;
-+#endif
-+ /* Don't deadlock on the call lock in panic */
-+ if (!spin_trylock(&call_lock)) {
-+ /* ignore locking because we have already panicked anyway */
-+ nolock = 1;
-+ }
-+ __smp_call_function(smp_really_stop_cpu, NULL, 0, 0);
-+ if (!nolock)
-+ spin_unlock(&call_lock);
-+
-+ local_irq_disable();
-+#ifndef CONFIG_XEN
-+ disable_local_APIC();
-+#endif
-+ local_irq_enable();
-+}
-+
-+/*
-+ * Reschedule call back. Nothing to do,
-+ * all the work is done automatically when
-+ * we return from the interrupt.
-+ */
-+#ifndef CONFIG_XEN
-+asmlinkage void smp_reschedule_interrupt(void)
-+#else
-+asmlinkage irqreturn_t smp_reschedule_interrupt(void)
-+#endif
-+{
-+#ifndef CONFIG_XEN
-+ ack_APIC_irq();
-+#else
-+ return IRQ_HANDLED;
-+#endif
-+}
-+
-+#ifndef CONFIG_XEN
-+asmlinkage void smp_call_function_interrupt(void)
-+#else
-+asmlinkage irqreturn_t smp_call_function_interrupt(void)
-+#endif
-+{
-+ void (*func) (void *info) = call_data->func;
-+ void *info = call_data->info;
-+ int wait = call_data->wait;
-+
-+#ifndef CONFIG_XEN
-+ ack_APIC_irq();
-+#endif
-+ /*
-+ * Notify initiating CPU that I've grabbed the data and am
-+ * about to execute the function
-+ */
-+ mb();
-+ atomic_inc(&call_data->started);
-+ /*
-+ * At this point the info structure may be out of scope unless wait==1
-+ */
-+ exit_idle();
-+ irq_enter();
-+ (*func)(info);
-+ irq_exit();
-+ if (wait) {
-+ mb();
-+ atomic_inc(&call_data->finished);
-+ }
-+#ifdef CONFIG_XEN
-+ return IRQ_HANDLED;
-+#endif
-+}
-+
-+int safe_smp_processor_id(void)
-+{
-+#ifdef CONFIG_XEN
-+ return smp_processor_id();
-+#else
-+ int apicid, i;
-+
-+ if (disable_apic)
-+ return 0;
-+
-+ apicid = hard_smp_processor_id();
-+ if (x86_cpu_to_apicid[apicid] == apicid)
-+ return apicid;
-+
-+ for (i = 0; i < NR_CPUS; ++i) {
-+ if (x86_cpu_to_apicid[i] == apicid)
-+ return i;
-+ }
-+
-+ /* No entries in x86_cpu_to_apicid? Either no MPS|ACPI,
-+ * or called too early. Either way, we must be CPU 0. */
-+ if (x86_cpu_to_apicid[0] == BAD_APICID)
-+ return 0;
-+
-+ return 0; /* Should not happen */
-+#endif
-+}
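
[Editor's note: __smp_call_function() above coordinates the caller and the IPI handlers purely through the started/finished counters in call_data_struct: each responder increments started once it has latched func/info, and finished after running it, while the initiator spins until the counts match. A compilable userspace sketch of the same rendezvous follows, with a pthread standing in for the IPI; names are illustrative only. Build with -pthread.]

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct call_data_struct {
        void (*func)(void *info);
        void *info;
        atomic_int started;
        atomic_int finished;
        int wait;
    };

    static struct call_data_struct call_data;

    static void greet(void *info) { printf("running on %s\n", (const char *)info); }

    /* Plays the role of smp_call_function_interrupt() on the target CPU. */
    static void *ipi_handler(void *unused)
    {
        (void)unused;
        void (*func)(void *) = call_data.func;   /* latch func/info first */
        void *info = call_data.info;
        int wait = call_data.wait;

        atomic_fetch_add(&call_data.started, 1); /* "I've grabbed the data" */
        func(info);
        if (wait)
            atomic_fetch_add(&call_data.finished, 1);
        return NULL;
    }

    int main(void)
    {
        pthread_t target;
        int cpus = 1;                            /* one "other CPU" */

        call_data.func = greet;
        call_data.info = "the target thread";
        atomic_store(&call_data.started, 0);
        atomic_store(&call_data.finished, 0);
        call_data.wait = 1;

        pthread_create(&target, NULL, ipi_handler, NULL); /* send_IPI_allbutself() */

        while (atomic_load(&call_data.started) != cpus)   /* cpu_relax() loop */
            ;
        while (call_data.wait && atomic_load(&call_data.finished) != cpus)
            ;
        pthread_join(target, NULL);
        return 0;
    }

[The started counter is what lets the initiator reuse the stack-allocated call_data once every responder has latched func/info; finished only matters on the wait==1 path.]
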
-diff --git a/arch/x86_64/kernel/traps-xen.c b/arch/x86_64/kernel/traps-xen.c
-new file mode 100644
-index 0000000..9fd0b94
---- /dev/null
-+++ b/arch/x86_64/kernel/traps-xen.c
-@@ -0,0 +1,1023 @@
-+/*
-+ * linux/arch/x86-64/traps.c
-+ *
-+ * Copyright (C) 1991, 1992 Linus Torvalds
-+ * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
-+ *
-+ * Pentium III FXSR, SSE support
-+ * Gareth Hughes <gareth@valinux.com>, May 2000
-+ *
-+ * $Id: traps.c,v 1.36 2002/03/24 11:09:10 ak Exp $
-+ */
-+
-+/*
-+ * 'Traps.c' handles hardware traps and faults after we have saved some
-+ * state in 'entry.S'.
-+ */
-+#include <linux/config.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/string.h>
-+#include <linux/errno.h>
-+#include <linux/ptrace.h>
-+#include <linux/timer.h>
-+#include <linux/mm.h>
-+#include <linux/init.h>
-+#include <linux/delay.h>
-+#include <linux/spinlock.h>
-+#include <linux/interrupt.h>
-+#include <linux/module.h>
-+#include <linux/moduleparam.h>
-+#include <linux/nmi.h>
-+#include <linux/kprobes.h>
-+
-+#include <asm/system.h>
-+#include <asm/uaccess.h>
-+#include <asm/io.h>
-+#include <asm/atomic.h>
-+#include <asm/debugreg.h>
-+#include <asm/desc.h>
-+#include <asm/i387.h>
-+#include <asm/kdebug.h>
-+#include <asm/processor.h>
-+
-+#include <asm/smp.h>
-+#include <asm/pgalloc.h>
-+#include <asm/pda.h>
-+#include <asm/proto.h>
-+#include <asm/nmi.h>
-+
-+#ifndef CONFIG_X86_NO_IDT
-+extern struct gate_struct idt_table[256];
-+#endif
-+
-+asmlinkage void divide_error(void);
-+asmlinkage void debug(void);
-+asmlinkage void nmi(void);
-+asmlinkage void int3(void);
-+asmlinkage void overflow(void);
-+asmlinkage void bounds(void);
-+asmlinkage void invalid_op(void);
-+asmlinkage void device_not_available(void);
-+asmlinkage void double_fault(void);
-+asmlinkage void coprocessor_segment_overrun(void);
-+asmlinkage void invalid_TSS(void);
-+asmlinkage void segment_not_present(void);
-+asmlinkage void stack_segment(void);
-+asmlinkage void general_protection(void);
-+asmlinkage void page_fault(void);
-+asmlinkage void coprocessor_error(void);
-+asmlinkage void simd_coprocessor_error(void);
-+asmlinkage void reserved(void);
-+asmlinkage void alignment_check(void);
-+asmlinkage void machine_check(void);
-+asmlinkage void spurious_interrupt_bug(void);
-+
-+struct notifier_block *die_chain;
-+static DEFINE_SPINLOCK(die_notifier_lock);
-+
-+int register_die_notifier(struct notifier_block *nb)
-+{
-+ int err = 0;
-+ unsigned long flags;
-+ spin_lock_irqsave(&die_notifier_lock, flags);
-+ err = notifier_chain_register(&die_chain, nb);
-+ spin_unlock_irqrestore(&die_notifier_lock, flags);
-+ return err;
-+}
-+
-+static inline void conditional_sti(struct pt_regs *regs)
-+{
-+ if (regs->eflags & X86_EFLAGS_IF)
-+ local_irq_enable();
-+}
-+
-+static inline void preempt_conditional_sti(struct pt_regs *regs)
-+{
-+ preempt_disable();
-+ if (regs->eflags & X86_EFLAGS_IF)
-+ local_irq_enable();
-+}
-+
-+static inline void preempt_conditional_cli(struct pt_regs *regs)
-+{
-+ if (regs->eflags & X86_EFLAGS_IF)
-+ local_irq_disable();
-+ preempt_enable_no_resched();
-+}
-+
-+static int kstack_depth_to_print = 10;
-+
-+#ifdef CONFIG_KALLSYMS
-+#include <linux/kallsyms.h>
-+int printk_address(unsigned long address)
-+{
-+ unsigned long offset = 0, symsize;
-+ const char *symname;
-+ char *modname;
-+ char *delim = ":";
-+ char namebuf[128];
-+
-+ symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf);
-+ if (!symname)
-+ return printk("[<%016lx>]", address);
-+ if (!modname)
-+ modname = delim = "";
-+ return printk("<%016lx>{%s%s%s%s%+ld}",
-+ address,delim,modname,delim,symname,offset);
-+}
-+#else
-+int printk_address(unsigned long address)
-+{
-+ return printk("[<%016lx>]", address);
-+}
-+#endif
-+
-+static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
-+ unsigned *usedp, const char **idp)
-+{
-+#ifndef CONFIG_X86_NO_TSS
-+ static char ids[][8] = {
-+ [DEBUG_STACK - 1] = "#DB",
-+ [NMI_STACK - 1] = "NMI",
-+ [DOUBLEFAULT_STACK - 1] = "#DF",
-+ [STACKFAULT_STACK - 1] = "#SS",
-+ [MCE_STACK - 1] = "#MC",
-+#if DEBUG_STKSZ > EXCEPTION_STKSZ
-+ [N_EXCEPTION_STACKS ... N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
-+#endif
-+ };
-+ unsigned k;
-+
-+ for (k = 0; k < N_EXCEPTION_STACKS; k++) {
-+ unsigned long end;
-+
-+ switch (k + 1) {
-+#if DEBUG_STKSZ > EXCEPTION_STKSZ
-+ case DEBUG_STACK:
-+ end = cpu_pda(cpu)->debugstack + DEBUG_STKSZ;
-+ break;
-+#endif
-+ default:
-+ end = per_cpu(init_tss, cpu).ist[k];
-+ break;
-+ }
-+ if (stack >= end)
-+ continue;
-+ if (stack >= end - EXCEPTION_STKSZ) {
-+ if (*usedp & (1U << k))
-+ break;
-+ *usedp |= 1U << k;
-+ *idp = ids[k];
-+ return (unsigned long *)end;
-+ }
-+#if DEBUG_STKSZ > EXCEPTION_STKSZ
-+ if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
-+ unsigned j = N_EXCEPTION_STACKS - 1;
-+
-+ do {
-+ ++j;
-+ end -= EXCEPTION_STKSZ;
-+ ids[j][4] = '1' + (j - N_EXCEPTION_STACKS);
-+ } while (stack < end - EXCEPTION_STKSZ);
-+ if (*usedp & (1U << j))
-+ break;
-+ *usedp |= 1U << j;
-+ *idp = ids[j];
-+ return (unsigned long *)end;
-+ }
-+#endif
-+ }
-+#endif
-+ return NULL;
-+}
-+
-+/*
-+ * x86-64 can have up to three kernel stacks:
-+ * process stack
-+ * interrupt stack
-+ * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
-+ */
-+
-+void show_trace(unsigned long *stack)
-+{
-+ const unsigned cpu = safe_smp_processor_id();
-+ unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
-+ int i;
-+ unsigned used = 0;
-+
-+ printk("\nCall Trace:");
-+
-+#define HANDLE_STACK(cond) \
-+ do while (cond) { \
-+ unsigned long addr = *stack++; \
-+ if (kernel_text_address(addr)) { \
-+ if (i > 50) { \
-+ printk("\n "); \
-+ i = 0; \
-+ } \
-+ else \
-+ i += printk(" "); \
-+ /* \
-+ * If the address is either in the text segment of the \
-+ * kernel, or in the region which contains vmalloc'ed \
-+ * memory, it *may* be the address of a calling \
-+ * routine; if so, print it so that someone tracing \
-+ * down the cause of the crash will be able to figure \
-+ * out the call path that was taken. \
-+ */ \
-+ i += printk_address(addr); \
-+ } \
-+ } while (0)
-+
-+ for(i = 11; ; ) {
-+ const char *id;
-+ unsigned long *estack_end;
-+ estack_end = in_exception_stack(cpu, (unsigned long)stack,
-+ &used, &id);
-+
-+ if (estack_end) {
-+ i += printk(" <%s>", id);
-+ HANDLE_STACK (stack < estack_end);
-+ i += printk(" <EOE>");
-+ stack = (unsigned long *) estack_end[-2];
-+ continue;
-+ }
-+ if (irqstack_end) {
-+ unsigned long *irqstack;
-+ irqstack = irqstack_end -
-+ (IRQSTACKSIZE - 64) / sizeof(*irqstack);
-+
-+ if (stack >= irqstack && stack < irqstack_end) {
-+ i += printk(" <IRQ>");
-+ HANDLE_STACK (stack < irqstack_end);
-+ stack = (unsigned long *) (irqstack_end[-1]);
-+ irqstack_end = NULL;
-+ i += printk(" <EOI>");
-+ continue;
-+ }
-+ }
-+ break;
-+ }
-+
-+ HANDLE_STACK (((long) stack & (THREAD_SIZE-1)) != 0);
-+#undef HANDLE_STACK
-+ printk("\n");
-+}
-+
-+void show_stack(struct task_struct *tsk, unsigned long * rsp)
-+{
-+ unsigned long *stack;
-+ int i;
-+ const int cpu = safe_smp_processor_id();
-+ unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr);
-+ unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);
-+
-+ // debugging aid: "show_stack(NULL, NULL);" prints the
-+ // back trace for this cpu.
-+
-+ if (rsp == NULL) {
-+ if (tsk)
-+ rsp = (unsigned long *)tsk->thread.rsp;
-+ else
-+ rsp = (unsigned long *)&rsp;
-+ }
-+
-+ stack = rsp;
-+ for(i=0; i < kstack_depth_to_print; i++) {
-+ if (stack >= irqstack && stack <= irqstack_end) {
-+ if (stack == irqstack_end) {
-+ stack = (unsigned long *) (irqstack_end[-1]);
-+ printk(" <EOI> ");
-+ }
-+ } else {
-+ if (((long) stack & (THREAD_SIZE-1)) == 0)
-+ break;
-+ }
-+ if (i && ((i % 4) == 0))
-+ printk("\n ");
-+ printk("%016lx ", *stack++);
-+ touch_nmi_watchdog();
-+ }
-+ show_trace((unsigned long *)rsp);
-+}
-+
-+/*
-+ * The architecture-independent dump_stack generator
-+ */
-+void dump_stack(void)
-+{
-+ unsigned long dummy;
-+ show_trace(&dummy);
-+}
-+
-+EXPORT_SYMBOL(dump_stack);
-+
-+void show_registers(struct pt_regs *regs)
-+{
-+ int i;
-+ int in_kernel = !user_mode(regs);
-+ unsigned long rsp;
-+ const int cpu = safe_smp_processor_id();
-+ struct task_struct *cur = cpu_pda(cpu)->pcurrent;
-+
-+ rsp = regs->rsp;
-+
-+ printk("CPU %d ", cpu);
-+ __show_regs(regs);
-+ printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
-+ cur->comm, cur->pid, task_thread_info(cur), cur);
-+
-+ /*
-+ * When in-kernel, we also print out the stack and code at the
-+ * time of the fault..
-+ */
-+ if (in_kernel) {
-+
-+ printk("Stack: ");
-+ show_stack(NULL, (unsigned long*)rsp);
-+
-+ printk("\nCode: ");
-+ if(regs->rip < PAGE_OFFSET)
-+ goto bad;
-+
-+ for(i=0;i<20;i++)
-+ {
-+ unsigned char c;
-+ if(__get_user(c, &((unsigned char*)regs->rip)[i])) {
-+bad:
-+ printk(" Bad RIP value.");
-+ break;
-+ }
-+ printk("%02x ", c);
-+ }
-+ }
-+ printk("\n");
-+}
-+
-+void handle_BUG(struct pt_regs *regs)
-+{
-+ struct bug_frame f;
-+ long len;
-+ const char *prefix = "";
-+
-+ if (user_mode(regs))
-+ return;
-+ if (__copy_from_user(&f, (const void __user *) regs->rip,
-+ sizeof(struct bug_frame)))
-+ return;
-+ if (f.filename >= 0 ||
-+ f.ud2[0] != 0x0f || f.ud2[1] != 0x0b)
-+ return;
-+ len = __strnlen_user((char *)(long)f.filename, PATH_MAX) - 1;
-+ if (len < 0 || len >= PATH_MAX)
-+ f.filename = (int)(long)"unmapped filename";
-+ else if (len > 50) {
-+ f.filename += len - 50;
-+ prefix = "...";
-+ }
-+ printk("----------- [cut here ] --------- [please bite here ] ---------\n");
-+ printk(KERN_ALERT "Kernel BUG at %s%.50s:%d\n", prefix, (char *)(long)f.filename, f.line);
-+}
-+
-+#ifdef CONFIG_BUG
-+void out_of_line_bug(void)
-+{
-+ BUG();
-+}
-+#endif
-+
-+static DEFINE_SPINLOCK(die_lock);
-+static int die_owner = -1;
-+
-+unsigned __kprobes long oops_begin(void)
-+{
-+ int cpu = safe_smp_processor_id();
-+ unsigned long flags;
-+
-+ /* racy, but better than risking deadlock. */
-+ local_irq_save(flags);
-+ if (!spin_trylock(&die_lock)) {
-+ if (cpu == die_owner)
-+ /* nested oops. should stop eventually */;
-+ else
-+ spin_lock(&die_lock);
-+ }
-+ die_owner = cpu;
-+ console_verbose();
-+ bust_spinlocks(1);
-+ return flags;
-+}
-+
-+void __kprobes oops_end(unsigned long flags)
-+{
-+ die_owner = -1;
-+ bust_spinlocks(0);
-+ spin_unlock_irqrestore(&die_lock, flags);
-+ if (panic_on_oops)
-+ panic("Oops");
-+}
-+
-+void __kprobes __die(const char * str, struct pt_regs * regs, long err)
-+{
-+ static int die_counter;
-+ printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff,++die_counter);
-+#ifdef CONFIG_PREEMPT
-+ printk("PREEMPT ");
-+#endif
-+#ifdef CONFIG_SMP
-+ printk("SMP ");
-+#endif
-+#ifdef CONFIG_DEBUG_PAGEALLOC
-+ printk("DEBUG_PAGEALLOC");
-+#endif
-+ printk("\n");
-+ notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
-+ show_registers(regs);
-+ /* Executive summary in case the oops scrolled away */
-+ printk(KERN_ALERT "RIP ");
-+ printk_address(regs->rip);
-+ printk(" RSP <%016lx>\n", regs->rsp);
-+}
-+
-+void die(const char * str, struct pt_regs * regs, long err)
-+{
-+ unsigned long flags = oops_begin();
-+
-+ handle_BUG(regs);
-+ __die(str, regs, err);
-+ oops_end(flags);
-+ do_exit(SIGSEGV);
-+}
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+void __kprobes die_nmi(char *str, struct pt_regs *regs)
-+{
-+ unsigned long flags = oops_begin();
-+
-+ /*
-+ * We are in trouble anyway, let's at least try
-+ * to get a message out.
-+ */
-+ printk(str, safe_smp_processor_id());
-+ show_registers(regs);
-+ if (panic_on_timeout || panic_on_oops)
-+ panic("nmi watchdog");
-+ printk("console shuts up ...\n");
-+ oops_end(flags);
-+ do_exit(SIGSEGV);
-+}
-+#endif
-+
-+static void __kprobes do_trap(int trapnr, int signr, char *str,
-+ struct pt_regs * regs, long error_code,
-+ siginfo_t *info)
-+{
-+ struct task_struct *tsk = current;
-+
-+ conditional_sti(regs);
-+
-+ tsk->thread.error_code = error_code;
-+ tsk->thread.trap_no = trapnr;
-+
-+ if (user_mode(regs)) {
-+ if (exception_trace && unhandled_signal(tsk, signr))
-+ printk(KERN_INFO
-+ "%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n",
-+ tsk->comm, tsk->pid, str,
-+ regs->rip,regs->rsp,error_code);
-+
-+ if (info)
-+ force_sig_info(signr, info, tsk);
-+ else
-+ force_sig(signr, tsk);
-+ return;
-+ }
-+
-+
-+ /* kernel trap */
-+ {
-+ const struct exception_table_entry *fixup;
-+ fixup = search_exception_tables(regs->rip);
-+ if (fixup) {
-+ regs->rip = fixup->fixup;
-+ } else
-+ die(str, regs, error_code);
-+ return;
-+ }
-+}
-+
-+#define DO_ERROR(trapnr, signr, str, name) \
-+asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
-+{ \
-+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
-+ == NOTIFY_STOP) \
-+ return; \
-+ do_trap(trapnr, signr, str, regs, error_code, NULL); \
-+}
-+
-+#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
-+asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
-+{ \
-+ siginfo_t info; \
-+ info.si_signo = signr; \
-+ info.si_errno = 0; \
-+ info.si_code = sicode; \
-+ info.si_addr = (void __user *)siaddr; \
-+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
-+ == NOTIFY_STOP) \
-+ return; \
-+ do_trap(trapnr, signr, str, regs, error_code, &info); \
-+}
-+
-+DO_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->rip)
-+DO_ERROR( 4, SIGSEGV, "overflow", overflow)
-+DO_ERROR( 5, SIGSEGV, "bounds", bounds)
-+DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->rip)
-+DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
-+DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
-+DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
-+DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
-+DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
-+DO_ERROR(18, SIGSEGV, "reserved", reserved)
-+DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
-+
-+asmlinkage void do_double_fault(struct pt_regs * regs, long error_code)
-+{
-+ static const char str[] = "double fault";
-+ struct task_struct *tsk = current;
-+
-+ /* Return not checked because a double fault cannot be ignored */
-+ notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);
-+
-+ tsk->thread.error_code = error_code;
-+ tsk->thread.trap_no = 8;
-+
-+ /* This is always a kernel trap and never fixable (and thus must
-+ never return). */
-+ for (;;)
-+ die(str, regs, error_code);
-+}
-+
-+asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
-+ long error_code)
-+{
-+ struct task_struct *tsk = current;
-+
-+ conditional_sti(regs);
-+
-+ tsk->thread.error_code = error_code;
-+ tsk->thread.trap_no = 13;
-+
-+ if (user_mode(regs)) {
-+ if (exception_trace && unhandled_signal(tsk, SIGSEGV))
-+ printk(KERN_INFO
-+ "%s[%d] general protection rip:%lx rsp:%lx error:%lx\n",
-+ tsk->comm, tsk->pid,
-+ regs->rip,regs->rsp,error_code);
-+
-+ force_sig(SIGSEGV, tsk);
-+ return;
-+ }
-+
-+ /* kernel gp */
-+ {
-+ const struct exception_table_entry *fixup;
-+ fixup = search_exception_tables(regs->rip);
-+ if (fixup) {
-+ regs->rip = fixup->fixup;
-+ return;
-+ }
-+ if (notify_die(DIE_GPF, "general protection fault", regs,
-+ error_code, 13, SIGSEGV) == NOTIFY_STOP)
-+ return;
-+ die("general protection fault", regs, error_code);
-+ }
-+}
-+
-+static __kprobes void
-+mem_parity_error(unsigned char reason, struct pt_regs * regs)
-+{
-+ printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
-+ printk("You probably have a hardware problem with your RAM chips\n");
-+
-+#if 0 /* XEN */
-+ /* Clear and disable the memory parity error line. */
-+ reason = (reason & 0xf) | 4;
-+ outb(reason, 0x61);
-+#endif /* XEN */
-+}
-+
-+static __kprobes void
-+io_check_error(unsigned char reason, struct pt_regs * regs)
-+{
-+ printk("NMI: IOCK error (debug interrupt?)\n");
-+ show_registers(regs);
-+
-+#if 0 /* XEN */
-+ /* Re-enable the IOCK line, wait for a few seconds */
-+ reason = (reason & 0xf) | 8;
-+ outb(reason, 0x61);
-+ mdelay(2000);
-+ reason &= ~8;
-+ outb(reason, 0x61);
-+#endif /* XEN */
-+}
-+
-+static __kprobes void
-+unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
-+{ printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
-+ printk("Dazed and confused, but trying to continue\n");
-+ printk("Do you have a strange power saving mode enabled?\n");
-+}
-+
-+/* Runs on IST stack. This code must keep interrupts off all the time.
-+ Nested NMIs are prevented by the CPU. */
-+asmlinkage __kprobes void default_do_nmi(struct pt_regs *regs)
-+{
-+ unsigned char reason = 0;
-+ int cpu;
-+
-+ cpu = smp_processor_id();
-+
-+ /* Only the BSP gets external NMIs from the system. */
-+ if (!cpu)
-+ reason = get_nmi_reason();
-+
-+ if (!(reason & 0xc0)) {
-+ if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
-+ == NOTIFY_STOP)
-+ return;
-+#ifdef CONFIG_X86_LOCAL_APIC
-+ /*
-+ * Ok, so this is none of the documented NMI sources,
-+ * so it must be the NMI watchdog.
-+ */
-+ if (nmi_watchdog > 0) {
-+ nmi_watchdog_tick(regs,reason);
-+ return;
-+ }
-+#endif
-+ unknown_nmi_error(reason, regs);
-+ return;
-+ }
-+ if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
-+ return;
-+
-+ /* AK: following checks seem to be broken on modern chipsets. FIXME */
-+
-+ if (reason & 0x80)
-+ mem_parity_error(reason, regs);
-+ if (reason & 0x40)
-+ io_check_error(reason, regs);
-+}
-+
-+/* runs on IST stack. */
-+asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
-+{
-+ if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
-+ return;
-+ }
-+ do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
-+ return;
-+}
-+
-+/* Help handler running on IST stack to switch back to user stack
-+ for scheduling or signal handling. The actual stack switch is done in
-+ entry.S */
-+asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
-+{
-+ struct pt_regs *regs = eregs;
-+ /* Did already sync */
-+ if (eregs == (struct pt_regs *)eregs->rsp)
-+ ;
-+ /* Exception from user space */
-+ else if (user_mode(eregs))
-+ regs = task_pt_regs(current);
-+ /* Exception from kernel and interrupts are enabled. Move to
-+ kernel process stack. */
-+ else if (eregs->eflags & X86_EFLAGS_IF)
-+ regs = (struct pt_regs *)(eregs->rsp -= sizeof(struct pt_regs));
-+ if (eregs != regs)
-+ *regs = *eregs;
-+ return regs;
-+}
-+
-+/* runs on IST stack. */
-+asmlinkage void __kprobes do_debug(struct pt_regs * regs,
-+ unsigned long error_code)
-+{
-+ unsigned long condition;
-+ struct task_struct *tsk = current;
-+ siginfo_t info;
-+
-+ get_debugreg(condition, 6);
-+
-+ if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
-+ SIGTRAP) == NOTIFY_STOP)
-+ return;
-+
-+ preempt_conditional_sti(regs);
-+
-+ /* Mask out spurious debug traps due to lazy DR7 setting */
-+ if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
-+ if (!tsk->thread.debugreg7) {
-+ goto clear_dr7;
-+ }
-+ }
-+
-+ tsk->thread.debugreg6 = condition;
-+
-+ /* Mask out spurious TF errors due to lazy TF clearing */
-+ if (condition & DR_STEP) {
-+ /*
-+ * The TF error should be masked out only if the current
-+ * process is not traced and if the TRAP flag has been set
-+ * previously by a tracing process (condition detected by
-+ * the PT_DTRACE flag); remember that the i386 TRAP flag
-+ * can be modified by the process itself in user mode,
-+ * allowing programs to debug themselves without the ptrace()
-+ * interface.
-+ */
-+ if (!user_mode(regs))
-+ goto clear_TF_reenable;
-+ /*
-+ * Was the TF flag set by a debugger? If so, clear it now,
-+ * so that register information is correct.
-+ */
-+ if (tsk->ptrace & PT_DTRACE) {
-+ regs->eflags &= ~TF_MASK;
-+ tsk->ptrace &= ~PT_DTRACE;
-+ }
-+ }
-+
-+ /* Ok, finally something we can handle */
-+ tsk->thread.trap_no = 1;
-+ tsk->thread.error_code = error_code;
-+ info.si_signo = SIGTRAP;
-+ info.si_errno = 0;
-+ info.si_code = TRAP_BRKPT;
-+ info.si_addr = user_mode(regs) ? (void __user *)regs->rip : NULL;
-+ force_sig_info(SIGTRAP, &info, tsk);
-+
-+clear_dr7:
-+ set_debugreg(0UL, 7);
-+ preempt_conditional_cli(regs);
-+ return;
-+
-+clear_TF_reenable:
-+ set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
-+ regs->eflags &= ~TF_MASK;
-+ preempt_conditional_cli(regs);
-+}
-+
-+static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
-+{
-+ const struct exception_table_entry *fixup;
-+ fixup = search_exception_tables(regs->rip);
-+ if (fixup) {
-+ regs->rip = fixup->fixup;
-+ return 1;
-+ }
-+ notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
-+ /* Illegal floating point operation in the kernel */
-+ current->thread.trap_no = trapnr;
-+ die(str, regs, 0);
-+ return 0;
-+}
-+
-+/*
-+ * Note that we play around with the 'TS' bit in an attempt to get
-+ * the correct behaviour even in the presence of the asynchronous
-+ * IRQ13 behaviour
-+ */
-+asmlinkage void do_coprocessor_error(struct pt_regs *regs)
-+{
-+ void __user *rip = (void __user *)(regs->rip);
-+ struct task_struct * task;
-+ siginfo_t info;
-+ unsigned short cwd, swd;
-+
-+ conditional_sti(regs);
-+ if (!user_mode(regs) &&
-+ kernel_math_error(regs, "kernel x87 math error", 16))
-+ return;
-+
-+ /*
-+ * Save the info for the exception handler and clear the error.
-+ */
-+ task = current;
-+ save_init_fpu(task);
-+ task->thread.trap_no = 16;
-+ task->thread.error_code = 0;
-+ info.si_signo = SIGFPE;
-+ info.si_errno = 0;
-+ info.si_code = __SI_FAULT;
-+ info.si_addr = rip;
-+ /*
-+ * (~cwd & swd) will mask out exceptions that are not set to unmasked
-+ * status. 0x3f is the exception bits in these regs, 0x200 is the
-+ * C1 reg you need in case of a stack fault, 0x040 is the stack
-+ * fault bit. We should only be taking one exception at a time,
-+ * so if this combination doesn't produce any single exception,
-+ * then we have a bad program that isn't synchronizing its FPU usage
-+ * and it will suffer the consequences since we won't be able to
-+ * fully reproduce the context of the exception
-+ */
-+ cwd = get_fpu_cwd(task);
-+ swd = get_fpu_swd(task);
-+ switch (swd & ~cwd & 0x3f) {
-+ case 0x000:
-+ default:
-+ break;
-+ case 0x001: /* Invalid Op */
-+ /*
-+ * swd & 0x240 == 0x040: Stack Underflow
-+ * swd & 0x240 == 0x240: Stack Overflow
-+ * User must clear the SF bit (0x40) if set
-+ */
-+ info.si_code = FPE_FLTINV;
-+ break;
-+ case 0x002: /* Denormalize */
-+ case 0x010: /* Underflow */
-+ info.si_code = FPE_FLTUND;
-+ break;
-+ case 0x004: /* Zero Divide */
-+ info.si_code = FPE_FLTDIV;
-+ break;
-+ case 0x008: /* Overflow */
-+ info.si_code = FPE_FLTOVF;
-+ break;
-+ case 0x020: /* Precision */
-+ info.si_code = FPE_FLTRES;
-+ break;
-+ }
-+ force_sig_info(SIGFPE, &info, task);
-+}
-+
-+asmlinkage void bad_intr(void)
-+{
-+ printk("bad interrupt");
-+}
-+
-+asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
-+{
-+ void __user *rip = (void __user *)(regs->rip);
-+ struct task_struct * task;
-+ siginfo_t info;
-+ unsigned short mxcsr;
-+
-+ conditional_sti(regs);
-+ if (!user_mode(regs) &&
-+ kernel_math_error(regs, "kernel simd math error", 19))
-+ return;
-+
-+ /*
-+ * Save the info for the exception handler and clear the error.
-+ */
-+ task = current;
-+ save_init_fpu(task);
-+ task->thread.trap_no = 19;
-+ task->thread.error_code = 0;
-+ info.si_signo = SIGFPE;
-+ info.si_errno = 0;
-+ info.si_code = __SI_FAULT;
-+ info.si_addr = rip;
-+ /*
-+ * The SIMD FPU exceptions are handled a little differently, as there
-+ * is only a single status/control register. Thus, to determine which
-+ * unmasked exception was caught we must mask the exception mask bits
-+ * at 0x1f80, and then use these to mask the exception bits at 0x3f.
-+ */
-+ mxcsr = get_fpu_mxcsr(task);
-+ switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
-+ case 0x000:
-+ default:
-+ break;
-+ case 0x001: /* Invalid Op */
-+ info.si_code = FPE_FLTINV;
-+ break;
-+ case 0x002: /* Denormalize */
-+ case 0x010: /* Underflow */
-+ info.si_code = FPE_FLTUND;
-+ break;
-+ case 0x004: /* Zero Divide */
-+ info.si_code = FPE_FLTDIV;
-+ break;
-+ case 0x008: /* Overflow */
-+ info.si_code = FPE_FLTOVF;
-+ break;
-+ case 0x020: /* Precision */
-+ info.si_code = FPE_FLTRES;
-+ break;
-+ }
-+ force_sig_info(SIGFPE, &info, task);
-+}
-+
-+asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs)
-+{
-+}
-+
-+#if 0
-+asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
-+{
-+}
-+#endif
-+
-+asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
-+{
-+}
-+
-+/*
-+ * 'math_state_restore()' saves the current math information in the
-+ * old math state array, and gets the new ones from the current task
-+ *
-+ * Careful.. There are problems with IBM-designed IRQ13 behaviour.
-+ * Don't touch unless you *really* know how it works.
-+ */
-+asmlinkage void math_state_restore(void)
-+{
-+ struct task_struct *me = current;
-+ /* clts(); */ /* 'clts' is done for us by Xen during virtual trap. */
-+
-+ if (!used_math())
-+ init_fpu(me);
-+ restore_fpu_checking(&me->thread.i387.fxsave);
-+ task_thread_info(me)->status |= TS_USEDFPU;
-+}
-+
-+
-+/*
-+ * NB. All these are "interrupt gates" (i.e. events_mask is set) because we
-+ * specify <dpl>|4 in the second field.
-+ */
-+static trap_info_t trap_table[] = {
-+ { 0, 0|4, __KERNEL_CS, (unsigned long)divide_error },
-+ { 1, 0|4, __KERNEL_CS, (unsigned long)debug },
-+ { 3, 3|4, __KERNEL_CS, (unsigned long)int3 },
-+ { 4, 3|4, __KERNEL_CS, (unsigned long)overflow },
-+ { 5, 0|4, __KERNEL_CS, (unsigned long)bounds },
-+ { 6, 0|4, __KERNEL_CS, (unsigned long)invalid_op },
-+ { 7, 0|4, __KERNEL_CS, (unsigned long)device_not_available },
-+ { 9, 0|4, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun},
-+ { 10, 0|4, __KERNEL_CS, (unsigned long)invalid_TSS },
-+ { 11, 0|4, __KERNEL_CS, (unsigned long)segment_not_present },
-+ { 12, 0|4, __KERNEL_CS, (unsigned long)stack_segment },
-+ { 13, 0|4, __KERNEL_CS, (unsigned long)general_protection },
-+ { 14, 0|4, __KERNEL_CS, (unsigned long)page_fault },
-+ { 15, 0|4, __KERNEL_CS, (unsigned long)spurious_interrupt_bug },
-+ { 16, 0|4, __KERNEL_CS, (unsigned long)coprocessor_error },
-+ { 17, 0|4, __KERNEL_CS, (unsigned long)alignment_check },
-+#ifdef CONFIG_X86_MCE
-+ { 18, 0|4, __KERNEL_CS, (unsigned long)machine_check },
-+#endif
-+ { 19, 0|4, __KERNEL_CS, (unsigned long)simd_coprocessor_error },
-+#ifdef CONFIG_IA32_EMULATION
-+ { IA32_SYSCALL_VECTOR, 3|4, __KERNEL_CS, (unsigned long)ia32_syscall},
-+#endif
-+ { 0, 0, 0, 0 }
-+};
-+
-+void __init trap_init(void)
-+{
-+ int ret;
-+
-+ ret = HYPERVISOR_set_trap_table(trap_table);
-+
-+ if (ret)
-+		printk("HYPERVISOR_set_trap_table failed: error %d\n",
-+ ret);
-+
-+ /*
-+ * Should be a barrier for any external CPU state.
-+ */
-+ cpu_init();
-+}
-+
-+void smp_trap_init(trap_info_t *trap_ctxt)
-+{
-+ trap_info_t *t = trap_table;
-+
-+ for (t = trap_table; t->address; t++) {
-+ trap_ctxt[t->vector].flags = t->flags;
-+ trap_ctxt[t->vector].cs = t->cs;
-+ trap_ctxt[t->vector].address = t->address;
-+ }
-+}
-+
-+
-+/* Actual parsing is done early in setup.c. */
-+static int __init oops_dummy(char *s)
-+{
-+ panic_on_oops = 1;
-+ return -1;
-+}
-+__setup("oops=", oops_dummy);
-+
-+static int __init kstack_setup(char *s)
-+{
-+ kstack_depth_to_print = simple_strtoul(s,NULL,0);
-+ return 0;
-+}
-+__setup("kstack=", kstack_setup);
-+
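
The do_coprocessor_error() handler removed above folds the x87 status word, masked by the control word, into a single si_code. Below is a minimal user-space sketch of that decode, added here for illustration only: the FPE_* strings stand in for the kernel constants, and the sample cwd/swd values are invented.

#include <stdio.h>

/* Decode the unmasked x87 exception bits (swd & ~cwd & 0x3f) the way
 * do_coprocessor_error() does; stack faults (SF, 0x40) surface through
 * the Invalid Op case, as the handler's comment notes. */
static const char *fpu_si_code(unsigned short cwd, unsigned short swd)
{
	switch (swd & ~cwd & 0x3f) {
	case 0x001: return "FPE_FLTINV";	/* Invalid Op */
	case 0x002:				/* Denormalize */
	case 0x010: return "FPE_FLTUND";	/* Underflow */
	case 0x004: return "FPE_FLTDIV";	/* Zero Divide */
	case 0x008: return "FPE_FLTOVF";	/* Overflow */
	case 0x020: return "FPE_FLTRES";	/* Precision */
	default:    return "__SI_FAULT";	/* none or several: keep default */
	}
}

int main(void)
{
	/* cwd 0x037b leaves only zero-divide unmasked; swd reports one. */
	printf("%s\n", fpu_si_code(0x037b, 0x0004));	/* FPE_FLTDIV */
	return 0;
}
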
-diff --git a/arch/x86_64/kernel/vsyscall-xen.c b/arch/x86_64/kernel/vsyscall-xen.c
-new file mode 100644
-index 0000000..190834b
---- /dev/null
-+++ b/arch/x86_64/kernel/vsyscall-xen.c
-@@ -0,0 +1,239 @@
-+/*
-+ * linux/arch/x86_64/kernel/vsyscall.c
-+ *
-+ * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
-+ * Copyright 2003 Andi Kleen, SuSE Labs.
-+ *
-+ * Thanks to hpa@transmeta.com for some useful hint.
-+ * Special thanks to Ingo Molnar for his early experience with
-+ * a different vsyscall implementation for Linux/IA32 and for the name.
-+ *
-+ * vsyscall 1 is located at -10Mbyte, vsyscall 2 is located
-+ * at virtual address -10Mbyte+1024bytes etc... There are at max 4
-+ * vsyscalls. One vsyscall can reserve more than 1 slot to avoid
-+ * jumping out of line if necessary. We cannot add more with this
-+ * mechanism because older kernels won't return -ENOSYS.
-+ * If we want more than four we need a vDSO.
-+ *
-+ * Note: the concept clashes with user mode linux. If you use UML and
-+ * want per guest time just set the kernel.vsyscall64 sysctl to 0.
-+ */
-+
-+#include <linux/time.h>
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/timer.h>
-+#include <linux/seqlock.h>
-+#include <linux/jiffies.h>
-+#include <linux/sysctl.h>
-+
-+#include <asm/vsyscall.h>
-+#include <asm/pgtable.h>
-+#include <asm/page.h>
-+#include <asm/fixmap.h>
-+#include <asm/errno.h>
-+#include <asm/io.h>
-+
-+#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
-+
-+int __sysctl_vsyscall __section_sysctl_vsyscall = 1;
-+seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED;
-+
-+#include <asm/unistd.h>
-+
-+static __always_inline void timeval_normalize(struct timeval * tv)
-+{
-+ time_t __sec;
-+
-+ __sec = tv->tv_usec / 1000000;
-+ if (__sec) {
-+ tv->tv_usec %= 1000000;
-+ tv->tv_sec += __sec;
-+ }
-+}
-+
-+static __always_inline void do_vgettimeofday(struct timeval * tv)
-+{
-+ long sequence, t;
-+ unsigned long sec, usec;
-+
-+ do {
-+ sequence = read_seqbegin(&__xtime_lock);
-+
-+ sec = __xtime.tv_sec;
-+ usec = (__xtime.tv_nsec / 1000) +
-+ (__jiffies - __wall_jiffies) * (1000000 / HZ);
-+
-+ if (__vxtime.mode != VXTIME_HPET) {
-+ t = get_cycles_sync();
-+ if (t < __vxtime.last_tsc)
-+ t = __vxtime.last_tsc;
-+ usec += ((t - __vxtime.last_tsc) *
-+ __vxtime.tsc_quot) >> 32;
-+ /* See comment in x86_64 do_gettimeofday. */
-+ } else {
-+ usec += ((readl((void *)fix_to_virt(VSYSCALL_HPET) + 0xf0) -
-+ __vxtime.last) * __vxtime.quot) >> 32;
-+ }
-+ } while (read_seqretry(&__xtime_lock, sequence));
-+
-+ tv->tv_sec = sec + usec / 1000000;
-+ tv->tv_usec = usec % 1000000;
-+}
-+
-+/* RED-PEN may want to readd seq locking, but then the variable should be write-once. */
-+static __always_inline void do_get_tz(struct timezone * tz)
-+{
-+ *tz = __sys_tz;
-+}
-+
-+static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
-+{
-+ int ret;
-+ asm volatile("vsysc2: syscall"
-+ : "=a" (ret)
-+ : "0" (__NR_gettimeofday),"D" (tv),"S" (tz) : __syscall_clobber );
-+ return ret;
-+}
-+
-+static __always_inline long time_syscall(long *t)
-+{
-+ long secs;
-+ asm volatile("vsysc1: syscall"
-+ : "=a" (secs)
-+ : "0" (__NR_time),"D" (t) : __syscall_clobber);
-+ return secs;
-+}
-+
-+int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
-+{
-+ if (unlikely(!__sysctl_vsyscall))
-+ return gettimeofday(tv,tz);
-+ if (tv)
-+ do_vgettimeofday(tv);
-+ if (tz)
-+ do_get_tz(tz);
-+ return 0;
-+}
-+
-+/* This will break when the xtime seconds get inaccurate, but that is
-+ * unlikely */
-+time_t __vsyscall(1) vtime(time_t *t)
-+{
-+ if (unlikely(!__sysctl_vsyscall))
-+ return time_syscall(t);
-+ else if (t)
-+ *t = __xtime.tv_sec;
-+ return __xtime.tv_sec;
-+}
-+
-+long __vsyscall(2) venosys_0(void)
-+{
-+ return -ENOSYS;
-+}
-+
-+long __vsyscall(3) venosys_1(void)
-+{
-+ return -ENOSYS;
-+}
-+
-+#ifdef CONFIG_SYSCTL
-+
-+#define SYSCALL 0x050f
-+#define NOP2 0x9090
-+
-+/*
-+ * NOP out syscall in vsyscall page when not needed.
-+ */
-+static int vsyscall_sysctl_change(ctl_table *ctl, int write, struct file * filp,
-+ void __user *buffer, size_t *lenp, loff_t *ppos)
-+{
-+ extern u16 vsysc1, vsysc2;
-+ u16 *map1, *map2;
-+ int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
-+ if (!write)
-+ return ret;
-+ /* gcc has some trouble with __va(__pa()), so just do it this
-+ way. */
-+ map1 = ioremap(__pa_symbol(&vsysc1), 2);
-+ if (!map1)
-+ return -ENOMEM;
-+ map2 = ioremap(__pa_symbol(&vsysc2), 2);
-+ if (!map2) {
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+ if (!sysctl_vsyscall) {
-+ *map1 = SYSCALL;
-+ *map2 = SYSCALL;
-+ } else {
-+ *map1 = NOP2;
-+ *map2 = NOP2;
-+ }
-+ iounmap(map2);
-+out:
-+ iounmap(map1);
-+ return ret;
-+}
-+
-+static int vsyscall_sysctl_nostrat(ctl_table *t, int __user *name, int nlen,
-+ void __user *oldval, size_t __user *oldlenp,
-+ void __user *newval, size_t newlen,
-+ void **context)
-+{
-+ return -ENOSYS;
-+}
-+
-+static ctl_table kernel_table2[] = {
-+ { .ctl_name = 99, .procname = "vsyscall64",
-+ .data = &sysctl_vsyscall, .maxlen = sizeof(int), .mode = 0644,
-+ .strategy = vsyscall_sysctl_nostrat,
-+ .proc_handler = vsyscall_sysctl_change },
-+ { 0, }
-+};
-+
-+static ctl_table kernel_root_table2[] = {
-+ { .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555,
-+ .child = kernel_table2 },
-+ { 0 },
-+};
-+
-+#endif
-+
-+static void __init map_vsyscall(void)
-+{
-+ extern char __vsyscall_0;
-+ unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
-+
-+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
-+}
-+
-+#ifdef CONFIG_XEN
-+static void __init map_vsyscall_user(void)
-+{
-+ extern void __set_fixmap_user(enum fixed_addresses, unsigned long, pgprot_t);
-+ extern char __vsyscall_0;
-+ unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
-+
-+ __set_fixmap_user(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
-+}
-+#endif
-+
-+static int __init vsyscall_init(void)
-+{
-+ BUG_ON(((unsigned long) &vgettimeofday !=
-+ VSYSCALL_ADDR(__NR_vgettimeofday)));
-+ BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
-+ BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
-+ map_vsyscall();
-+#ifdef CONFIG_XEN
-+ map_vsyscall_user();
-+	sysctl_vsyscall = 0; /* disable vgettimeofday() */
-+#endif
-+#ifdef CONFIG_SYSCTL
-+ register_sysctl_table(kernel_root_table2, 0);
-+#endif
-+ return 0;
-+}
-+
-+__initcall(vsyscall_init);
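
do_vgettimeofday() above reads xtime under a sequence lock: snapshot the counter, copy the values, and retry if a writer moved the counter in between. A toy single-writer sketch of that reader protocol follows; it omits the memory barriers that the kernel's read_seqbegin()/read_seqretry() provide, so it is illustrative rather than SMP-correct.

#include <stdio.h>

/* Writers make the counter odd while updating; readers retry when the
 * counter moved. This mirrors the retry loop in do_vgettimeofday(). */
static volatile unsigned seq;
static long xtime_sec, xtime_usec;

static void write_time(long sec, long usec)
{
	seq++;			/* odd: update in progress */
	xtime_sec = sec;
	xtime_usec = usec;
	seq++;			/* even: update complete */
}

int main(void)
{
	unsigned s;
	long sec, usec;

	write_time(1142851498, 250000);
	do {
		while ((s = seq) & 1)
			;	/* writer active, spin */
		sec = xtime_sec;
		usec = xtime_usec;
	} while (seq != s);	/* retry if a writer slipped in */
	printf("%ld.%06ld\n", sec, usec);
	return 0;
}
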
-diff --git a/arch/x86_64/kernel/x8664_ksyms-xen.c b/arch/x86_64/kernel/x8664_ksyms-xen.c
-new file mode 100644
-index 0000000..d6f9285
---- /dev/null
-+++ b/arch/x86_64/kernel/x8664_ksyms-xen.c
-@@ -0,0 +1,163 @@
-+#include <linux/config.h>
-+#include <linux/module.h>
-+#include <linux/smp.h>
-+#include <linux/user.h>
-+#include <linux/sched.h>
-+#include <linux/in6.h>
-+#include <linux/interrupt.h>
-+#include <linux/smp_lock.h>
-+#include <linux/pm.h>
-+#include <linux/pci.h>
-+#include <linux/apm_bios.h>
-+#include <linux/kernel.h>
-+#include <linux/string.h>
-+#include <linux/syscalls.h>
-+#include <linux/tty.h>
-+
-+#include <asm/semaphore.h>
-+#include <asm/processor.h>
-+#include <asm/i387.h>
-+#include <asm/uaccess.h>
-+#include <asm/checksum.h>
-+#include <asm/io.h>
-+#include <asm/delay.h>
-+#include <asm/irq.h>
-+#include <asm/mmx.h>
-+#include <asm/desc.h>
-+#include <asm/pgtable.h>
-+#include <asm/pgalloc.h>
-+#include <asm/nmi.h>
-+#include <asm/kdebug.h>
-+#include <asm/unistd.h>
-+#include <asm/tlbflush.h>
-+#include <asm/kdebug.h>
-+
-+#ifdef CONFIG_SMP
-+extern void __write_lock_failed(rwlock_t *rw);
-+extern void __read_lock_failed(rwlock_t *rw);
-+#endif
-+
-+/* platform dependent support */
-+EXPORT_SYMBOL(boot_cpu_data);
-+//EXPORT_SYMBOL(dump_fpu);
-+EXPORT_SYMBOL(kernel_thread);
-+EXPORT_SYMBOL(pm_idle);
-+EXPORT_SYMBOL(pm_power_off);
-+
-+EXPORT_SYMBOL(__down_failed);
-+EXPORT_SYMBOL(__down_failed_interruptible);
-+EXPORT_SYMBOL(__down_failed_trylock);
-+EXPORT_SYMBOL(__up_wakeup);
-+/* Networking helper routines. */
-+EXPORT_SYMBOL(csum_partial_copy_nocheck);
-+EXPORT_SYMBOL(ip_compute_csum);
-+/* Delay loops */
-+EXPORT_SYMBOL(__udelay);
-+EXPORT_SYMBOL(__ndelay);
-+EXPORT_SYMBOL(__delay);
-+EXPORT_SYMBOL(__const_udelay);
-+
-+EXPORT_SYMBOL(__get_user_1);
-+EXPORT_SYMBOL(__get_user_2);
-+EXPORT_SYMBOL(__get_user_4);
-+EXPORT_SYMBOL(__get_user_8);
-+EXPORT_SYMBOL(__put_user_1);
-+EXPORT_SYMBOL(__put_user_2);
-+EXPORT_SYMBOL(__put_user_4);
-+EXPORT_SYMBOL(__put_user_8);
-+
-+EXPORT_SYMBOL(strncpy_from_user);
-+EXPORT_SYMBOL(__strncpy_from_user);
-+EXPORT_SYMBOL(clear_user);
-+EXPORT_SYMBOL(__clear_user);
-+EXPORT_SYMBOL(copy_user_generic);
-+EXPORT_SYMBOL(copy_from_user);
-+EXPORT_SYMBOL(copy_to_user);
-+EXPORT_SYMBOL(copy_in_user);
-+EXPORT_SYMBOL(strnlen_user);
-+
-+#ifdef CONFIG_PCI
-+EXPORT_SYMBOL(pci_mem_start);
-+#endif
-+
-+EXPORT_SYMBOL(copy_page);
-+EXPORT_SYMBOL(clear_page);
-+
-+EXPORT_SYMBOL(_cpu_pda);
-+#ifdef CONFIG_SMP
-+EXPORT_SYMBOL(__write_lock_failed);
-+EXPORT_SYMBOL(__read_lock_failed);
-+
-+EXPORT_SYMBOL(smp_call_function);
-+#endif
-+
-+#ifdef CONFIG_VT
-+EXPORT_SYMBOL(screen_info);
-+#endif
-+
-+EXPORT_SYMBOL(get_wchan);
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+EXPORT_SYMBOL_GPL(set_nmi_callback);
-+EXPORT_SYMBOL_GPL(unset_nmi_callback);
-+#endif
-+
-+/* Export string functions. We normally rely on gcc builtins for most of these,
-+ but gcc sometimes decides not to inline them. */
-+#undef memcpy
-+#undef memset
-+#undef memmove
-+#undef strlen
-+
-+extern void * memset(void *,int,__kernel_size_t);
-+extern size_t strlen(const char *);
-+extern void * memmove(void * dest,const void *src,size_t count);
-+extern void * memcpy(void *,const void *,__kernel_size_t);
-+extern void * __memcpy(void *,const void *,__kernel_size_t);
-+
-+EXPORT_SYMBOL(memset);
-+EXPORT_SYMBOL(strlen);
-+EXPORT_SYMBOL(memmove);
-+EXPORT_SYMBOL(memcpy);
-+EXPORT_SYMBOL(__memcpy);
-+
-+#ifdef CONFIG_RWSEM_XCHGADD_ALGORITHM
-+/* prototypes are wrong, these are assembly with custom calling conventions */
-+extern void rwsem_down_read_failed_thunk(void);
-+extern void rwsem_wake_thunk(void);
-+extern void rwsem_downgrade_thunk(void);
-+extern void rwsem_down_write_failed_thunk(void);
-+EXPORT_SYMBOL(rwsem_down_read_failed_thunk);
-+EXPORT_SYMBOL(rwsem_wake_thunk);
-+EXPORT_SYMBOL(rwsem_downgrade_thunk);
-+EXPORT_SYMBOL(rwsem_down_write_failed_thunk);
-+#endif
-+
-+EXPORT_SYMBOL(empty_zero_page);
-+
-+EXPORT_SYMBOL(die_chain);
-+EXPORT_SYMBOL(register_die_notifier);
-+
-+#ifdef CONFIG_SMP
-+EXPORT_SYMBOL(cpu_sibling_map);
-+EXPORT_SYMBOL(smp_num_siblings);
-+#endif
-+
-+extern void do_softirq_thunk(void);
-+EXPORT_SYMBOL(do_softirq_thunk);
-+
-+#ifdef CONFIG_BUG
-+EXPORT_SYMBOL(out_of_line_bug);
-+#endif
-+
-+EXPORT_SYMBOL(init_level4_pgt);
-+
-+extern unsigned long __supported_pte_mask;
-+EXPORT_SYMBOL(__supported_pte_mask);
-+
-+#ifdef CONFIG_SMP
-+EXPORT_SYMBOL(flush_tlb_page);
-+#endif
-+
-+EXPORT_SYMBOL(load_gs_index);
-+
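
The #undef block in this file exists because the kernel maps memcpy and friends to gcc builtins; when gcc declines to inline a call, the linker needs a real, exported symbol, or module loading fails. A minimal sketch of a module that depends on that export (a hypothetical demo module, not part of this patch):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>

/* If gcc emits a real call to memcpy here rather than inlining it,
 * loading this module works only because x8664_ksyms exports memcpy. */
static int __init ksyms_demo_init(void)
{
	char dst[16];

	memcpy(dst, "exported", sizeof("exported"));
	return 0;
}

static void __exit ksyms_demo_exit(void)
{
}

module_init(ksyms_demo_init);
module_exit(ksyms_demo_exit);
MODULE_LICENSE("GPL");
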
-diff --git a/arch/x86_64/kernel/xen_entry.S b/arch/x86_64/kernel/xen_entry.S
-new file mode 100644
-index 0000000..9b42d15
---- /dev/null
-+++ b/arch/x86_64/kernel/xen_entry.S
-@@ -0,0 +1,41 @@
-+/*
-+ * Copied from arch/xen/i386/kernel/entry.S
-+ */
-+/* Offsets into shared_info_t. */
-+#define evtchn_upcall_pending 0
-+#define evtchn_upcall_mask 1
-+
-+#define sizeof_vcpu_shift 6
-+
-+#ifdef CONFIG_SMP
-+//#define preempt_disable(reg) incl threadinfo_preempt_count(reg)
-+//#define preempt_enable(reg) decl threadinfo_preempt_count(reg)
-+#define preempt_disable(reg)
-+#define preempt_enable(reg)
-+#define XEN_GET_VCPU_INFO(reg) preempt_disable(%rbp) ; \
-+ movq %gs:pda_cpunumber,reg ; \
-+ shl $32, reg ; \
-+ shr $32-sizeof_vcpu_shift,reg ; \
-+ addq HYPERVISOR_shared_info,reg
-+#define XEN_PUT_VCPU_INFO(reg) preempt_enable(%rbp)
-+#define XEN_PUT_VCPU_INFO_fixup .byte 0xff,0xff,0xff
-+#else
-+#define XEN_GET_VCPU_INFO(reg) movq HYPERVISOR_shared_info,reg
-+#define XEN_PUT_VCPU_INFO(reg)
-+#define XEN_PUT_VCPU_INFO_fixup
-+#endif
-+
-+#define XEN_LOCKED_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg)
-+#define XEN_LOCKED_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
-+#define XEN_BLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
-+ XEN_LOCKED_BLOCK_EVENTS(reg) ; \
-+ XEN_PUT_VCPU_INFO(reg)
-+#define XEN_UNBLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
-+ XEN_LOCKED_UNBLOCK_EVENTS(reg) ; \
-+ XEN_PUT_VCPU_INFO(reg)
-+#define XEN_TEST_PENDING(reg) testb $0xFF,evtchn_upcall_pending(reg)
-+
-+EVENT_MASK = (CS+4)
-+VGCF_IN_SYSCALL = (1<<8)
-+
-+
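
The macros above are the Xen substitute for cli/sti: instead of toggling EFLAGS.IF, the guest masks event delivery by writing one byte in its vcpu_info slot of the shared-info page. The same idea in C, as a sketch with the structure reduced to the two fields the offsets above name; real code would need compiler and memory barriers around these accesses.

#include <stdio.h>

/* Reduced vcpu_info: evtchn_upcall_pending at offset 0,
 * evtchn_upcall_mask at offset 1, matching the #defines above. */
struct vcpu_info_sketch {
	unsigned char evtchn_upcall_pending;
	unsigned char evtchn_upcall_mask;
};

/* XEN_BLOCK_EVENTS: the virtual "cli". */
static inline void block_events(struct vcpu_info_sketch *v)
{
	v->evtchn_upcall_mask = 1;
}

/* XEN_UNBLOCK_EVENTS followed by XEN_TEST_PENDING: the virtual "sti";
 * if an event arrived while masked, the caller must force an upcall. */
static inline int unblock_events(struct vcpu_info_sketch *v)
{
	v->evtchn_upcall_mask = 0;
	return v->evtchn_upcall_pending;
}

int main(void)
{
	struct vcpu_info_sketch v = { 0, 0 };

	block_events(&v);
	v.evtchn_upcall_pending = 1;	/* event arrives while masked */
	if (unblock_events(&v))
		printf("pending upcall must be forced\n");
	return 0;
}
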
-diff --git a/arch/x86_64/mm/Makefile b/arch/x86_64/mm/Makefile
-index d25ac86..7d6ceae 100644
---- a/arch/x86_64/mm/Makefile
-+++ b/arch/x86_64/mm/Makefile
-@@ -9,3 +9,13 @@ obj-$(CONFIG_K8_NUMA) += k8topology.o
- obj-$(CONFIG_ACPI_NUMA) += srat.o
-
- hugetlbpage-y = ../../i386/mm/hugetlbpage.o
-+
-+ifdef CONFIG_XEN
-+include $(srctree)/scripts/Makefile.xen
-+
-+ioremap-y += ../../i386/mm/ioremap-xen.o
-+hypervisor-y += ../../i386/mm/hypervisor.o
-+obj-y += hypervisor.o
-+
-+obj-y := $(call cherrypickxen, $(obj-y))
-+endif
-diff --git a/arch/x86_64/mm/fault-xen.c b/arch/x86_64/mm/fault-xen.c
-new file mode 100644
-index 0000000..dfd2f2d
---- /dev/null
-+++ b/arch/x86_64/mm/fault-xen.c
-@@ -0,0 +1,595 @@
-+/*
-+ * linux/arch/x86-64/mm/fault.c
-+ *
-+ * Copyright (C) 1995 Linus Torvalds
-+ * Copyright (C) 2001,2002 Andi Kleen, SuSE Labs.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/signal.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/string.h>
-+#include <linux/types.h>
-+#include <linux/ptrace.h>
-+#include <linux/mman.h>
-+#include <linux/mm.h>
-+#include <linux/smp.h>
-+#include <linux/smp_lock.h>
-+#include <linux/interrupt.h>
-+#include <linux/init.h>
-+#include <linux/tty.h>
-+#include <linux/vt_kern.h> /* For unblank_screen() */
-+#include <linux/compiler.h>
-+#include <linux/module.h>
-+#include <linux/kprobes.h>
-+
-+#include <asm/system.h>
-+#include <asm/uaccess.h>
-+#include <asm/pgalloc.h>
-+#include <asm/smp.h>
-+#include <asm/tlbflush.h>
-+#include <asm/proto.h>
-+#include <asm/kdebug.h>
-+#include <asm-generic/sections.h>
-+
-+/* Page fault error code bits */
-+#define PF_PROT (1<<0) /* or no page found */
-+#define PF_WRITE (1<<1)
-+#define PF_USER (1<<2)
-+#define PF_RSVD (1<<3)
-+#define PF_INSTR (1<<4)
-+
-+void bust_spinlocks(int yes)
-+{
-+ int loglevel_save = console_loglevel;
-+ if (yes) {
-+ oops_in_progress = 1;
-+ } else {
-+#ifdef CONFIG_VT
-+ unblank_screen();
-+#endif
-+ oops_in_progress = 0;
-+ /*
-+ * OK, the message is on the console. Now we call printk()
-+ * without oops_in_progress set so that printk will give klogd
-+ * a poke. Hold onto your hats...
-+ */
-+ console_loglevel = 15; /* NMI oopser may have shut the console up */
-+ printk(" ");
-+ console_loglevel = loglevel_save;
-+ }
-+}
-+
-+/* Sometimes the CPU reports invalid exceptions on prefetch.
-+ Check that here and ignore.
-+ Opcode checker based on code by Richard Brunner */
-+static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
-+ unsigned long error_code)
-+{
-+ unsigned char *instr;
-+ int scan_more = 1;
-+ int prefetch = 0;
-+ unsigned char *max_instr;
-+
-+	/* If it was an exec fault, ignore it */
-+ if (error_code & PF_INSTR)
-+ return 0;
-+
-+ instr = (unsigned char *)convert_rip_to_linear(current, regs);
-+ max_instr = instr + 15;
-+
-+ if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
-+ return 0;
-+
-+ while (scan_more && instr < max_instr) {
-+ unsigned char opcode;
-+ unsigned char instr_hi;
-+ unsigned char instr_lo;
-+
-+ if (__get_user(opcode, instr))
-+ break;
-+
-+ instr_hi = opcode & 0xf0;
-+ instr_lo = opcode & 0x0f;
-+ instr++;
-+
-+ switch (instr_hi) {
-+ case 0x20:
-+ case 0x30:
-+ /* Values 0x26,0x2E,0x36,0x3E are valid x86
-+ prefixes. In long mode, the CPU will signal
-+ invalid opcode if some of these prefixes are
-+ present so we will never get here anyway */
-+ scan_more = ((instr_lo & 7) == 0x6);
-+ break;
-+
-+ case 0x40:
-+ /* In AMD64 long mode, 0x40 to 0x4F are valid REX prefixes
-+ Need to figure out under what instruction mode the
-+ instruction was issued ... */
-+ /* Could check the LDT for lm, but for now it's good
-+ enough to assume that long mode only uses well known
-+ segments or kernel. */
-+ scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
-+ break;
-+
-+ case 0x60:
-+ /* 0x64 thru 0x67 are valid prefixes in all modes. */
-+ scan_more = (instr_lo & 0xC) == 0x4;
-+ break;
-+ case 0xF0:
-+ /* 0xF0, 0xF2, and 0xF3 are valid prefixes in all modes. */
-+ scan_more = !instr_lo || (instr_lo>>1) == 1;
-+ break;
-+ case 0x00:
-+ /* Prefetch instruction is 0x0F0D or 0x0F18 */
-+ scan_more = 0;
-+ if (__get_user(opcode, instr))
-+ break;
-+ prefetch = (instr_lo == 0xF) &&
-+ (opcode == 0x0D || opcode == 0x18);
-+ break;
-+ default:
-+ scan_more = 0;
-+ break;
-+ }
-+ }
-+ return prefetch;
-+}
-+
-+static int bad_address(void *p)
-+{
-+ unsigned long dummy;
-+ return __get_user(dummy, (unsigned long *)p);
-+}
-+
-+void dump_pagetable(unsigned long address)
-+{
-+ pgd_t *pgd;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ pte_t *pte;
-+
-+ pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK);
-+ pgd += pgd_index(address);
-+ if (bad_address(pgd)) goto bad;
-+ printk("PGD %lx ", pgd_val(*pgd));
-+ if (!pgd_present(*pgd)) goto ret;
-+
-+ pud = __pud_offset_k((pud_t *)pgd_page(*pgd), address);
-+ if (bad_address(pud)) goto bad;
-+ printk("PUD %lx ", pud_val(*pud));
-+ if (!pud_present(*pud)) goto ret;
-+
-+ pmd = pmd_offset(pud, address);
-+ if (bad_address(pmd)) goto bad;
-+ printk("PMD %lx ", pmd_val(*pmd));
-+ if (!pmd_present(*pmd)) goto ret;
-+
-+ pte = pte_offset_kernel(pmd, address);
-+ if (bad_address(pte)) goto bad;
-+ printk("PTE %lx", pte_val(*pte));
-+ret:
-+ printk("\n");
-+ return;
-+bad:
-+ printk("BAD\n");
-+}
-+
-+static const char errata93_warning[] =
-+KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
-+KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
-+KERN_ERR "******* Please consider a BIOS update.\n"
-+KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
-+
-+/* Workaround for K8 erratum #93 & buggy BIOS.
-+ BIOS SMM functions are required to use a specific workaround
-+ to avoid corruption of the 64bit RIP register on C stepping K8.
-+ A lot of BIOS that didn't get tested properly miss this.
-+ The OS sees this as a page fault with the upper 32bits of RIP cleared.
-+ Try to work around it here.
-+ Note we only handle faults in kernel here. */
-+
-+static int is_errata93(struct pt_regs *regs, unsigned long address)
-+{
-+ static int warned;
-+ if (address != regs->rip)
-+ return 0;
-+ if ((address >> 32) != 0)
-+ return 0;
-+ address |= 0xffffffffUL << 32;
-+ if ((address >= (u64)_stext && address <= (u64)_etext) ||
-+ (address >= MODULES_VADDR && address <= MODULES_END)) {
-+ if (!warned) {
-+ printk(errata93_warning);
-+ warned = 1;
-+ }
-+ regs->rip = address;
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+int unhandled_signal(struct task_struct *tsk, int sig)
-+{
-+ if (tsk->pid == 1)
-+ return 1;
-+ if (tsk->ptrace & PT_PTRACED)
-+ return 0;
-+ return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
-+ (tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
-+}
-+
-+static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
-+ unsigned long error_code)
-+{
-+ unsigned long flags = oops_begin();
-+ struct task_struct *tsk;
-+
-+ printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
-+ current->comm, address);
-+ dump_pagetable(address);
-+ tsk = current;
-+ tsk->thread.cr2 = address;
-+ tsk->thread.trap_no = 14;
-+ tsk->thread.error_code = error_code;
-+ __die("Bad pagetable", regs, error_code);
-+ oops_end(flags);
-+ do_exit(SIGKILL);
-+}
-+
-+/*
-+ * Handle a fault on the vmalloc area
-+ *
-+ * This assumes no large pages in there.
-+ */
-+static int vmalloc_fault(unsigned long address)
-+{
-+ pgd_t *pgd, *pgd_ref;
-+ pud_t *pud, *pud_ref;
-+ pmd_t *pmd, *pmd_ref;
-+ pte_t *pte, *pte_ref;
-+
-+ /* Copy kernel mappings over when needed. This can also
-+	   happen within a race in page table update. In the latter
-+ case just flush. */
-+
-+ /* On Xen the line below does not always work. Needs investigating! */
-+ /*pgd = pgd_offset(current->mm ?: &init_mm, address);*/
-+ pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK);
-+ pgd += pgd_index(address);
-+ pgd_ref = pgd_offset_k(address);
-+ if (pgd_none(*pgd_ref))
-+ return -1;
-+ if (pgd_none(*pgd))
-+ set_pgd(pgd, *pgd_ref);
-+
-+ /* Below here mismatches are bugs because these lower tables
-+ are shared */
-+
-+ pud = pud_offset(pgd, address);
-+ pud_ref = pud_offset(pgd_ref, address);
-+ if (pud_none(*pud_ref))
-+ return -1;
-+ if (pud_none(*pud) || pud_page(*pud) != pud_page(*pud_ref))
-+ BUG();
-+ pmd = pmd_offset(pud, address);
-+ pmd_ref = pmd_offset(pud_ref, address);
-+ if (pmd_none(*pmd_ref))
-+ return -1;
-+ if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
-+ BUG();
-+ pte_ref = pte_offset_kernel(pmd_ref, address);
-+ if (!pte_present(*pte_ref))
-+ return -1;
-+ pte = pte_offset_kernel(pmd, address);
-+ /* Don't use pte_page here, because the mappings can point
-+ outside mem_map, and the NUMA hash lookup cannot handle
-+ that. */
-+ if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
-+ BUG();
-+ return 0;
-+}
-+
-+int page_fault_trace = 0;
-+int exception_trace = 1;
-+
-+
-+#define MEM_VERBOSE 1
-+
-+#ifdef MEM_VERBOSE
-+#define MEM_LOG(_f, _a...) \
-+ printk("fault.c:[%d]-> " _f "\n", \
-+ __LINE__ , ## _a )
-+#else
-+#define MEM_LOG(_f, _a...) ((void)0)
-+#endif
-+
-+/*
-+ * This routine handles page faults. It determines the address,
-+ * and the problem, and then passes it off to one of the appropriate
-+ * routines.
-+ */
-+asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
-+ unsigned long error_code)
-+{
-+ struct task_struct *tsk;
-+ struct mm_struct *mm;
-+ struct vm_area_struct * vma;
-+ unsigned long address;
-+ const struct exception_table_entry *fixup;
-+ int write;
-+ unsigned long flags;
-+ siginfo_t info;
-+
-+ if (!user_mode(regs))
-+ error_code &= ~PF_USER; /* means kernel */
-+
-+ /* get the address */
-+ address = HYPERVISOR_shared_info->vcpu_info[
-+ smp_processor_id()].arch.cr2;
-+ if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
-+ SIGSEGV) == NOTIFY_STOP)
-+ return;
-+
-+ if (likely(regs->eflags & X86_EFLAGS_IF))
-+ local_irq_enable();
-+
-+ if (unlikely(page_fault_trace))
-+ printk("pagefault rip:%lx rsp:%lx cs:%lu ss:%lu address %lx error %lx\n",
-+ regs->rip,regs->rsp,regs->cs,regs->ss,address,error_code);
-+
-+ tsk = current;
-+ mm = tsk->mm;
-+ info.si_code = SEGV_MAPERR;
-+
-+
-+ /*
-+ * We fault-in kernel-space virtual memory on-demand. The
-+ * 'reference' page table is init_mm.pgd.
-+ *
-+ * NOTE! We MUST NOT take any locks for this case. We may
-+ * be in an interrupt or a critical region, and should
-+ * only copy the information from the master page table,
-+ * nothing more.
-+ *
-+ * This verifies that the fault happens in kernel space
-+ * (error_code & 4) == 0, and that the fault was not a
-+ * protection error (error_code & 9) == 0.
-+ */
-+ if (unlikely(address >= TASK_SIZE64)) {
-+ /*
-+ * Must check for the entire kernel range here: with writable
-+ * page tables the hypervisor may temporarily clear PMD
-+ * entries.
-+ */
-+ if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
-+ address >= PAGE_OFFSET) {
-+ if (vmalloc_fault(address) < 0)
-+ goto bad_area_nosemaphore;
-+ return;
-+ }
-+ /*
-+ * Don't take the mm semaphore here. If we fixup a prefetch
-+ * fault we could otherwise deadlock.
-+ */
-+ goto bad_area_nosemaphore;
-+ }
-+
-+ if (unlikely(error_code & PF_RSVD))
-+ pgtable_bad(address, regs, error_code);
-+
-+ /*
-+ * If we're in an interrupt or have no user
-+ * context, we must not take the fault..
-+ */
-+ if (unlikely(in_atomic() || !mm))
-+ goto bad_area_nosemaphore;
-+
-+ again:
-+ /* When running in the kernel we expect faults to occur only to
-+ * addresses in user space. All other faults represent errors in the
-+	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
-+	 * erroneous fault occurring in a code path which already holds mmap_sem
-+ * we will deadlock attempting to validate the fault against the
-+ * address space. Luckily the kernel only validly references user
-+ * space from well defined areas of code, which are listed in the
-+ * exceptions table.
-+ *
-+ * As the vast majority of faults will be valid we will only perform
-+	 * the source reference check when there is a possibility of a deadlock.
-+ * Attempt to lock the address space, if we cannot we then validate the
-+ * source. If this is invalid we can skip the address space check,
-+ * thus avoiding the deadlock.
-+ */
-+ if (!down_read_trylock(&mm->mmap_sem)) {
-+ if ((error_code & PF_USER) == 0 &&
-+ !search_exception_tables(regs->rip))
-+ goto bad_area_nosemaphore;
-+ down_read(&mm->mmap_sem);
-+ }
-+
-+ vma = find_vma(mm, address);
-+ if (!vma)
-+ goto bad_area;
-+ if (likely(vma->vm_start <= address))
-+ goto good_area;
-+ if (!(vma->vm_flags & VM_GROWSDOWN))
-+ goto bad_area;
-+ if (error_code & 4) {
-+ // XXX: align red zone size with ABI
-+ if (address + 128 < regs->rsp)
-+ goto bad_area;
-+ }
-+ if (expand_stack(vma, address))
-+ goto bad_area;
-+/*
-+ * Ok, we have a good vm_area for this memory access, so
-+ * we can handle it..
-+ */
-+good_area:
-+ info.si_code = SEGV_ACCERR;
-+ write = 0;
-+ switch (error_code & (PF_PROT|PF_WRITE)) {
-+ default: /* 3: write, present */
-+ /* fall through */
-+ case PF_WRITE: /* write, not present */
-+ if (!(vma->vm_flags & VM_WRITE))
-+ goto bad_area;
-+ write++;
-+ break;
-+ case PF_PROT: /* read, present */
-+ goto bad_area;
-+ case 0: /* read, not present */
-+ if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
-+ goto bad_area;
-+ }
-+
-+ /*
-+ * If for any reason at all we couldn't handle the fault,
-+ * make sure we exit gracefully rather than endlessly redo
-+ * the fault.
-+ */
-+ switch (handle_mm_fault(mm, vma, address, write)) {
-+ case VM_FAULT_MINOR:
-+ tsk->min_flt++;
-+ break;
-+ case VM_FAULT_MAJOR:
-+ tsk->maj_flt++;
-+ break;
-+ case VM_FAULT_SIGBUS:
-+ goto do_sigbus;
-+ default:
-+ goto out_of_memory;
-+ }
-+
-+ up_read(&mm->mmap_sem);
-+ return;
-+
-+/*
-+ * Something tried to access memory that isn't in our memory map..
-+ * Fix it, but check if it's kernel or user first..
-+ */
-+bad_area:
-+ up_read(&mm->mmap_sem);
-+
-+bad_area_nosemaphore:
-+ /* User mode accesses just cause a SIGSEGV */
-+ if (error_code & PF_USER) {
-+ if (is_prefetch(regs, address, error_code))
-+ return;
-+
-+		/* Work around K8 erratum #100: K8 in compat mode
-+ occasionally jumps to illegal addresses >4GB. We
-+ catch this here in the page fault handler because
-+ these addresses are not reachable. Just detect this
-+ case and return. Any code segment in LDT is
-+ compatibility mode. */
-+ if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
-+ (address >> 32))
-+ return;
-+
-+ if (exception_trace && unhandled_signal(tsk, SIGSEGV)) {
-+ printk(
-+ "%s%s[%d]: segfault at %016lx rip %016lx rsp %016lx error %lx\n",
-+ tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
-+ tsk->comm, tsk->pid, address, regs->rip,
-+ regs->rsp, error_code);
-+ }
-+
-+ tsk->thread.cr2 = address;
-+ /* Kernel addresses are always protection faults */
-+ tsk->thread.error_code = error_code | (address >= TASK_SIZE);
-+ tsk->thread.trap_no = 14;
-+ info.si_signo = SIGSEGV;
-+ info.si_errno = 0;
-+ /* info.si_code has been set above */
-+ info.si_addr = (void __user *)address;
-+ force_sig_info(SIGSEGV, &info, tsk);
-+ return;
-+ }
-+
-+no_context:
-+
-+ /* Are we prepared to handle this kernel fault? */
-+ fixup = search_exception_tables(regs->rip);
-+ if (fixup) {
-+ regs->rip = fixup->fixup;
-+ return;
-+ }
-+
-+ /*
-+ * Hall of shame of CPU/BIOS bugs.
-+ */
-+
-+ if (is_prefetch(regs, address, error_code))
-+ return;
-+
-+ if (is_errata93(regs, address))
-+ return;
-+
-+/*
-+ * Oops. The kernel tried to access some bad page. We'll have to
-+ * terminate things with extreme prejudice.
-+ */
-+
-+ flags = oops_begin();
-+
-+ if (address < PAGE_SIZE)
-+ printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
-+ else
-+ printk(KERN_ALERT "Unable to handle kernel paging request");
-+ printk(" at %016lx RIP: \n" KERN_ALERT,address);
-+ printk_address(regs->rip);
-+ printk("\n");
-+ dump_pagetable(address);
-+ tsk->thread.cr2 = address;
-+ tsk->thread.trap_no = 14;
-+ tsk->thread.error_code = error_code;
-+ __die("Oops", regs, error_code);
-+ /* Executive summary in case the body of the oops scrolled away */
-+ printk(KERN_EMERG "CR2: %016lx\n", address);
-+ oops_end(flags);
-+ do_exit(SIGKILL);
-+
-+/*
-+ * We ran out of memory, or some other thing happened to us that made
-+ * us unable to handle the page fault gracefully.
-+ */
-+out_of_memory:
-+ up_read(&mm->mmap_sem);
-+ if (current->pid == 1) {
-+ yield();
-+ goto again;
-+ }
-+ printk("VM: killing process %s\n", tsk->comm);
-+ if (error_code & 4)
-+ do_exit(SIGKILL);
-+ goto no_context;
-+
-+do_sigbus:
-+ up_read(&mm->mmap_sem);
-+
-+ /* Kernel mode? Handle exceptions or die */
-+ if (!(error_code & PF_USER))
-+ goto no_context;
-+
-+ tsk->thread.cr2 = address;
-+ tsk->thread.error_code = error_code;
-+ tsk->thread.trap_no = 14;
-+ info.si_signo = SIGBUS;
-+ info.si_errno = 0;
-+ info.si_code = BUS_ADRERR;
-+ info.si_addr = (void __user *)address;
-+ force_sig_info(SIGBUS, &info, tsk);
-+ return;
-+}
-+
-+static int __init enable_pagefaulttrace(char *str)
-+{
-+ page_fault_trace = 1;
-+ return 0;
-+}
-+__setup("pagefaulttrace", enable_pagefaulttrace);
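
The trylock-then-validate sequence in do_page_fault() above is the standard fault-path deadlock avoidance: when mmap_sem is contended and the fault came from a kernel access not covered by the exception tables, bail out rather than sleep on a lock the faulting path may already hold. A condensed user-space sketch of just that decision (a pthread rwlock stands in for mmap_sem and the exception-table lookup is faked):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-in for search_exception_tables(): is this kernel instruction
 * a known, fixup-able user access? */
static bool in_exception_table(unsigned long rip)
{
	return rip == 0xbeef;
}

static bool lock_mm_for_fault(bool user_fault, unsigned long rip)
{
	if (pthread_rwlock_tryrdlock(&mmap_sem) != 0) {
		if (!user_fault && !in_exception_table(rip))
			return false;	/* bad_area_nosemaphore: avoid deadlock */
		pthread_rwlock_rdlock(&mmap_sem);	/* safe to block */
	}
	return true;
}

int main(void)
{
	printf("%s\n", lock_mm_for_fault(false, 0xbeef) ? "locked" : "bad_area");
	pthread_rwlock_unlock(&mmap_sem);
	return 0;
}
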
-diff --git a/arch/x86_64/mm/init-xen.c b/arch/x86_64/mm/init-xen.c
-new file mode 100644
-index 0000000..0d09c4e
---- /dev/null
-+++ b/arch/x86_64/mm/init-xen.c
-@@ -0,0 +1,1107 @@
-+/*
-+ * linux/arch/x86_64/mm/init.c
-+ *
-+ * Copyright (C) 1995 Linus Torvalds
-+ * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
-+ * Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
-+ *
-+ * Jun Nakajima <jun.nakajima@intel.com>
-+ * Modified for Xen.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/signal.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/string.h>
-+#include <linux/types.h>
-+#include <linux/ptrace.h>
-+#include <linux/mman.h>
-+#include <linux/mm.h>
-+#include <linux/swap.h>
-+#include <linux/smp.h>
-+#include <linux/init.h>
-+#include <linux/pagemap.h>
-+#include <linux/bootmem.h>
-+#include <linux/proc_fs.h>
-+#include <linux/pci.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/module.h>
-+#include <linux/memory_hotplug.h>
-+
-+#include <asm/processor.h>
-+#include <asm/system.h>
-+#include <asm/uaccess.h>
-+#include <asm/pgtable.h>
-+#include <asm/pgalloc.h>
-+#include <asm/dma.h>
-+#include <asm/fixmap.h>
-+#include <asm/e820.h>
-+#include <asm/apic.h>
-+#include <asm/tlb.h>
-+#include <asm/mmu_context.h>
-+#include <asm/proto.h>
-+#include <asm/smp.h>
-+#include <asm/sections.h>
-+#include <asm/dma-mapping.h>
-+#include <asm/swiotlb.h>
-+
-+#include <xen/features.h>
-+
-+#ifndef Dprintk
-+#define Dprintk(x...)
-+#endif
-+
-+struct dma_mapping_ops* dma_ops;
-+EXPORT_SYMBOL(dma_ops);
-+
-+extern unsigned long *contiguous_bitmap;
-+
-+static unsigned long dma_reserve __initdata;
-+
-+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-+extern unsigned long start_pfn;
-+
-+/*
-+ * Use this until direct mapping is established, i.e. before __va() is
-+ * available in init_memory_mapping().
-+ */
-+
-+#define addr_to_page(addr, page) \
-+ (addr) &= PHYSICAL_PAGE_MASK; \
-+ (page) = ((unsigned long *) ((unsigned long) \
-+ (((mfn_to_pfn((addr) >> PAGE_SHIFT)) << PAGE_SHIFT) + \
-+ __START_KERNEL_map)))
-+
-+static void early_make_page_readonly(void *va, unsigned int feature)
-+{
-+ unsigned long addr, _va = (unsigned long)va;
-+ pte_t pte, *ptep;
-+ unsigned long *page = (unsigned long *) init_level4_pgt;
-+
-+ if (xen_feature(feature))
-+ return;
-+
-+ addr = (unsigned long) page[pgd_index(_va)];
-+ addr_to_page(addr, page);
-+
-+ addr = page[pud_index(_va)];
-+ addr_to_page(addr, page);
-+
-+ addr = page[pmd_index(_va)];
-+ addr_to_page(addr, page);
-+
-+ ptep = (pte_t *) &page[pte_index(_va)];
-+
-+ pte.pte = ptep->pte & ~_PAGE_RW;
-+ if (HYPERVISOR_update_va_mapping(_va, pte, 0))
-+ BUG();
-+}
-+
-+void make_page_readonly(void *va, unsigned int feature)
-+{
-+ pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t pte, *ptep;
-+ unsigned long addr = (unsigned long) va;
-+
-+ if (xen_feature(feature))
-+ return;
-+
-+ pgd = pgd_offset_k(addr);
-+ pud = pud_offset(pgd, addr);
-+ pmd = pmd_offset(pud, addr);
-+ ptep = pte_offset_kernel(pmd, addr);
-+
-+ pte.pte = ptep->pte & ~_PAGE_RW;
-+ if (HYPERVISOR_update_va_mapping(addr, pte, 0))
-+ xen_l1_entry_update(ptep, pte); /* fallback */
-+
-+ if ((addr >= VMALLOC_START) && (addr < VMALLOC_END))
-+ make_page_readonly(__va(pte_pfn(pte) << PAGE_SHIFT), feature);
-+}
-+
-+void make_page_writable(void *va, unsigned int feature)
-+{
-+ pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t pte, *ptep;
-+ unsigned long addr = (unsigned long) va;
-+
-+ if (xen_feature(feature))
-+ return;
-+
-+ pgd = pgd_offset_k(addr);
-+ pud = pud_offset(pgd, addr);
-+ pmd = pmd_offset(pud, addr);
-+ ptep = pte_offset_kernel(pmd, addr);
-+
-+ pte.pte = ptep->pte | _PAGE_RW;
-+ if (HYPERVISOR_update_va_mapping(addr, pte, 0))
-+ xen_l1_entry_update(ptep, pte); /* fallback */
-+
-+ if ((addr >= VMALLOC_START) && (addr < VMALLOC_END))
-+ make_page_writable(__va(pte_pfn(pte) << PAGE_SHIFT), feature);
-+}
-+
-+void make_pages_readonly(void *va, unsigned nr, unsigned int feature)
-+{
-+ if (xen_feature(feature))
-+ return;
-+
-+ while (nr-- != 0) {
-+ make_page_readonly(va, feature);
-+ va = (void*)((unsigned long)va + PAGE_SIZE);
-+ }
-+}
-+
-+void make_pages_writable(void *va, unsigned nr, unsigned int feature)
-+{
-+ if (xen_feature(feature))
-+ return;
-+
-+ while (nr-- != 0) {
-+ make_page_writable(va, feature);
-+ va = (void*)((unsigned long)va + PAGE_SIZE);
-+ }
-+}
-+
-+/*
-+ * NOTE: pagetable_init allocates all the fixmap pagetables contiguously in
-+ * physical space, so we can cache the location of the first one and move
-+ * around without checking the pgd every time.
-+ */
-+
-+void show_mem(void)
-+{
-+ long i, total = 0, reserved = 0;
-+ long shared = 0, cached = 0;
-+ pg_data_t *pgdat;
-+ struct page *page;
-+
-+ printk(KERN_INFO "Mem-info:\n");
-+ show_free_areas();
-+ printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
-+
-+ for_each_pgdat(pgdat) {
-+ for (i = 0; i < pgdat->node_spanned_pages; ++i) {
-+ page = pfn_to_page(pgdat->node_start_pfn + i);
-+ total++;
-+ if (PageReserved(page))
-+ reserved++;
-+ else if (PageSwapCache(page))
-+ cached++;
-+ else if (page_count(page))
-+ shared += page_count(page) - 1;
-+ }
-+ }
-+ printk(KERN_INFO "%lu pages of RAM\n", total);
-+ printk(KERN_INFO "%lu reserved pages\n",reserved);
-+ printk(KERN_INFO "%lu pages shared\n",shared);
-+ printk(KERN_INFO "%lu pages swap cached\n",cached);
-+}
-+
-+/* References to section boundaries */
-+
-+int after_bootmem;
-+
-+static void *spp_getpage(void)
-+{
-+ void *ptr;
-+ if (after_bootmem)
-+ ptr = (void *) get_zeroed_page(GFP_ATOMIC);
-+ else
-+ ptr = alloc_bootmem_pages(PAGE_SIZE);
-+ if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
-+ panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem?"after bootmem":"");
-+
-+ Dprintk("spp_getpage %p\n", ptr);
-+ return ptr;
-+}
-+
-+#define pgd_offset_u(address) (pgd_t *)(init_level4_user_pgt + pgd_index(address))
-+
-+static inline pud_t *pud_offset_u(unsigned long address)
-+{
-+ pud_t *pud = level3_user_pgt;
-+
-+ return pud + pud_index(address);
-+}
-+
-+static void set_pte_phys(unsigned long vaddr,
-+ unsigned long phys, pgprot_t prot, int user_mode)
-+{
-+ pgd_t *pgd;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ pte_t *pte, new_pte;
-+
-+ Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);
-+
-+ pgd = (user_mode ? pgd_offset_u(vaddr) : pgd_offset_k(vaddr));
-+ if (pgd_none(*pgd)) {
-+ printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
-+ return;
-+ }
-+ pud = (user_mode ? pud_offset_u(vaddr) : pud_offset(pgd, vaddr));
-+ if (pud_none(*pud)) {
-+ pmd = (pmd_t *) spp_getpage();
-+ make_page_readonly(pmd, XENFEAT_writable_page_tables);
-+ xen_pmd_pin(__pa(pmd));
-+ set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
-+ if (pmd != pmd_offset(pud, 0)) {
-+ printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
-+ return;
-+ }
-+ }
-+ pmd = pmd_offset(pud, vaddr);
-+ if (pmd_none(*pmd)) {
-+ pte = (pte_t *) spp_getpage();
-+ make_page_readonly(pte, XENFEAT_writable_page_tables);
-+ xen_pte_pin(__pa(pte));
-+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
-+ if (pte != pte_offset_kernel(pmd, 0)) {
-+ printk("PAGETABLE BUG #02!\n");
-+ return;
-+ }
-+ }
-+ new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);
-+
-+ pte = pte_offset_kernel(pmd, vaddr);
-+ if (!pte_none(*pte) &&
-+ pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
-+ pte_ERROR(*pte);
-+ set_pte(pte, new_pte);
-+
-+ /*
-+ * It's enough to flush this one mapping.
-+ * (PGE mappings get flushed as well)
-+ */
-+ __flush_tlb_one(vaddr);
-+}
-+
-+static void set_pte_phys_ma(unsigned long vaddr,
-+ unsigned long phys, pgprot_t prot)
-+{
-+ pgd_t *pgd;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ pte_t *pte, new_pte;
-+
-+ Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);
-+
-+ pgd = pgd_offset_k(vaddr);
-+ if (pgd_none(*pgd)) {
-+ printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
-+ return;
-+ }
-+ pud = pud_offset(pgd, vaddr);
-+ if (pud_none(*pud)) {
-+
-+ pmd = (pmd_t *) spp_getpage();
-+ make_page_readonly(pmd, XENFEAT_writable_page_tables);
-+ xen_pmd_pin(__pa(pmd));
-+
-+ set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
-+
-+ if (pmd != pmd_offset(pud, 0)) {
-+ printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
-+ return;
-+ }
-+ }
-+ pmd = pmd_offset(pud, vaddr);
-+
-+ if (pmd_none(*pmd)) {
-+ pte = (pte_t *) spp_getpage();
-+ make_page_readonly(pte, XENFEAT_writable_page_tables);
-+ xen_pte_pin(__pa(pte));
-+
-+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
-+ if (pte != pte_offset_kernel(pmd, 0)) {
-+ printk("PAGETABLE BUG #02!\n");
-+ return;
-+ }
-+ }
-+
-+ new_pte = pfn_pte_ma(phys >> PAGE_SHIFT, prot);
-+ pte = pte_offset_kernel(pmd, vaddr);
-+
-+ /*
-+ * Note that the pte page is already RO, thus we want to use
-+ * xen_l1_entry_update(), not set_pte().
-+ */
-+ xen_l1_entry_update(pte,
-+ pfn_pte_ma(phys >> PAGE_SHIFT, prot));
-+
-+ /*
-+ * It's enough to flush this one mapping.
-+ * (PGE mappings get flushed as well)
-+ */
-+ __flush_tlb_one(vaddr);
-+}
-+
-+#define SET_FIXMAP_KERNEL 0
-+#define SET_FIXMAP_USER 1
-+
-+/* NOTE: this is meant to be run only at boot */
-+void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
-+{
-+ unsigned long address = __fix_to_virt(idx);
-+
-+ if (idx >= __end_of_fixed_addresses) {
-+ printk("Invalid __set_fixmap\n");
-+ return;
-+ }
-+ switch (idx) {
-+ case VSYSCALL_FIRST_PAGE:
-+ set_pte_phys(address, phys, prot, SET_FIXMAP_KERNEL);
-+ break;
-+ default:
-+ set_pte_phys_ma(address, phys, prot);
-+ break;
-+ }
-+}
-+
-+/*
-+ * At this point it only supports vsyscall area.
-+ */
-+void __set_fixmap_user (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
-+{
-+ unsigned long address = __fix_to_virt(idx);
-+
-+ if (idx >= __end_of_fixed_addresses) {
-+ printk("Invalid __set_fixmap\n");
-+ return;
-+ }
-+
-+ set_pte_phys(address, phys, prot, SET_FIXMAP_USER);
-+}
-+
-+unsigned long __initdata table_start, tables_space;
-+
-+unsigned long get_machine_pfn(unsigned long addr)
-+{
-+ pud_t* pud = pud_offset_k(NULL, addr);
-+ pmd_t* pmd = pmd_offset(pud, addr);
-+ pte_t *pte = pte_offset_kernel(pmd, addr);
-+
-+ return pte_mfn(*pte);
-+}
-+
-+static __meminit void *alloc_static_page(unsigned long *phys)
-+{
-+ unsigned long va = (start_pfn << PAGE_SHIFT) + __START_KERNEL_map;
-+
-+ if (after_bootmem) {
-+ void *adr = (void *)get_zeroed_page(GFP_ATOMIC);
-+
-+ *phys = __pa(adr);
-+ return adr;
-+ }
-+
-+ *phys = start_pfn << PAGE_SHIFT;
-+ start_pfn++;
-+ memset((void *)va, 0, PAGE_SIZE);
-+ return (void *)va;
-+}
-+
-+#define PTE_SIZE PAGE_SIZE
-+
-+static inline void __set_pte(pte_t *dst, pte_t val)
-+{
-+ *dst = val;
-+}
-+
-+static inline int make_readonly(unsigned long paddr)
-+{
-+ int readonly = 0;
-+
-+ /* Make old and new page tables read-only. */
-+ if (!xen_feature(XENFEAT_writable_page_tables)
-+ && (paddr >= (xen_start_info->pt_base - __START_KERNEL_map))
-+ && (paddr < ((table_start << PAGE_SHIFT) + tables_space)))
-+ readonly = 1;
-+ /*
-+ * No need for writable mapping of kernel image. This also ensures that
-+ * page and descriptor tables embedded inside don't have writable
-+ * mappings.
-+ */
-+ if ((paddr >= __pa_symbol(&_text)) && (paddr < __pa_symbol(&_end)))
-+ readonly = 1;
-+
-+ return readonly;
-+}
-+
-+static void __meminit
-+phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
-+{
-+ int i, k;
-+
-+ for (i = 0; i < PTRS_PER_PMD; pmd++, i++) {
-+ unsigned long pte_phys;
-+ pte_t *pte, *pte_save;
-+
-+ if (address >= end) {
-+ for (; i < PTRS_PER_PMD; i++, pmd++)
-+ set_pmd(pmd, __pmd(0));
-+ break;
-+ }
-+ pte = alloc_static_page(&pte_phys);
-+ pte_save = pte;
-+ for (k = 0; k < PTRS_PER_PTE; pte++, k++, address += PTE_SIZE) {
-+ if ((address >= end) ||
-+ ((address >> PAGE_SHIFT) >=
-+ xen_start_info->nr_pages)) {
-+ __set_pte(pte, __pte(0));
-+ continue;
-+ }
-+ if (make_readonly(address)) {
-+ __set_pte(pte,
-+ __pte(address | (_KERNPG_TABLE & ~_PAGE_RW)));
-+ continue;
-+ }
-+ __set_pte(pte, __pte(address | _KERNPG_TABLE));
-+ }
-+ pte = pte_save;
-+ early_make_page_readonly(pte, XENFEAT_writable_page_tables);
-+ xen_pte_pin(pte_phys);
-+ set_pmd(pmd, __pmd(pte_phys | _KERNPG_TABLE));
-+ }
-+}
-+
-+static void __meminit
-+phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
-+{
-+ pmd_t *pmd = pmd_offset(pud, (unsigned long)__va(address));
-+
-+ if (pmd_none(*pmd)) {
-+ spin_lock(&init_mm.page_table_lock);
-+ phys_pmd_init(pmd, address, end);
-+ spin_unlock(&init_mm.page_table_lock);
-+ __flush_tlb_all();
-+ }
-+}
-+
-+static void __meminit phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
-+{
-+ long i = pud_index(address);
-+
-+ pud = pud + i;
-+
-+ if (after_bootmem && pud_val(*pud)) {
-+ phys_pmd_update(pud, address, end);
-+ return;
-+ }
-+
-+ for (; i < PTRS_PER_PUD; pud++, i++) {
-+ unsigned long paddr, pmd_phys;
-+ pmd_t *pmd;
-+
-+ paddr = (address & PGDIR_MASK) + i*PUD_SIZE;
-+ if (paddr >= end)
-+ break;
-+
-+ pmd = alloc_static_page(&pmd_phys);
-+ early_make_page_readonly(pmd, XENFEAT_writable_page_tables);
-+ xen_pmd_pin(pmd_phys);
-+ spin_lock(&init_mm.page_table_lock);
-+ set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
-+ phys_pmd_init(pmd, paddr, end);
-+ spin_unlock(&init_mm.page_table_lock);
-+ }
-+ __flush_tlb();
-+}
-+
-+void __init xen_init_pt(void)
-+{
-+ unsigned long addr, *page;
-+
-+ memset((void *)init_level4_pgt, 0, PAGE_SIZE);
-+ memset((void *)level3_kernel_pgt, 0, PAGE_SIZE);
-+ memset((void *)level2_kernel_pgt, 0, PAGE_SIZE);
-+
-+ /* Find the initial pte page that was built for us. */
-+ page = (unsigned long *)xen_start_info->pt_base;
-+ addr = page[pgd_index(__START_KERNEL_map)];
-+ addr_to_page(addr, page);
-+ addr = page[pud_index(__START_KERNEL_map)];
-+ addr_to_page(addr, page);
-+
-+ /* Construct mapping of initial pte page in our own directories. */
-+ init_level4_pgt[pgd_index(__START_KERNEL_map)] =
-+ mk_kernel_pgd(__pa_symbol(level3_kernel_pgt));
-+ level3_kernel_pgt[pud_index(__START_KERNEL_map)] =
-+ __pud(__pa_symbol(level2_kernel_pgt) |
-+ _KERNPG_TABLE | _PAGE_USER);
-+ memcpy((void *)level2_kernel_pgt, page, PAGE_SIZE);
-+
-+ early_make_page_readonly(init_level4_pgt,
-+ XENFEAT_writable_page_tables);
-+ early_make_page_readonly(init_level4_user_pgt,
-+ XENFEAT_writable_page_tables);
-+ early_make_page_readonly(level3_kernel_pgt,
-+ XENFEAT_writable_page_tables);
-+ early_make_page_readonly(level3_user_pgt,
-+ XENFEAT_writable_page_tables);
-+ early_make_page_readonly(level2_kernel_pgt,
-+ XENFEAT_writable_page_tables);
-+
-+ xen_pgd_pin(__pa_symbol(init_level4_pgt));
-+ xen_pgd_pin(__pa_symbol(init_level4_user_pgt));
-+ xen_pud_pin(__pa_symbol(level3_kernel_pgt));
-+ xen_pud_pin(__pa_symbol(level3_user_pgt));
-+ xen_pmd_pin(__pa_symbol(level2_kernel_pgt));
-+
-+ set_pgd((pgd_t *)(init_level4_user_pgt + 511),
-+ mk_kernel_pgd(__pa_symbol(level3_user_pgt)));
-+}
-+
-+void __init extend_init_mapping(void)
-+{
-+ unsigned long va = __START_KERNEL_map;
-+ unsigned long phys, addr, *pte_page;
-+ pmd_t *pmd;
-+ pte_t *pte, new_pte;
-+ unsigned long *page = (unsigned long *)init_level4_pgt;
-+
-+ addr = page[pgd_index(va)];
-+ addr_to_page(addr, page);
-+ addr = page[pud_index(va)];
-+ addr_to_page(addr, page);
-+
-+ /* Kill mapping of low 1MB. */
-+ while (va < (unsigned long)&_text) {
-+ HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
-+ va += PAGE_SIZE;
-+ }
-+
-+ /* Ensure init mappings cover kernel text/data and initial tables. */
-+ while (va < (__START_KERNEL_map
-+ + (start_pfn << PAGE_SHIFT)
-+ + tables_space)) {
-+ pmd = (pmd_t *)&page[pmd_index(va)];
-+ if (pmd_none(*pmd)) {
-+ pte_page = alloc_static_page(&phys);
-+ early_make_page_readonly(
-+ pte_page, XENFEAT_writable_page_tables);
-+ xen_pte_pin(phys);
-+ set_pmd(pmd, __pmd(phys | _KERNPG_TABLE | _PAGE_USER));
-+ } else {
-+ addr = page[pmd_index(va)];
-+ addr_to_page(addr, pte_page);
-+ }
-+ pte = (pte_t *)&pte_page[pte_index(va)];
-+ if (pte_none(*pte)) {
-+ new_pte = pfn_pte(
-+ (va - __START_KERNEL_map) >> PAGE_SHIFT,
-+ __pgprot(_KERNPG_TABLE | _PAGE_USER));
-+ xen_l1_entry_update(pte, new_pte);
-+ }
-+ va += PAGE_SIZE;
-+ }
-+
-+ /* Finally, blow away any spurious initial mappings. */
-+ while (1) {
-+ pmd = (pmd_t *)&page[pmd_index(va)];
-+ if (pmd_none(*pmd))
-+ break;
-+ HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
-+ va += PAGE_SIZE;
-+ }
-+}
-+
-+static void __init find_early_table_space(unsigned long end)
-+{
-+ unsigned long puds, pmds, ptes;
-+
-+ puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
-+ pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
-+ ptes = (end + PTE_SIZE - 1) >> PAGE_SHIFT;
-+
-+ tables_space =
-+ round_up(puds * 8, PAGE_SIZE) +
-+ round_up(pmds * 8, PAGE_SIZE) +
-+ round_up(ptes * 8, PAGE_SIZE);
-+
-+ extend_init_mapping();
-+
-+ table_start = start_pfn;
-+
-+ early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
-+ end, table_start << PAGE_SHIFT, start_pfn << PAGE_SHIFT);
-+}
-+
-+/* Set up the direct mapping of the physical memory at PAGE_OFFSET.
-+ This runs before bootmem is initialized and gets pages directly from the
-+ physical memory. To access them they are temporarily mapped. */
-+void __meminit init_memory_mapping(unsigned long start, unsigned long end)
-+{
-+ unsigned long next;
-+
-+ Dprintk("init_memory_mapping\n");
-+
-+ /*
-+ * Find space for the kernel direct mapping tables.
-+ * Later we should allocate these tables in the local node of the memory
-+ * mapped. Unfortunately this is done currently before the nodes are
-+ * discovered.
-+ */
-+ if (!after_bootmem)
-+ find_early_table_space(end);
-+
-+ start = (unsigned long)__va(start);
-+ end = (unsigned long)__va(end);
-+
-+ for (; start < end; start = next) {
-+ unsigned long pud_phys;
-+ pgd_t *pgd = pgd_offset_k(start);
-+ pud_t *pud;
-+
-+ if (after_bootmem) {
-+ pud = pud_offset_k(pgd, __PAGE_OFFSET);
-+ make_page_readonly(pud, XENFEAT_writable_page_tables);
-+ pud_phys = __pa(pud);
-+ } else {
-+ pud = alloc_static_page(&pud_phys);
-+ early_make_page_readonly(pud, XENFEAT_writable_page_tables);
-+ }
-+ xen_pud_pin(pud_phys);
-+ next = start + PGDIR_SIZE;
-+ if (next > end)
-+ next = end;
-+ phys_pud_init(pud, __pa(start), __pa(next));
-+ if (!after_bootmem)
-+ set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
-+ }
-+
-+ BUG_ON(!after_bootmem && start_pfn != table_start + (tables_space >> PAGE_SHIFT));
-+
-+ __flush_tlb_all();
-+}
-+
-+void __cpuinit zap_low_mappings(int cpu)
-+{
-+ /* this is not required for Xen */
-+#if 0
-+ swap_low_mappings();
-+#endif
-+}
-+
-+/* Compute zone sizes for the DMA and DMA32 zones in a node. */
-+__init void
-+size_zones(unsigned long *z, unsigned long *h,
-+ unsigned long start_pfn, unsigned long end_pfn)
-+{
-+ int i;
-+#ifndef CONFIG_XEN
-+ unsigned long w;
-+#endif
-+
-+ for (i = 0; i < MAX_NR_ZONES; i++)
-+ z[i] = 0;
-+
-+#ifndef CONFIG_XEN
-+ if (start_pfn < MAX_DMA_PFN)
-+ z[ZONE_DMA] = MAX_DMA_PFN - start_pfn;
-+ if (start_pfn < MAX_DMA32_PFN) {
-+ unsigned long dma32_pfn = MAX_DMA32_PFN;
-+ if (dma32_pfn > end_pfn)
-+ dma32_pfn = end_pfn;
-+ z[ZONE_DMA32] = dma32_pfn - start_pfn;
-+ }
-+ z[ZONE_NORMAL] = end_pfn - start_pfn;
-+
-+ /* Remove lower zones from higher ones. */
-+ w = 0;
-+ for (i = 0; i < MAX_NR_ZONES; i++) {
-+ if (z[i])
-+ z[i] -= w;
-+ w += z[i];
-+ }
-+
-+ /* Compute holes */
-+ w = start_pfn;
-+ for (i = 0; i < MAX_NR_ZONES; i++) {
-+ unsigned long s = w;
-+ w += z[i];
-+ h[i] = e820_hole_size(s, w);
-+ }
-+
-+	/* Add the space needed for mem_map to the holes too. */
-+ for (i = 0; i < MAX_NR_ZONES; i++)
-+ h[i] += (z[i] * sizeof(struct page)) / PAGE_SIZE;
-+
-+ /* The 16MB DMA zone has the kernel and other misc mappings.
-+ Account them too */
-+ if (h[ZONE_DMA]) {
-+ h[ZONE_DMA] += dma_reserve;
-+ if (h[ZONE_DMA] >= z[ZONE_DMA]) {
-+ printk(KERN_WARNING
-+ "Kernel too large and filling up ZONE_DMA?\n");
-+ h[ZONE_DMA] = z[ZONE_DMA];
-+ }
-+ }
-+#else
-+ z[ZONE_DMA] = end_pfn;
-+ for (i = 0; i < MAX_NR_ZONES; i++)
-+ h[i] = 0;
-+#endif
-+}
-+
-+#ifndef CONFIG_NUMA
-+void __init paging_init(void)
-+{
-+ unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES];
-+ int i;
-+
-+ memory_present(0, 0, end_pfn);
-+ sparse_init();
-+ size_zones(zones, holes, 0, end_pfn);
-+ free_area_init_node(0, NODE_DATA(0), zones,
-+ __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
-+
-+ set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
-+ HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
-+
-+ memset(empty_zero_page, 0, sizeof(empty_zero_page));
-+ init_mm.context.pinned = 1;
-+
-+ /* Setup mapping of lower 1st MB */
-+ for (i = 0; i < NR_FIX_ISAMAPS; i++)
-+ if (xen_start_info->flags & SIF_PRIVILEGED)
-+ set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
-+ else
-+ __set_fixmap(FIX_ISAMAP_BEGIN - i,
-+ virt_to_mfn(empty_zero_page) << PAGE_SHIFT,
-+ PAGE_KERNEL_RO);
-+}
-+#endif
-+
-+/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
-+ from the CPU leading to inconsistent cache lines. address and size
-+ must be aligned to 2MB boundaries.
-+ Does nothing when the mapping doesn't exist. */
-+void __init clear_kernel_mapping(unsigned long address, unsigned long size)
-+{
-+ unsigned long end = address + size;
-+
-+ BUG_ON(address & ~LARGE_PAGE_MASK);
-+ BUG_ON(size & ~LARGE_PAGE_MASK);
-+
-+ for (; address < end; address += LARGE_PAGE_SIZE) {
-+ pgd_t *pgd = pgd_offset_k(address);
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ if (pgd_none(*pgd))
-+ continue;
-+ pud = pud_offset(pgd, address);
-+ if (pud_none(*pud))
-+ continue;
-+ pmd = pmd_offset(pud, address);
-+ if (!pmd || pmd_none(*pmd))
-+ continue;
-+ if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
-+ /* Could handle this, but it should not happen currently. */
-+ printk(KERN_ERR
-+ "clear_kernel_mapping: mapping has been split. will leak memory\n");
-+ pmd_ERROR(*pmd);
-+ }
-+ set_pmd(pmd, __pmd(0));
-+ }
-+ __flush_tlb_all();
-+}
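
clear_kernel_mapping() states a strict contract: both arguments must be 2MB-aligned or the BUG_ON()s fire. A hedged caller sketch; the function name and the round-up are hypothetical, only the LARGE_PAGE_* handling mirrors the code above:

/* Hypothetical caller: drop a physically contiguous buffer from the
 * kernel linear mapping, rounding to the 2MB contract. */
static void example_unmap_region(unsigned long phys, unsigned long len)
{
	unsigned long start = (unsigned long)__va(phys) & LARGE_PAGE_MASK;
	unsigned long size = (len + LARGE_PAGE_SIZE - 1) & LARGE_PAGE_MASK;

	clear_kernel_mapping(start, size);
}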
-+
-+/*
-+ * Memory hotplug specific functions
-+ * These are only for non-NUMA machines right now.
-+ */
-+#ifdef CONFIG_MEMORY_HOTPLUG
-+
-+void online_page(struct page *page)
-+{
-+ ClearPageReserved(page);
-+ set_page_count(page, 1);
-+ __free_page(page);
-+ totalram_pages++;
-+ num_physpages++;
-+}
-+
-+int add_memory(u64 start, u64 size)
-+{
-+ struct pglist_data *pgdat = NODE_DATA(0);
-+ struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2;
-+ unsigned long start_pfn = start >> PAGE_SHIFT;
-+ unsigned long nr_pages = size >> PAGE_SHIFT;
-+ int ret;
-+
-+ ret = __add_pages(zone, start_pfn, nr_pages);
-+ if (ret)
-+ goto error;
-+
-+ init_memory_mapping(start, (start + size -1));
-+
-+ return ret;
-+error:
-+ printk("%s: Problem encountered in __add_pages!\n", __func__);
-+ return ret;
-+}
-+EXPORT_SYMBOL_GPL(add_memory);
-+
-+int remove_memory(u64 start, u64 size)
-+{
-+ return -EINVAL;
-+}
-+EXPORT_SYMBOL_GPL(remove_memory);
-+
-+#endif
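
For context, add_memory() above takes a physical start address and a byte size, extends the zone, and maps the new range. A minimal caller sketch; the bank address and size are purely illustrative:

/* Hypothetical: online a 128MB memory bank appearing at the 4GB mark. */
static int example_online_bank(void)
{
	return add_memory(0x100000000ULL, 128ULL << 20);
}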
-+
-+static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
-+ kcore_vsyscall;
-+
-+void __init mem_init(void)
-+{
-+ long codesize, reservedpages, datasize, initsize;
-+
-+ contiguous_bitmap = alloc_bootmem_low_pages(
-+ (end_pfn + 2*BITS_PER_LONG) >> 3);
-+ BUG_ON(!contiguous_bitmap);
-+ memset(contiguous_bitmap, 0, (end_pfn + 2*BITS_PER_LONG) >> 3);
-+
-+#if defined(CONFIG_SWIOTLB)
-+ pci_swiotlb_init();
-+#endif
-+ no_iommu_init();
-+
-+ /* How many end-of-memory variables you have, grandma! */
-+ max_low_pfn = end_pfn;
-+ max_pfn = end_pfn;
-+ num_physpages = end_pfn;
-+ high_memory = (void *) __va(end_pfn * PAGE_SIZE);
-+
-+ /* clear the zero-page */
-+ memset(empty_zero_page, 0, PAGE_SIZE);
-+
-+ reservedpages = 0;
-+
-+ /* this will put all low memory onto the freelists */
-+#ifdef CONFIG_NUMA
-+ totalram_pages = numa_free_all_bootmem();
-+#else
-+ totalram_pages = free_all_bootmem();
-+#endif
-+ reservedpages = end_pfn - totalram_pages - e820_hole_size(0, end_pfn);
-+
-+ after_bootmem = 1;
-+
-+ codesize = (unsigned long) &_etext - (unsigned long) &_text;
-+ datasize = (unsigned long) &_edata - (unsigned long) &_etext;
-+ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
-+
-+ /* Register memory areas for /proc/kcore */
-+ kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
-+ kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
-+ VMALLOC_END-VMALLOC_START);
-+ kclist_add(&kcore_kernel, &_stext, _end - _stext);
-+ kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
-+ kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
-+ VSYSCALL_END - VSYSCALL_START);
-+
-+ printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
-+ (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
-+ end_pfn << (PAGE_SHIFT-10),
-+ codesize >> 10,
-+ reservedpages << (PAGE_SHIFT-10),
-+ datasize >> 10,
-+ initsize >> 10);
-+
-+#ifndef CONFIG_XEN
-+#ifdef CONFIG_SMP
-+ /*
-+ * Sync boot_level4_pgt mappings with the init_level4_pgt
-+ * except for the low identity mappings which are already zapped
-+ * in init_level4_pgt. This sync-up is essential for AP's bringup
-+ */
-+ memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
-+#endif
-+#endif
-+}
-+
-+void free_initmem(void)
-+{
-+#ifdef __DO_LATER__
-+ /*
-+ * Some pages can be pinned, but some are not. Unpinning such pages
-+ * triggers BUG().
-+ */
-+ unsigned long addr;
-+
-+ addr = (unsigned long)(&__init_begin);
-+ for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
-+ ClearPageReserved(virt_to_page(addr));
-+ set_page_count(virt_to_page(addr), 1);
-+ memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE);
-+ xen_pte_unpin(__pa(addr));
-+ make_page_writable(
-+ __va(__pa(addr)), XENFEAT_writable_page_tables);
-+ /*
-+ * Make pages from __PAGE_OFFSET address as well
-+ */
-+ make_page_writable(
-+ (void *)addr, XENFEAT_writable_page_tables);
-+ free_page(addr);
-+ totalram_pages++;
-+ }
-+ memset(__initdata_begin, 0xba, __initdata_end - __initdata_begin);
-+ printk ("Freeing unused kernel memory: %luk freed\n", (__init_end - __init_begin) >> 10);
-+#endif
-+}
-+
-+#ifdef CONFIG_DEBUG_RODATA
-+
-+extern char __start_rodata, __end_rodata;
-+void mark_rodata_ro(void)
-+{
-+ unsigned long addr = (unsigned long)&__start_rodata;
-+
-+ for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
-+ change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);
-+
-+ printk ("Write protecting the kernel read-only data: %luk\n",
-+ (&__end_rodata - &__start_rodata) >> 10);
-+
-+ /*
-+ * change_page_attr_addr() requires a global_flush_tlb() call after it.
-+ * We do this after the printk so that if something went wrong in the
-+ * change, the printk gets out at least to give a better debug hint
-+ * of who is the culprit.
-+ */
-+ global_flush_tlb();
-+}
-+#endif
-+
-+#ifdef CONFIG_BLK_DEV_INITRD
-+void free_initrd_mem(unsigned long start, unsigned long end)
-+{
-+ if (start >= end)
-+ return;
-+ printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
-+ for (; start < end; start += PAGE_SIZE) {
-+ ClearPageReserved(virt_to_page(start));
-+ set_page_count(virt_to_page(start), 1);
-+ free_page(start);
-+ totalram_pages++;
-+ }
-+}
-+#endif
-+
-+void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
-+{
-+ /* Should check here against the e820 map to avoid double free */
-+#ifdef CONFIG_NUMA
-+ int nid = phys_to_nid(phys);
-+ reserve_bootmem_node(NODE_DATA(nid), phys, len);
-+#else
-+ reserve_bootmem(phys, len);
-+#endif
-+ if (phys+len <= MAX_DMA_PFN*PAGE_SIZE)
-+ dma_reserve += len / PAGE_SIZE;
-+}
-+
-+int kern_addr_valid(unsigned long addr)
-+{
-+ unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
-+ pgd_t *pgd;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ pte_t *pte;
-+
-+ if (above != 0 && above != -1UL)
-+ return 0;
-+
-+ pgd = pgd_offset_k(addr);
-+ if (pgd_none(*pgd))
-+ return 0;
-+
-+ pud = pud_offset_k(pgd, addr);
-+ if (pud_none(*pud))
-+ return 0;
-+
-+ pmd = pmd_offset(pud, addr);
-+ if (pmd_none(*pmd))
-+ return 0;
-+ if (pmd_large(*pmd))
-+ return pfn_valid(pmd_pfn(*pmd));
-+
-+ pte = pte_offset_kernel(pmd, addr);
-+ if (pte_none(*pte))
-+ return 0;
-+ return pfn_valid(pte_pfn(*pte));
-+}
-+
-+#ifdef CONFIG_SYSCTL
-+#include <linux/sysctl.h>
-+
-+extern int exception_trace, page_fault_trace;
-+
-+static ctl_table debug_table2[] = {
-+ { 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
-+ proc_dointvec },
-+ { 0, }
-+};
-+
-+static ctl_table debug_root_table2[] = {
-+ { .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
-+ .child = debug_table2 },
-+ { 0 },
-+};
-+
-+static __init int x8664_sysctl_init(void)
-+{
-+ register_sysctl_table(debug_root_table2, 1);
-+ return 0;
-+}
-+__initcall(x8664_sysctl_init);
-+#endif
-+
-+/* A pseudo VMA to allow ptrace access for the vsyscall page. This only
-+ covers the 64bit vsyscall page now. 32bit has a real VMA now and does
-+ not need special handling anymore. */
-+
-+static struct vm_area_struct gate_vma = {
-+ .vm_start = VSYSCALL_START,
-+ .vm_end = VSYSCALL_END,
-+ .vm_page_prot = PAGE_READONLY
-+};
-+
-+struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
-+{
-+#ifdef CONFIG_IA32_EMULATION
-+ if (test_tsk_thread_flag(tsk, TIF_IA32))
-+ return NULL;
-+#endif
-+ return &gate_vma;
-+}
-+
-+int in_gate_area(struct task_struct *task, unsigned long addr)
-+{
-+ struct vm_area_struct *vma = get_gate_vma(task);
-+ if (!vma)
-+ return 0;
-+ return (addr >= vma->vm_start) && (addr < vma->vm_end);
-+}
-+
-+/* Use this when you have no reliable task/vma, typically from interrupt
-+ * context. It is less reliable than using the task's vma and may give
-+ * false positives.
-+ */
-+int in_gate_area_no_task(unsigned long addr)
-+{
-+ return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
-+}
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/arch/x86_64/mm/pageattr-xen.c b/arch/x86_64/mm/pageattr-xen.c
-new file mode 100644
-index 0000000..a2305bc
---- /dev/null
-+++ b/arch/x86_64/mm/pageattr-xen.c
-@@ -0,0 +1,382 @@
-+/*
-+ * Copyright 2002 Andi Kleen, SuSE Labs.
-+ * Thanks to Ben LaHaise for precious feedback.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/mm.h>
-+#include <linux/sched.h>
-+#include <linux/highmem.h>
-+#include <linux/module.h>
-+#include <linux/slab.h>
-+#include <asm/uaccess.h>
-+#include <asm/processor.h>
-+#include <asm/tlbflush.h>
-+#include <asm/io.h>
-+
-+#ifdef CONFIG_XEN
-+#include <asm/pgalloc.h>
-+#include <asm/mmu_context.h>
-+
-+LIST_HEAD(mm_unpinned);
-+DEFINE_SPINLOCK(mm_unpinned_lock);
-+
-+static inline void mm_walk_set_prot(void *pt, pgprot_t flags)
-+{
-+ struct page *page = virt_to_page(pt);
-+ unsigned long pfn = page_to_pfn(page);
-+
-+ BUG_ON(HYPERVISOR_update_va_mapping(
-+ (unsigned long)__va(pfn << PAGE_SHIFT),
-+ pfn_pte(pfn, flags), 0));
-+}
-+
-+static void mm_walk(struct mm_struct *mm, pgprot_t flags)
-+{
-+ pgd_t *pgd;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ pte_t *pte;
-+ int g,u,m;
-+
-+ pgd = mm->pgd;
-+ /*
-+ * Cannot iterate up to USER_PTRS_PER_PGD as these pagetables may not
-+ * be the 'current' task's pagetables (e.g., current may be 32-bit,
-+ * but the pagetables may be for a 64-bit task).
-+ * Subtracting 1 from TASK_SIZE64 means the loop limit is correct
-+ * regardless of whether TASK_SIZE64 is a multiple of PGDIR_SIZE.
-+ */
-+ for (g = 0; g <= ((TASK_SIZE64-1) / PGDIR_SIZE); g++, pgd++) {
-+ if (pgd_none(*pgd))
-+ continue;
-+ pud = pud_offset(pgd, 0);
-+ if (PTRS_PER_PUD > 1) /* not folded */
-+ mm_walk_set_prot(pud,flags);
-+ for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
-+ if (pud_none(*pud))
-+ continue;
-+ pmd = pmd_offset(pud, 0);
-+ if (PTRS_PER_PMD > 1) /* not folded */
-+ mm_walk_set_prot(pmd,flags);
-+ for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
-+ if (pmd_none(*pmd))
-+ continue;
-+ pte = pte_offset_kernel(pmd,0);
-+ mm_walk_set_prot(pte,flags);
-+ }
-+ }
-+ }
-+}
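
The loop bound in mm_walk() deserves a numeric check. A standalone sketch with PGDIR_SIZE stubbed to 8 arbitrary units:

#include <assert.h>

int main(void)
{
	unsigned long pgdir = 8;

	/* TASK_SIZE64 an exact multiple: indices 0..3, no overshoot. */
	assert((32UL - 1) / pgdir == 3);
	/* One unit past a multiple: the bound grows to cover the tail. */
	assert((33UL - 1) / pgdir == 4);
	return 0;
}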
-+
-+void mm_pin(struct mm_struct *mm)
-+{
-+ spin_lock(&mm->page_table_lock);
-+
-+ mm_walk(mm, PAGE_KERNEL_RO);
-+ BUG_ON(HYPERVISOR_update_va_mapping(
-+ (unsigned long)mm->pgd,
-+ pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL_RO),
-+ UVMF_TLB_FLUSH));
-+ BUG_ON(HYPERVISOR_update_va_mapping(
-+ (unsigned long)__user_pgd(mm->pgd),
-+ pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT, PAGE_KERNEL_RO),
-+ UVMF_TLB_FLUSH));
-+ xen_pgd_pin(__pa(mm->pgd)); /* kernel */
-+ xen_pgd_pin(__pa(__user_pgd(mm->pgd))); /* user */
-+ mm->context.pinned = 1;
-+ spin_lock(&mm_unpinned_lock);
-+ list_del(&mm->context.unpinned);
-+ spin_unlock(&mm_unpinned_lock);
-+
-+ spin_unlock(&mm->page_table_lock);
-+}
-+
-+void mm_unpin(struct mm_struct *mm)
-+{
-+ spin_lock(&mm->page_table_lock);
-+
-+ xen_pgd_unpin(__pa(mm->pgd));
-+ xen_pgd_unpin(__pa(__user_pgd(mm->pgd)));
-+ BUG_ON(HYPERVISOR_update_va_mapping(
-+ (unsigned long)mm->pgd,
-+ pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL), 0));
-+ BUG_ON(HYPERVISOR_update_va_mapping(
-+ (unsigned long)__user_pgd(mm->pgd),
-+ pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT, PAGE_KERNEL), 0));
-+ mm_walk(mm, PAGE_KERNEL);
-+ xen_tlb_flush();
-+ mm->context.pinned = 0;
-+ spin_lock(&mm_unpinned_lock);
-+ list_add(&mm->context.unpinned, &mm_unpinned);
-+ spin_unlock(&mm_unpinned_lock);
-+
-+ spin_unlock(&mm->page_table_lock);
-+}
-+
-+void mm_pin_all(void)
-+{
-+ while (!list_empty(&mm_unpinned))
-+ mm_pin(list_entry(mm_unpinned.next, struct mm_struct,
-+ context.unpinned));
-+}
-+
-+void _arch_exit_mmap(struct mm_struct *mm)
-+{
-+ struct task_struct *tsk = current;
-+
-+ task_lock(tsk);
-+
-+ /*
-+ * We aggressively remove defunct pgd from cr3. We execute unmap_vmas()
-+ * *much* faster this way, as no tlb flushes means bigger wrpt batches.
-+ */
-+ if ( tsk->active_mm == mm )
-+ {
-+ tsk->active_mm = &init_mm;
-+ atomic_inc(&init_mm.mm_count);
-+
-+ switch_mm(mm, &init_mm, tsk);
-+
-+ atomic_dec(&mm->mm_count);
-+ BUG_ON(atomic_read(&mm->mm_count) == 0);
-+ }
-+
-+ task_unlock(tsk);
-+
-+ if ( mm->context.pinned && (atomic_read(&mm->mm_count) == 1) )
-+ mm_unpin(mm);
-+}
-+#endif /* CONFIG_XEN */
-+
-+static inline pte_t *lookup_address(unsigned long address)
-+{
-+ pgd_t *pgd = pgd_offset_k(address);
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ pte_t *pte;
-+ if (pgd_none(*pgd))
-+ return NULL;
-+ pud = pud_offset(pgd, address);
-+ if (!pud_present(*pud))
-+ return NULL;
-+ pmd = pmd_offset(pud, address);
-+ if (!pmd_present(*pmd))
-+ return NULL;
-+ if (pmd_large(*pmd))
-+ return (pte_t *)pmd;
-+ pte = pte_offset_kernel(pmd, address);
-+ if (pte && !pte_present(*pte))
-+ pte = NULL;
-+ return pte;
-+}
-+
-+static struct page *split_large_page(unsigned long address, pgprot_t prot,
-+ pgprot_t ref_prot)
-+{
-+ int i;
-+ unsigned long addr;
-+ struct page *base = alloc_pages(GFP_KERNEL, 0);
-+ pte_t *pbase;
-+ if (!base)
-+ return NULL;
-+ address = __pa(address);
-+ addr = address & LARGE_PAGE_MASK;
-+ pbase = (pte_t *)page_address(base);
-+ for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
-+ pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
-+ addr == address ? prot : ref_prot);
-+ }
-+ return base;
-+}
-+
-+
-+static void flush_kernel_map(void *address)
-+{
-+ if (0 && address && cpu_has_clflush) {
-+ /* is this worth it? */
-+ int i;
-+ for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
-+ asm volatile("clflush (%0)" :: "r" (address + i));
-+ } else
-+ asm volatile("wbinvd":::"memory");
-+ if (address)
-+ __flush_tlb_one(address);
-+ else
-+ __flush_tlb_all();
-+}
-+
-+
-+static inline void flush_map(unsigned long address)
-+{
-+ on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
-+}
-+
-+struct deferred_page {
-+ struct deferred_page *next;
-+ struct page *fpage;
-+ unsigned long address;
-+};
-+static struct deferred_page *df_list; /* protected by init_mm.mmap_sem */
-+
-+static inline void save_page(unsigned long address, struct page *fpage)
-+{
-+ struct deferred_page *df;
-+ df = kmalloc(sizeof(struct deferred_page), GFP_KERNEL);
-+ if (!df) {
-+ flush_map(address);
-+ __free_page(fpage);
-+ } else {
-+ df->next = df_list;
-+ df->fpage = fpage;
-+ df->address = address;
-+ df_list = df;
-+ }
-+}
-+
-+/*
-+ * No more special protections in this 2/4MB area - revert to a
-+ * large page again.
-+ */
-+static void revert_page(unsigned long address, pgprot_t ref_prot)
-+{
-+ pgd_t *pgd;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ pte_t large_pte;
-+
-+ pgd = pgd_offset_k(address);
-+ BUG_ON(pgd_none(*pgd));
-+ pud = pud_offset(pgd,address);
-+ BUG_ON(pud_none(*pud));
-+ pmd = pmd_offset(pud, address);
-+ BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
-+ pgprot_val(ref_prot) |= _PAGE_PSE;
-+ large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, ref_prot);
-+ set_pte((pte_t *)pmd, large_pte);
-+}
-+
-+static int
-+__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
-+ pgprot_t ref_prot)
-+{
-+ pte_t *kpte;
-+ struct page *kpte_page;
-+ unsigned kpte_flags;
-+ pgprot_t ref_prot2;
-+ kpte = lookup_address(address);
-+ if (!kpte) return 0;
-+ kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
-+ kpte_flags = pte_val(*kpte);
-+ if (pgprot_val(prot) != pgprot_val(ref_prot)) {
-+ if ((kpte_flags & _PAGE_PSE) == 0) {
-+ set_pte(kpte, pfn_pte(pfn, prot));
-+ } else {
-+ /*
-+ * split_large_page will take the reference for this change_page_attr
-+ * on the split page.
-+ */
-+
-+ struct page *split;
-+ ref_prot2 = __pgprot(pgprot_val(pte_pgprot(*lookup_address(address))) & ~(1<<_PAGE_BIT_PSE));
-+
-+ split = split_large_page(address, prot, ref_prot2);
-+ if (!split)
-+ return -ENOMEM;
-+ set_pte(kpte,mk_pte(split, ref_prot2));
-+ kpte_page = split;
-+ }
-+ get_page(kpte_page);
-+ } else if ((kpte_flags & _PAGE_PSE) == 0) {
-+ set_pte(kpte, pfn_pte(pfn, ref_prot));
-+ __put_page(kpte_page);
-+ } else
-+ BUG();
-+
-+ /* on x86-64 the direct mapping set at boot is not using 4k pages */
-+ /*
-+ * ..., but the XEN guest kernels (currently) do:
-+ * If the pte was reserved, it means it was created at boot
-+ * time (not via split_large_page) and in turn we must not
-+ * replace it with a large page.
-+ */
-+#ifndef CONFIG_XEN
-+ BUG_ON(PageReserved(kpte_page));
-+#else
-+ if (!PageReserved(kpte_page))
-+#endif
-+ switch (page_count(kpte_page)) {
-+ case 1:
-+ save_page(address, kpte_page);
-+ revert_page(address, ref_prot);
-+ break;
-+ case 0:
-+ BUG(); /* memleak and failed 2M page regeneration */
-+ }
-+ return 0;
-+}
-+
-+/*
-+ * Change the page attributes of a page in the linear mapping.
-+ *
-+ * This should be used when a page is mapped with a different caching policy
-+ * than write-back somewhere - some CPUs do not like it when mappings with
-+ * different caching policies exist. This changes the page attributes of the
-+ * kernel linear mapping too.
-+ *
-+ * The caller needs to ensure that there are no conflicting mappings elsewhere.
-+ * This function only deals with the kernel linear map.
-+ *
-+ * Caller must call global_flush_tlb() after this.
-+ */
-+int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
-+{
-+ int err = 0;
-+ int i;
-+
-+ down_write(&init_mm.mmap_sem);
-+ for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
-+ unsigned long pfn = __pa(address) >> PAGE_SHIFT;
-+
-+ err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
-+ if (err)
-+ break;
-+ /* Handle kernel mapping too which aliases part of the
-+ * lowmem */
-+ if (__pa(address) < KERNEL_TEXT_SIZE) {
-+ unsigned long addr2;
-+ pgprot_t prot2 = prot;
-+ addr2 = __START_KERNEL_map + __pa(address);
-+ pgprot_val(prot2) &= ~_PAGE_NX;
-+ err = __change_page_attr(addr2, pfn, prot2, PAGE_KERNEL_EXEC);
-+ }
-+ }
-+ up_write(&init_mm.mmap_sem);
-+ return err;
-+}
-+
-+/* Don't call this for MMIO areas that may not have a mem_map entry */
-+int change_page_attr(struct page *page, int numpages, pgprot_t prot)
-+{
-+ unsigned long addr = (unsigned long)page_address(page);
-+ return change_page_attr_addr(addr, numpages, prot);
-+}
-+
-+void global_flush_tlb(void)
-+{
-+ struct deferred_page *df, *next_df;
-+
-+ down_read(&init_mm.mmap_sem);
-+ df = xchg(&df_list, NULL);
-+ up_read(&init_mm.mmap_sem);
-+ flush_map((df && !df->next) ? df->address : 0);
-+ for (; df; df = next_df) {
-+ next_df = df->next;
-+ if (df->fpage)
-+ __free_page(df->fpage);
-+ kfree(df);
-+ }
-+}
-+
-+EXPORT_SYMBOL(change_page_attr);
-+EXPORT_SYMBOL(global_flush_tlb);
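
The comment block above change_page_attr_addr() spells out the contract: change the attributes, then call global_flush_tlb(), which also drains the deferred-free list built by save_page(). A hedged usage sketch; the wrapper name is hypothetical:

/* Hypothetical caller: write-protect npages starting at addr. */
static int example_write_protect(void *addr, int npages)
{
	int err = change_page_attr(virt_to_page(addr), npages,
				   PAGE_KERNEL_RO);

	global_flush_tlb();	/* mandatory follow-up per the comment above */
	return err;
}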
-diff --git a/arch/x86_64/pci/Makefile b/arch/x86_64/pci/Makefile
-index a8f75a2..73dd478 100644
---- a/arch/x86_64/pci/Makefile
-+++ b/arch/x86_64/pci/Makefile
-@@ -15,10 +15,22 @@ obj-$(CONFIG_PCI_MMCONFIG) += mmconfig.o
-
- obj-$(CONFIG_NUMA) += k8-bus.o
-
-+# pcifront should be after mmconfig.o and direct.o as it should only
-+# take over if direct access to the PCI bus is unavailable
-+obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += pcifront.o
-+
- direct-y += ../../i386/pci/direct.o
- acpi-y += ../../i386/pci/acpi.o
-+pcifront-y += ../../i386/pci/pcifront.o
- legacy-y += ../../i386/pci/legacy.o
- irq-y += ../../i386/pci/irq.o
- common-y += ../../i386/pci/common.o
- fixup-y += ../../i386/pci/fixup.o
- i386-y += ../../i386/pci/i386.o
-+
-+ifdef CONFIG_XEN
-+irq-y := ../../i386/pci/irq-xen.o
-+include $(srctree)/scripts/Makefile.xen
-+
-+obj-y := $(call cherrypickxen, $(obj-y))
-+endif
-diff --git a/drivers/Makefile b/drivers/Makefile
-index 5c69b86..73c1c4a 100644
---- a/drivers/Makefile
-+++ b/drivers/Makefile
-@@ -34,6 +34,7 @@ obj-y += base/ block/ misc/ mfd/ net/
- obj-$(CONFIG_NUBUS) += nubus/
- obj-$(CONFIG_ATM) += atm/
- obj-$(CONFIG_PPC_PMAC) += macintosh/
-+obj-$(CONFIG_XEN) += xen/
- obj-$(CONFIG_IDE) += ide/
- obj-$(CONFIG_FC4) += fc4/
- obj-$(CONFIG_SCSI) += scsi/
-diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
-index 33e2ca8..b6e265a 100644
---- a/drivers/acpi/Kconfig
-+++ b/drivers/acpi/Kconfig
-@@ -46,7 +46,7 @@ if ACPI
-
- config ACPI_SLEEP
- bool "Sleep States"
-- depends on X86 && (!SMP || SUSPEND_SMP)
-+ depends on X86 && (!SMP || SUSPEND_SMP) && !XEN
- depends on PM
- default y
- ---help---
-@@ -287,6 +287,7 @@ config ACPI_SYSTEM
- config X86_PM_TIMER
- bool "Power Management Timer Support" if EMBEDDED
- depends on X86
-+ depends on !XEN
- default y
- help
- The Power Management Timer is available on all ACPI-capable,
-diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
-index 31d4f3f..a60095a 100644
---- a/drivers/acpi/tables.c
-+++ b/drivers/acpi/tables.c
-@@ -572,6 +572,11 @@ static int __init acpi_table_get_sdt(str
- *
- * result: sdt_entry[] is initialized
- */
-+#if defined(CONFIG_X86_XEN) || defined(CONFIG_X86_64_XEN)
-+#define acpi_rsdp_phys_to_va(rsdp_phys) isa_bus_to_virt(rsdp_phys)
-+#else
-+#define acpi_rsdp_phys_to_va(rsdp_phys) __va(rsdp_phys)
-+#endif
-
- int __init acpi_table_init(void)
- {
-@@ -587,7 +592,7 @@ int __init acpi_table_init(void)
- return -ENODEV;
- }
-
-- rsdp = (struct acpi_table_rsdp *)__va(rsdp_phys);
-+ rsdp = (struct acpi_table_rsdp *)acpi_rsdp_phys_to_va(rsdp_phys);
- if (!rsdp) {
- printk(KERN_WARNING PREFIX "Unable to map RSDP\n");
- return -ENODEV;
-diff --git a/drivers/char/mem.c b/drivers/char/mem.c
-index 29c41f4..e65a3ef 100644
---- a/drivers/char/mem.c
-+++ b/drivers/char/mem.c
-@@ -108,6 +108,7 @@ static inline int valid_mmap_phys_addr_r
- }
- #endif
-
-+#ifndef ARCH_HAS_DEV_MEM
- /*
- * This function reads the *physical* memory. The f_pos points directly to the
- * memory location.
-@@ -232,6 +233,7 @@ static ssize_t write_mem(struct file * f
- *ppos += written;
- return written;
- }
-+#endif
-
- #ifndef __HAVE_PHYS_MEM_ACCESS_PROT
- static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
-@@ -773,6 +775,7 @@ static int open_port(struct inode * inod
- #define open_kmem open_mem
- #define open_oldmem open_mem
-
-+#ifndef ARCH_HAS_DEV_MEM
- static struct file_operations mem_fops = {
- .llseek = memory_lseek,
- .read = read_mem,
-@@ -780,6 +783,9 @@ static struct file_operations mem_fops =
- .mmap = mmap_mem,
- .open = open_mem,
- };
-+#else
-+extern struct file_operations mem_fops;
-+#endif
-
- static struct file_operations kmem_fops = {
- .llseek = memory_lseek,
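
The mem.c hunk turns mem_fops into an override point: when ARCH_HAS_DEV_MEM is defined, the architecture supplies the structure that the extern declaration references. A hedged sketch of such an arch-side definition; every handler name here is hypothetical:

/* Hypothetical arch-side definition matching the extern above. */
struct file_operations mem_fops = {
	.llseek = arch_mem_lseek,	/* hypothetical arch handler */
	.read   = arch_read_mem,	/* hypothetical arch handler */
	.write  = arch_write_mem,	/* hypothetical arch handler */
	.mmap   = arch_mmap_mem,	/* hypothetical arch handler */
	.open   = arch_open_mem,	/* hypothetical arch handler */
};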
-diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
-index a6873bf..a0879ae 100644
---- a/drivers/char/tpm/Kconfig
-+++ b/drivers/char/tpm/Kconfig
-@@ -22,7 +22,7 @@ config TCG_TPM
-
- config TCG_NSC
- tristate "National Semiconductor TPM Interface"
-- depends on TCG_TPM
-+ depends on TCG_TPM && !XEN_UNPRIVILEGED_GUEST
- ---help---
-	  If you have a TPM security chip from National Semiconductor
- say Yes and it will be accessible from within Linux. To
-@@ -31,7 +31,7 @@ config TCG_NSC
-
- config TCG_ATMEL
- tristate "Atmel TPM Interface"
-- depends on TCG_TPM
-+ depends on TCG_TPM && !XEN_UNPRIVILEGED_GUEST
- ---help---
- If you have a TPM security chip from Atmel say Yes and it
- will be accessible from within Linux. To compile this driver
-@@ -49,5 +49,15 @@ config TCG_INFINEON
- Further information on this driver and the supported hardware
- can be found at http://www.prosec.rub.de/tpm
-
-+config TCG_XEN
-+ tristate "XEN TPM Interface"
-+ depends on TCG_TPM && XEN && XEN_TPMDEV_FRONTEND
-+ ---help---
-+	  If you want to make TPM support available to a Xen
-+	  user domain, say Yes and it will be accessible from
-+	  within Linux. To compile this driver as a module,
-+	  choose M here; the module will be called tpm_xen.
-+
- endmenu
-
-diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
-index ba4582d..f266799 100644
---- a/drivers/char/tpm/Makefile
-+++ b/drivers/char/tpm/Makefile
-@@ -8,3 +8,4 @@ endif
- obj-$(CONFIG_TCG_NSC) += tpm_nsc.o
- obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o
- obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o
-+obj-$(CONFIG_TCG_XEN) += tpm_xen.o
-diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
-index 5a38704..9709a60 100644
---- a/drivers/char/tpm/tpm.c
-+++ b/drivers/char/tpm/tpm.c
-@@ -30,7 +30,8 @@
-
- enum tpm_const {
- TPM_MINOR = 224, /* officially assigned */
-- TPM_BUFSIZE = 2048,
-+ TPM_MIN_BUFSIZE = 2048,
-+ TPM_MAX_BUFSIZE = 64 * 1024,
- TPM_NUM_DEVICES = 256,
- TPM_NUM_MASK_ENTRIES = TPM_NUM_DEVICES / (8 * sizeof(int))
- };
-@@ -52,14 +53,14 @@ static void timeout_work(void * ptr)
-
- down(&chip->buffer_mutex);
- atomic_set(&chip->data_pending, 0);
-- memset(chip->data_buffer, 0, TPM_BUFSIZE);
-+ memset(chip->data_buffer, 0, get_chip_buffersize(chip));
- up(&chip->buffer_mutex);
- }
-
- /*
- * Internal kernel interface to transmit TPM commands
- */
--static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
-+static ssize_t tpm_transmit(struct tpm_chip * chip, const char *buf,
- size_t bufsiz)
- {
- ssize_t rc;
-@@ -351,7 +352,7 @@ int tpm_open(struct inode *inode, struct
-
- spin_unlock(&driver_lock);
-
-- chip->data_buffer = kmalloc(TPM_BUFSIZE * sizeof(u8), GFP_KERNEL);
-+ chip->data_buffer = kmalloc(get_chip_buffersize(chip) * sizeof(u8), GFP_KERNEL);
- if (chip->data_buffer == NULL) {
- chip->num_opens--;
- put_device(chip->dev);
-@@ -399,8 +400,8 @@ ssize_t tpm_write(struct file *file, con
-
- down(&chip->buffer_mutex);
-
-- if (in_size > TPM_BUFSIZE)
-- in_size = TPM_BUFSIZE;
-+ if (in_size > get_chip_buffersize(chip))
-+ in_size = get_chip_buffersize(chip);
-
- if (copy_from_user
- (chip->data_buffer, (void __user *) buf, in_size)) {
-@@ -409,9 +410,11 @@ ssize_t tpm_write(struct file *file, con
- }
-
- /* atomic tpm command send and result receive */
-- out_size = tpm_transmit(chip, chip->data_buffer, TPM_BUFSIZE);
-+ out_size = tpm_transmit(chip, chip->data_buffer,
-+ get_chip_buffersize(chip));
-
- atomic_set(&chip->data_pending, out_size);
-+ atomic_set(&chip->data_position, 0);
- up(&chip->buffer_mutex);
-
- /* Set a timeout by which the reader must come claim the result */
-@@ -427,20 +430,33 @@ ssize_t tpm_read(struct file * file, cha
- {
- struct tpm_chip *chip = file->private_data;
- int ret_size;
-+ int pos, pending = 0;
-
-- del_singleshot_timer_sync(&chip->user_read_timer);
-- flush_scheduled_work();
- ret_size = atomic_read(&chip->data_pending);
-- atomic_set(&chip->data_pending, 0);
- if (ret_size > 0) { /* relay data */
- if (size < ret_size)
- ret_size = size;
-
-+ pos = atomic_read(&chip->data_position);
-+
- down(&chip->buffer_mutex);
-- if (copy_to_user(buf, chip->data_buffer, ret_size))
-+ if (copy_to_user(buf, &chip->data_buffer[pos], ret_size)) {
- ret_size = -EFAULT;
-+ } else {
-+ pending = atomic_read(&chip->data_pending) - ret_size;
-+ if ( pending ) {
-+ atomic_set( &chip->data_pending, pending );
-+ atomic_set( &chip->data_position, pos+ret_size );
-+ }
-+ }
- up(&chip->buffer_mutex);
- }
-+
-+ if ( ret_size <= 0 || pending == 0 ) {
-+ atomic_set( &chip->data_pending, 0 );
-+ del_singleshot_timer_sync(&chip->user_read_timer);
-+ flush_scheduled_work();
-+ }
-
- return ret_size;
- }
-@@ -544,6 +560,12 @@ int tpm_register_hardware(struct device
- chip->user_read_timer.data = (unsigned long) chip;
-
- chip->vendor = entry;
-+
-+ if (entry->buffersize < TPM_MIN_BUFSIZE) {
-+ entry->buffersize = TPM_MIN_BUFSIZE;
-+ } else if (entry->buffersize > TPM_MAX_BUFSIZE) {
-+ entry->buffersize = TPM_MAX_BUFSIZE;
-+ }
-
- chip->dev_num = -1;
-
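
The data_position bookkeeping added to tpm_read() above lets user space drain a response larger than a single read(). A minimal user-space sketch under that assumption; the chunk size is arbitrary:

#include <unistd.h>
#include <stddef.h>

/* Read an expected-size TPM response in 512-byte chunks. */
static size_t drain_response(int fd, unsigned char *buf, size_t total)
{
	size_t got = 0;

	while (got < total) {
		ssize_t n = read(fd, buf + got, 512);
		if (n <= 0)
			break;
		got += (size_t)n;
	}
	return got;
}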
-diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
-index fd3a4be..090b889 100644
---- a/drivers/char/tpm/tpm.h
-+++ b/drivers/char/tpm/tpm.h
-@@ -50,6 +50,7 @@ struct tpm_vendor_specific {
- u8 req_complete_mask;
- u8 req_complete_val;
- u8 req_canceled;
-+ u32 buffersize;
- void __iomem *iobase; /* ioremapped address */
- unsigned long base; /* TPM base address */
-
-@@ -74,6 +75,7 @@ struct tpm_chip {
- /* Data passed to and from the tpm via the read/write calls */
- u8 *data_buffer;
- atomic_t data_pending;
-+ atomic_t data_position;
- struct semaphore buffer_mutex;
-
- struct timer_list user_read_timer; /* user needs to claim result */
-@@ -99,6 +101,11 @@ static inline void tpm_write_index(int b
- outb(value & 0xFF, base+1);
- }
-
-+static inline u32 get_chip_buffersize(struct tpm_chip *chip)
-+{
-+ return chip->vendor->buffersize;
-+}
-+
- extern int tpm_register_hardware(struct device *,
- struct tpm_vendor_specific *);
- extern int tpm_open(struct inode *, struct file *);
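
The buffersize field added to tpm_vendor_specific is clamped at registration time, as the tpm.c hunk shows. The clamp reduces to a simple range check; a standalone sketch with the enum values from the patch (2048 and 64 * 1024):

static unsigned int clamp_buffersize(unsigned int req)
{
	if (req < 2048)			/* TPM_MIN_BUFSIZE */
		return 2048;
	if (req > 64 * 1024)		/* TPM_MAX_BUFSIZE */
		return 64 * 1024;
	return req;
}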
-diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
-deleted file mode 100644
-index ff36549..0000000
---- a/drivers/char/tpm/tpm_atmel.c
-+++ /dev/null
-@@ -1,230 +0,0 @@
--/*
-- * Copyright (C) 2004 IBM Corporation
-- *
-- * Authors:
-- * Leendert van Doorn <leendert@watson.ibm.com>
-- * Dave Safford <safford@watson.ibm.com>
-- * Reiner Sailer <sailer@watson.ibm.com>
-- * Kylene Hall <kjhall@us.ibm.com>
-- *
-- * Maintained by: <tpmdd_devel@lists.sourceforge.net>
-- *
-- * Device driver for TCG/TCPA TPM (trusted platform module).
-- * Specifications at www.trustedcomputinggroup.org
-- *
-- * This program is free software; you can redistribute it and/or
-- * modify it under the terms of the GNU General Public License as
-- * published by the Free Software Foundation, version 2 of the
-- * License.
-- *
-- */
--
--#include "tpm.h"
--#include "tpm_atmel.h"
--
--/* write status bits */
--enum tpm_atmel_write_status {
-- ATML_STATUS_ABORT = 0x01,
-- ATML_STATUS_LASTBYTE = 0x04
--};
--/* read status bits */
--enum tpm_atmel_read_status {
-- ATML_STATUS_BUSY = 0x01,
-- ATML_STATUS_DATA_AVAIL = 0x02,
-- ATML_STATUS_REWRITE = 0x04,
-- ATML_STATUS_READY = 0x08
--};
--
--static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count)
--{
-- u8 status, *hdr = buf;
-- u32 size;
-- int i;
-- __be32 *native_size;
--
-- /* start reading header */
-- if (count < 6)
-- return -EIO;
--
-- for (i = 0; i < 6; i++) {
-- status = ioread8(chip->vendor->iobase + 1);
-- if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
-- dev_err(chip->dev, "error reading header\n");
-- return -EIO;
-- }
-- *buf++ = ioread8(chip->vendor->iobase);
-- }
--
-- /* size of the data received */
-- native_size = (__force __be32 *) (hdr + 2);
-- size = be32_to_cpu(*native_size);
--
-- if (count < size) {
-- dev_err(chip->dev,
-- "Recv size(%d) less than available space\n", size);
-- for (; i < size; i++) { /* clear the waiting data anyway */
-- status = ioread8(chip->vendor->iobase + 1);
-- if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
-- dev_err(chip->dev, "error reading data\n");
-- return -EIO;
-- }
-- }
-- return -EIO;
-- }
--
-- /* read all the data available */
-- for (; i < size; i++) {
-- status = ioread8(chip->vendor->iobase + 1);
-- if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
-- dev_err(chip->dev, "error reading data\n");
-- return -EIO;
-- }
-- *buf++ = ioread8(chip->vendor->iobase);
-- }
--
-- /* make sure data available is gone */
-- status = ioread8(chip->vendor->iobase + 1);
--
-- if (status & ATML_STATUS_DATA_AVAIL) {
-- dev_err(chip->dev, "data available is stuck\n");
-- return -EIO;
-- }
--
-- return size;
--}
--
--static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count)
--{
-- int i;
--
-- dev_dbg(chip->dev, "tpm_atml_send:\n");
-- for (i = 0; i < count; i++) {
-- dev_dbg(chip->dev, "%d 0x%x(%d)\n", i, buf[i], buf[i]);
-- iowrite8(buf[i], chip->vendor->iobase);
-- }
--
-- return count;
--}
--
--static void tpm_atml_cancel(struct tpm_chip *chip)
--{
-- iowrite8(ATML_STATUS_ABORT, chip->vendor->iobase + 1);
--}
--
--static u8 tpm_atml_status(struct tpm_chip *chip)
--{
-- return ioread8(chip->vendor->iobase + 1);
--}
--
--static struct file_operations atmel_ops = {
-- .owner = THIS_MODULE,
-- .llseek = no_llseek,
-- .open = tpm_open,
-- .read = tpm_read,
-- .write = tpm_write,
-- .release = tpm_release,
--};
--
--static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
--static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
--static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
--static DEVICE_ATTR(cancel, S_IWUSR |S_IWGRP, NULL, tpm_store_cancel);
--
--static struct attribute* atmel_attrs[] = {
-- &dev_attr_pubek.attr,
-- &dev_attr_pcrs.attr,
-- &dev_attr_caps.attr,
-- &dev_attr_cancel.attr,
-- NULL,
--};
--
--static struct attribute_group atmel_attr_grp = { .attrs = atmel_attrs };
--
--static struct tpm_vendor_specific tpm_atmel = {
-- .recv = tpm_atml_recv,
-- .send = tpm_atml_send,
-- .cancel = tpm_atml_cancel,
-- .status = tpm_atml_status,
-- .req_complete_mask = ATML_STATUS_BUSY | ATML_STATUS_DATA_AVAIL,
-- .req_complete_val = ATML_STATUS_DATA_AVAIL,
-- .req_canceled = ATML_STATUS_READY,
-- .attr_group = &atmel_attr_grp,
-- .miscdev = { .fops = &atmel_ops, },
--};
--
--static struct platform_device *pdev;
--
--static void atml_plat_remove(void)
--{
-- struct tpm_chip *chip = dev_get_drvdata(&pdev->dev);
--
-- if (chip) {
-- if (chip->vendor->have_region)
-- atmel_release_region(chip->vendor->base,
-- chip->vendor->region_size);
-- atmel_put_base_addr(chip->vendor);
-- tpm_remove_hardware(chip->dev);
-- platform_device_unregister(pdev);
-- }
--}
--
--static struct device_driver atml_drv = {
-- .name = "tpm_atmel",
-- .bus = &platform_bus_type,
-- .owner = THIS_MODULE,
-- .suspend = tpm_pm_suspend,
-- .resume = tpm_pm_resume,
--};
--
--static int __init init_atmel(void)
--{
-- int rc = 0;
--
-- driver_register(&atml_drv);
--
-- if ((tpm_atmel.iobase = atmel_get_base_addr(&tpm_atmel)) == NULL) {
-- rc = -ENODEV;
-- goto err_unreg_drv;
-- }
--
-- tpm_atmel.have_region =
-- (atmel_request_region
-- (tpm_atmel.base, tpm_atmel.region_size,
-- "tpm_atmel0") == NULL) ? 0 : 1;
--
-- if (IS_ERR
-- (pdev =
-- platform_device_register_simple("tpm_atmel", -1, NULL, 0))) {
-- rc = PTR_ERR(pdev);
-- goto err_rel_reg;
-- }
--
-- if ((rc = tpm_register_hardware(&pdev->dev, &tpm_atmel)) < 0)
-- goto err_unreg_dev;
-- return 0;
--
--err_unreg_dev:
-- platform_device_unregister(pdev);
--err_rel_reg:
-- atmel_put_base_addr(&tpm_atmel);
-- if (tpm_atmel.have_region)
-- atmel_release_region(tpm_atmel.base,
-- tpm_atmel.region_size);
--err_unreg_drv:
-- driver_unregister(&atml_drv);
-- return rc;
--}
--
--static void __exit cleanup_atmel(void)
--{
-- driver_unregister(&atml_drv);
-- atml_plat_remove();
--}
--
--module_init(init_atmel);
--module_exit(cleanup_atmel);
--
--MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
--MODULE_DESCRIPTION("TPM Driver");
--MODULE_VERSION("2.0");
--MODULE_LICENSE("GPL");
-diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
-deleted file mode 100644
-index 680a8e3..0000000
---- a/drivers/char/tpm/tpm_nsc.c
-+++ /dev/null
-@@ -1,395 +0,0 @@
--/*
-- * Copyright (C) 2004 IBM Corporation
-- *
-- * Authors:
-- * Leendert van Doorn <leendert@watson.ibm.com>
-- * Dave Safford <safford@watson.ibm.com>
-- * Reiner Sailer <sailer@watson.ibm.com>
-- * Kylene Hall <kjhall@us.ibm.com>
-- *
-- * Maintained by: <tpmdd_devel@lists.sourceforge.net>
-- *
-- * Device driver for TCG/TCPA TPM (trusted platform module).
-- * Specifications at www.trustedcomputinggroup.org
-- *
-- * This program is free software; you can redistribute it and/or
-- * modify it under the terms of the GNU General Public License as
-- * published by the Free Software Foundation, version 2 of the
-- * License.
-- *
-- */
--
--#include <linux/platform_device.h>
--#include "tpm.h"
--
--/* National definitions */
--enum tpm_nsc_addr{
-- TPM_NSC_IRQ = 0x07,
-- TPM_NSC_BASE0_HI = 0x60,
-- TPM_NSC_BASE0_LO = 0x61,
-- TPM_NSC_BASE1_HI = 0x62,
-- TPM_NSC_BASE1_LO = 0x63
--};
--
--enum tpm_nsc_index {
-- NSC_LDN_INDEX = 0x07,
-- NSC_SID_INDEX = 0x20,
-- NSC_LDC_INDEX = 0x30,
-- NSC_DIO_INDEX = 0x60,
-- NSC_CIO_INDEX = 0x62,
-- NSC_IRQ_INDEX = 0x70,
-- NSC_ITS_INDEX = 0x71
--};
--
--enum tpm_nsc_status_loc {
-- NSC_STATUS = 0x01,
-- NSC_COMMAND = 0x01,
-- NSC_DATA = 0x00
--};
--
--/* status bits */
--enum tpm_nsc_status {
-- NSC_STATUS_OBF = 0x01, /* output buffer full */
-- NSC_STATUS_IBF = 0x02, /* input buffer full */
-- NSC_STATUS_F0 = 0x04, /* F0 */
-- NSC_STATUS_A2 = 0x08, /* A2 */
-- NSC_STATUS_RDY = 0x10, /* ready to receive command */
-- NSC_STATUS_IBR = 0x20 /* ready to receive data */
--};
--
--/* command bits */
--enum tpm_nsc_cmd_mode {
-- NSC_COMMAND_NORMAL = 0x01, /* normal mode */
-- NSC_COMMAND_EOC = 0x03,
-- NSC_COMMAND_CANCEL = 0x22
--};
--/*
-- * Wait for a certain status to appear
-- */
--static int wait_for_stat(struct tpm_chip *chip, u8 mask, u8 val, u8 * data)
--{
-- unsigned long stop;
--
-- /* status immediately available check */
-- *data = inb(chip->vendor->base + NSC_STATUS);
-- if ((*data & mask) == val)
-- return 0;
--
-- /* wait for status */
-- stop = jiffies + 10 * HZ;
-- do {
-- msleep(TPM_TIMEOUT);
-- *data = inb(chip->vendor->base + 1);
-- if ((*data & mask) == val)
-- return 0;
-- }
-- while (time_before(jiffies, stop));
--
-- return -EBUSY;
--}
--
--static int nsc_wait_for_ready(struct tpm_chip *chip)
--{
-- int status;
-- unsigned long stop;
--
-- /* status immediately available check */
-- status = inb(chip->vendor->base + NSC_STATUS);
-- if (status & NSC_STATUS_OBF)
-- status = inb(chip->vendor->base + NSC_DATA);
-- if (status & NSC_STATUS_RDY)
-- return 0;
--
-- /* wait for status */
-- stop = jiffies + 100;
-- do {
-- msleep(TPM_TIMEOUT);
-- status = inb(chip->vendor->base + NSC_STATUS);
-- if (status & NSC_STATUS_OBF)
-- status = inb(chip->vendor->base + NSC_DATA);
-- if (status & NSC_STATUS_RDY)
-- return 0;
-- }
-- while (time_before(jiffies, stop));
--
-- dev_info(chip->dev, "wait for ready failed\n");
-- return -EBUSY;
--}
--
--
--static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count)
--{
-- u8 *buffer = buf;
-- u8 data, *p;
-- u32 size;
-- __be32 *native_size;
--
-- if (count < 6)
-- return -EIO;
--
-- if (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0) {
-- dev_err(chip->dev, "F0 timeout\n");
-- return -EIO;
-- }
-- if ((data =
-- inb(chip->vendor->base + NSC_DATA)) != NSC_COMMAND_NORMAL) {
-- dev_err(chip->dev, "not in normal mode (0x%x)\n",
-- data);
-- return -EIO;
-- }
--
-- /* read the whole packet */
-- for (p = buffer; p < &buffer[count]; p++) {
-- if (wait_for_stat
-- (chip, NSC_STATUS_OBF, NSC_STATUS_OBF, &data) < 0) {
-- dev_err(chip->dev,
-- "OBF timeout (while reading data)\n");
-- return -EIO;
-- }
-- if (data & NSC_STATUS_F0)
-- break;
-- *p = inb(chip->vendor->base + NSC_DATA);
-- }
--
-- if ((data & NSC_STATUS_F0) == 0 &&
-- (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0)) {
-- dev_err(chip->dev, "F0 not set\n");
-- return -EIO;
-- }
-- if ((data = inb(chip->vendor->base + NSC_DATA)) != NSC_COMMAND_EOC) {
-- dev_err(chip->dev,
-- "expected end of command(0x%x)\n", data);
-- return -EIO;
-- }
--
-- native_size = (__force __be32 *) (buf + 2);
-- size = be32_to_cpu(*native_size);
--
-- if (count < size)
-- return -EIO;
--
-- return size;
--}
--
--static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count)
--{
-- u8 data;
-- int i;
--
-- /*
-- * If we hit the chip with back-to-back commands it locks up
-- * and never sets IBF. Hitting it with this "hammer" seems to
-- * fix it. Not sure why this is needed, we followed the flow
-- * chart in the manual to the letter.
-- */
-- outb(NSC_COMMAND_CANCEL, chip->vendor->base + NSC_COMMAND);
--
-- if (nsc_wait_for_ready(chip) != 0)
-- return -EIO;
--
-- if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
-- dev_err(chip->dev, "IBF timeout\n");
-- return -EIO;
-- }
--
-- outb(NSC_COMMAND_NORMAL, chip->vendor->base + NSC_COMMAND);
-- if (wait_for_stat(chip, NSC_STATUS_IBR, NSC_STATUS_IBR, &data) < 0) {
-- dev_err(chip->dev, "IBR timeout\n");
-- return -EIO;
-- }
--
-- for (i = 0; i < count; i++) {
-- if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
-- dev_err(chip->dev,
-- "IBF timeout (while writing data)\n");
-- return -EIO;
-- }
-- outb(buf[i], chip->vendor->base + NSC_DATA);
-- }
--
-- if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
-- dev_err(chip->dev, "IBF timeout\n");
-- return -EIO;
-- }
-- outb(NSC_COMMAND_EOC, chip->vendor->base + NSC_COMMAND);
--
-- return count;
--}
--
--static void tpm_nsc_cancel(struct tpm_chip *chip)
--{
-- outb(NSC_COMMAND_CANCEL, chip->vendor->base + NSC_COMMAND);
--}
--
--static u8 tpm_nsc_status(struct tpm_chip *chip)
--{
-- return inb(chip->vendor->base + NSC_STATUS);
--}
--
--static struct file_operations nsc_ops = {
-- .owner = THIS_MODULE,
-- .llseek = no_llseek,
-- .open = tpm_open,
-- .read = tpm_read,
-- .write = tpm_write,
-- .release = tpm_release,
--};
--
--static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
--static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
--static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
--static DEVICE_ATTR(cancel, S_IWUSR|S_IWGRP, NULL, tpm_store_cancel);
--
--static struct attribute * nsc_attrs[] = {
-- &dev_attr_pubek.attr,
-- &dev_attr_pcrs.attr,
-- &dev_attr_caps.attr,
-- &dev_attr_cancel.attr,
-- NULL,
--};
--
--static struct attribute_group nsc_attr_grp = { .attrs = nsc_attrs };
--
--static struct tpm_vendor_specific tpm_nsc = {
-- .recv = tpm_nsc_recv,
-- .send = tpm_nsc_send,
-- .cancel = tpm_nsc_cancel,
-- .status = tpm_nsc_status,
-- .req_complete_mask = NSC_STATUS_OBF,
-- .req_complete_val = NSC_STATUS_OBF,
-- .req_canceled = NSC_STATUS_RDY,
-- .attr_group = &nsc_attr_grp,
-- .miscdev = { .fops = &nsc_ops, },
--};
--
--static struct platform_device *pdev = NULL;
--
--static void __devexit tpm_nsc_remove(struct device *dev)
--{
-- struct tpm_chip *chip = dev_get_drvdata(dev);
-- if ( chip ) {
-- release_region(chip->vendor->base, 2);
-- tpm_remove_hardware(chip->dev);
-- }
--}
--
--static struct device_driver nsc_drv = {
-- .name = "tpm_nsc",
-- .bus = &platform_bus_type,
-- .owner = THIS_MODULE,
-- .suspend = tpm_pm_suspend,
-- .resume = tpm_pm_resume,
--};
--
--static int __init init_nsc(void)
--{
-- int rc = 0;
-- int lo, hi;
-- int nscAddrBase = TPM_ADDR;
--
--
-- /* verify that it is a National part (SID) */
-- if (tpm_read_index(TPM_ADDR, NSC_SID_INDEX) != 0xEF) {
-- nscAddrBase = (tpm_read_index(TPM_SUPERIO_ADDR, 0x2C)<<8)|
-- (tpm_read_index(TPM_SUPERIO_ADDR, 0x2B)&0xFE);
-- if (tpm_read_index(nscAddrBase, NSC_SID_INDEX) != 0xF6)
-- return -ENODEV;
-- }
--
-- driver_register(&nsc_drv);
--
-- hi = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_HI);
-- lo = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_LO);
-- tpm_nsc.base = (hi<<8) | lo;
--
-- /* enable the DPM module */
-- tpm_write_index(nscAddrBase, NSC_LDC_INDEX, 0x01);
--
-- pdev = kzalloc(sizeof(struct platform_device), GFP_KERNEL);
-- if (!pdev) {
-- rc = -ENOMEM;
-- goto err_unreg_drv;
-- }
--
-- pdev->name = "tpm_nscl0";
-- pdev->id = -1;
-- pdev->num_resources = 0;
-- pdev->dev.release = tpm_nsc_remove;
-- pdev->dev.driver = &nsc_drv;
--
-- if ((rc = platform_device_register(pdev)) < 0)
-- goto err_free_dev;
--
-- if (request_region(tpm_nsc.base, 2, "tpm_nsc0") == NULL ) {
-- rc = -EBUSY;
-- goto err_unreg_dev;
-- }
--
-- if ((rc = tpm_register_hardware(&pdev->dev, &tpm_nsc)) < 0)
-- goto err_rel_reg;
--
-- dev_dbg(&pdev->dev, "NSC TPM detected\n");
-- dev_dbg(&pdev->dev,
-- "NSC LDN 0x%x, SID 0x%x, SRID 0x%x\n",
-- tpm_read_index(nscAddrBase,0x07), tpm_read_index(nscAddrBase,0x20),
-- tpm_read_index(nscAddrBase,0x27));
-- dev_dbg(&pdev->dev,
-- "NSC SIOCF1 0x%x SIOCF5 0x%x SIOCF6 0x%x SIOCF8 0x%x\n",
-- tpm_read_index(nscAddrBase,0x21), tpm_read_index(nscAddrBase,0x25),
-- tpm_read_index(nscAddrBase,0x26), tpm_read_index(nscAddrBase,0x28));
-- dev_dbg(&pdev->dev, "NSC IO Base0 0x%x\n",
-- (tpm_read_index(nscAddrBase,0x60) << 8) | tpm_read_index(nscAddrBase,0x61));
-- dev_dbg(&pdev->dev, "NSC IO Base1 0x%x\n",
-- (tpm_read_index(nscAddrBase,0x62) << 8) | tpm_read_index(nscAddrBase,0x63));
-- dev_dbg(&pdev->dev, "NSC Interrupt number and wakeup 0x%x\n",
-- tpm_read_index(nscAddrBase,0x70));
-- dev_dbg(&pdev->dev, "NSC IRQ type select 0x%x\n",
-- tpm_read_index(nscAddrBase,0x71));
-- dev_dbg(&pdev->dev,
-- "NSC DMA channel select0 0x%x, select1 0x%x\n",
-- tpm_read_index(nscAddrBase,0x74), tpm_read_index(nscAddrBase,0x75));
-- dev_dbg(&pdev->dev,
-- "NSC Config "
-- "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
-- tpm_read_index(nscAddrBase,0xF0), tpm_read_index(nscAddrBase,0xF1),
-- tpm_read_index(nscAddrBase,0xF2), tpm_read_index(nscAddrBase,0xF3),
-- tpm_read_index(nscAddrBase,0xF4), tpm_read_index(nscAddrBase,0xF5),
-- tpm_read_index(nscAddrBase,0xF6), tpm_read_index(nscAddrBase,0xF7),
-- tpm_read_index(nscAddrBase,0xF8), tpm_read_index(nscAddrBase,0xF9));
--
-- dev_info(&pdev->dev,
-- "NSC TPM revision %d\n",
-- tpm_read_index(nscAddrBase, 0x27) & 0x1F);
--
-- return 0;
--
--err_rel_reg:
-- release_region(tpm_nsc.base, 2);
--err_unreg_dev:
-- platform_device_unregister(pdev);
--err_free_dev:
-- kfree(pdev);
--err_unreg_drv:
-- driver_unregister(&nsc_drv);
-- return rc;
--}
--
--static void __exit cleanup_nsc(void)
--{
-- if (pdev) {
-- tpm_nsc_remove(&pdev->dev);
-- platform_device_unregister(pdev);
-- kfree(pdev);
-- pdev = NULL;
-- }
--
-- driver_unregister(&nsc_drv);
--}
--
--module_init(init_nsc);
--module_exit(cleanup_nsc);
--
--MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
--MODULE_DESCRIPTION("TPM Driver");
--MODULE_VERSION("2.0");
--MODULE_LICENSE("GPL");
-diff --git a/drivers/char/tpm/tpm_xen.c b/drivers/char/tpm/tpm_xen.c
-new file mode 100644
-index 0000000..0ee6920
---- /dev/null
-+++ b/drivers/char/tpm/tpm_xen.c
-@@ -0,0 +1,536 @@
-+/*
-+ * Copyright (C) 2004 IBM Corporation
-+ *
-+ * Authors:
-+ * Leendert van Doorn <leendert@watson.ibm.com>
-+ * Dave Safford <safford@watson.ibm.com>
-+ * Reiner Sailer <sailer@watson.ibm.com>
-+ * Kylene Hall <kjhall@us.ibm.com>
-+ * Stefan Berger <stefanb@us.ibm.com>
-+ *
-+ * Maintained by: <tpmdd_devel@lists.sourceforge.net>
-+ *
-+ * Device driver for TCG/TCPA TPM (trusted platform module) for XEN.
-+ * Specifications at www.trustedcomputinggroup.org
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License as
-+ * published by the Free Software Foundation, version 2 of the
-+ * License.
-+ *
-+ */
-+
-+#include <asm/uaccess.h>
-+#include <linux/list.h>
-+#include <xen/tpmfe.h>
-+#include <linux/device.h>
-+#include <linux/interrupt.h>
-+#include <linux/platform_device.h>
-+#include "tpm.h"
-+
-+/* read status bits */
-+enum {
-+ STATUS_BUSY = 0x01,
-+ STATUS_DATA_AVAIL = 0x02,
-+ STATUS_READY = 0x04
-+};
-+
-+#define MIN(x,y) (((x) < (y)) ? (x) : (y))
-+
-+struct transmission {
-+ struct list_head next;
-+ unsigned char *request;
-+ unsigned int request_len;
-+ unsigned char *rcv_buffer;
-+ unsigned int buffersize;
-+ unsigned int flags;
-+};
-+
-+enum {
-+ TRANSMISSION_FLAG_WAS_QUEUED = 0x1
-+};
-+
-+struct data_exchange {
-+ struct transmission *current_request;
-+ spinlock_t req_list_lock;
-+ wait_queue_head_t req_wait_queue;
-+
-+ struct list_head queued_requests;
-+
-+ struct transmission *current_response;
-+ spinlock_t resp_list_lock;
-+ wait_queue_head_t resp_wait_queue; // processes waiting for responses
-+
-+	struct transmission *req_cancelled;       // if a cancellation was encountered
-+
-+ unsigned int fe_status;
-+ unsigned int flags;
-+};
-+
-+enum {
-+ DATAEX_FLAG_QUEUED_ONLY = 0x1
-+};
-+
-+static struct data_exchange dataex;
-+
-+static unsigned long disconnect_time;
-+
-+static struct tpmfe_device tpmfe;
-+
-+/* local function prototypes */
-+static void __exit cleanup_xen(void);
-+
-+
-+/* =============================================================
-+ * Some utility functions
-+ * =============================================================
-+ */
-+static inline struct transmission *
-+transmission_alloc(void)
-+{
-+ return kzalloc(sizeof(struct transmission), GFP_KERNEL);
-+}
-+
-+static inline unsigned char *
-+transmission_set_buffer(struct transmission *t,
-+ unsigned char *buffer, unsigned int len)
-+{
-+ kfree(t->request);
-+ t->request = kmalloc(len, GFP_KERNEL);
-+ if (t->request) {
-+ memcpy(t->request,
-+ buffer,
-+ len);
-+ t->request_len = len;
-+ }
-+ return t->request;
-+}
-+
-+static inline void
-+transmission_free(struct transmission *t)
-+{
-+ kfree(t->request);
-+ kfree(t->rcv_buffer);
-+ kfree(t);
-+}
-+
-+/* =============================================================
-+ * Interface with the TPM shared memory driver for XEN
-+ * =============================================================
-+ */
-+static int tpm_recv(const u8 *buffer, size_t count, const void *ptr)
-+{
-+ int ret_size = 0;
-+ struct transmission *t;
-+
-+ /*
-+ * The list with requests must contain one request
-+ * only and the element there must be the one that
-+ * was passed to me from the front-end.
-+ */
-+ if (dataex.current_request != ptr) {
-+		printk("WARNING: The request pointer is different from the "
-+ "pointer the shared memory driver returned to me. "
-+ "%p != %p\n",
-+ dataex.current_request, ptr);
-+ }
-+
-+ /*
-+ * If the request has been cancelled, just quit here
-+ */
-+ if (dataex.req_cancelled == (struct transmission *)ptr) {
-+ if (dataex.current_request == dataex.req_cancelled) {
-+ dataex.current_request = NULL;
-+ }
-+ transmission_free(dataex.req_cancelled);
-+ dataex.req_cancelled = NULL;
-+ return 0;
-+ }
-+
-+ if (NULL != (t = dataex.current_request)) {
-+ transmission_free(t);
-+ dataex.current_request = NULL;
-+ }
-+
-+ t = transmission_alloc();
-+ if (t) {
-+ unsigned long flags;
-+ t->rcv_buffer = kmalloc(count, GFP_KERNEL);
-+ if (! t->rcv_buffer) {
-+ transmission_free(t);
-+ return -ENOMEM;
-+ }
-+ t->buffersize = count;
-+ memcpy(t->rcv_buffer, buffer, count);
-+ ret_size = count;
-+
-+ spin_lock_irqsave(&dataex.resp_list_lock ,flags);
-+ dataex.current_response = t;
-+ spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
-+ wake_up_interruptible(&dataex.resp_wait_queue);
-+ }
-+ return ret_size;
-+}
-+
-+
-+static void tpm_fe_status(unsigned int flags)
-+{
-+ dataex.fe_status = flags;
-+ if ((dataex.fe_status & TPMFE_STATUS_CONNECTED) == 0) {
-+ disconnect_time = jiffies;
-+ }
-+}
-+
-+/* =============================================================
-+ * Interface with the generic TPM driver
-+ * =============================================================
-+ */
-+static int tpm_xen_recv(struct tpm_chip *chip, u8 * buf, size_t count)
-+{
-+ unsigned long flags;
-+ int rc = 0;
-+
-+ spin_lock_irqsave(&dataex.resp_list_lock, flags);
-+ /*
-+ * Check if the previous operation only queued the command
-+ * In this case there won't be a response, so I just
-+ * return from here and reset that flag. In any other
-+ * case I should receive a response from the back-end.
-+ */
-+ if ((dataex.flags & DATAEX_FLAG_QUEUED_ONLY) != 0) {
-+ dataex.flags &= ~DATAEX_FLAG_QUEUED_ONLY;
-+ spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
-+ /*
-+ * a little hack here. The first few measurements
-+ * are queued since there's no way to talk to the
-+		 * TPM yet (due to slowness of the control channel).
-+		 * So we just make IMA happy by giving it 30 NULL
-+		 * bytes back; the most important part is
-+		 * that the result code is '0'.
-+ */
-+
-+ count = MIN(count, 30);
-+ memset(buf, 0x0, count);
-+ return count;
-+ }
-+ /*
-+ * Check whether something is in the responselist and if
-+ * there's nothing in the list wait for something to appear.
-+ */
-+
-+ if (NULL == dataex.current_response) {
-+ spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
-+ interruptible_sleep_on_timeout(&dataex.resp_wait_queue,
-+ 1000);
-+ spin_lock_irqsave(&dataex.resp_list_lock ,flags);
-+ }
-+
-+ if (NULL != dataex.current_response) {
-+ struct transmission *t = dataex.current_response;
-+ dataex.current_response = NULL;
-+ rc = MIN(count, t->buffersize);
-+ memcpy(buf, t->rcv_buffer, rc);
-+ transmission_free(t);
-+ }
-+
-+ spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
-+ return rc;
-+}
-+
-+static int tpm_xen_send(struct tpm_chip *chip, u8 * buf, size_t count)
-+{
-+ /*
-+ * We simply pass the packet onto the XEN shared
-+ * memory driver.
-+ */
-+ unsigned long flags;
-+ int rc;
-+ struct transmission *t = transmission_alloc();
-+
-+ spin_lock_irqsave(&dataex.req_list_lock, flags);
-+ /*
-+ * If there's a current request, it must be the
-+ * previous request that has timed out.
-+ */
-+ if (dataex.current_request != NULL) {
-+ printk("WARNING: Sending although there is a request outstanding.\n"
-+ " Previous request must have timed out.\n");
-+ transmission_free(dataex.current_request);
-+ dataex.current_request = NULL;
-+ }
-+
-+ if (t != NULL) {
-+ unsigned int error = 0;
-+ /*
-+ * Queue the packet if the driver below is not
-+ * ready, yet, or there is any packet already
-+ * in the queue.
-+ * If the driver below is ready, unqueue all
-+ * packets first before sending our current
-+ * packet.
-+ * For each unqueued packet, except for the
-+ * last (=current) packet, call the function
-+ * tpm_xen_recv to wait for the response to come
-+ * back.
-+ */
-+ if ((dataex.fe_status & TPMFE_STATUS_CONNECTED) == 0) {
-+ if (time_after(jiffies, disconnect_time + HZ * 10)) {
-+ rc = -ENOENT;
-+ } else {
-+ /*
-+ * copy the request into the buffer
-+ */
-+ if (transmission_set_buffer(t, buf, count)
-+ == NULL) {
-+ transmission_free(t);
-+ rc = -ENOMEM;
-+ goto exit;
-+ }
-+ dataex.flags |= DATAEX_FLAG_QUEUED_ONLY;
-+ list_add_tail(&t->next, &dataex.queued_requests);
-+ rc = 0;
-+ }
-+ } else {
-+ /*
-+ * Check whether there are any packets in the queue
-+ */
-+ while (!list_empty(&dataex.queued_requests)) {
-+ /*
-+ * Need to dequeue them.
-+ * Read the result into a dummy buffer.
-+ */
-+ unsigned char buffer[1];
-+ struct transmission *qt = (struct transmission *) dataex.queued_requests.next;
-+ list_del(&qt->next);
-+ dataex.current_request = qt;
-+ spin_unlock_irqrestore(&dataex.req_list_lock,
-+ flags);
-+
-+ rc = tpm_fe_send(tpmfe.tpm_private,
-+ qt->request,
-+ qt->request_len,
-+ qt);
-+
-+ if (rc < 0) {
-+ spin_lock_irqsave(&dataex.req_list_lock, flags);
-+ if ((qt = dataex.current_request) != NULL) {
-+ /*
-+ * requeue it at the beginning
-+ * of the list
-+ */
-+ list_add(&qt->next,
-+ &dataex.queued_requests);
-+ }
-+ dataex.current_request = NULL;
-+ error = 1;
-+ break;
-+ }
-+ /*
-+ * After this point qt is not valid anymore!
-+			 * It is freed when the front-end delivers the data
-+			 * by calling tpm_recv().
-+ */
-+
-+ /*
-+ * Try to receive the response now into the provided dummy
-+ * buffer (I don't really care about this response since
-+ * there is no receiver anymore for this response)
-+ */
-+ rc = tpm_xen_recv(chip, buffer, sizeof(buffer));
-+
-+ spin_lock_irqsave(&dataex.req_list_lock, flags);
-+ }
-+
-+ if (error == 0) {
-+ /*
-+ * Finally, send the current request.
-+ */
-+ dataex.current_request = t;
-+ /*
-+ * Call the shared memory driver
-+ * Pass to it the buffer with the request, the
-+ * amount of bytes in the request and
-+ * a void * pointer (here: transmission structure)
-+ */
-+ rc = tpm_fe_send(tpmfe.tpm_private,
-+ buf, count, t);
-+ /*
-+ * The generic TPM driver will call
-+ * the function to receive the response.
-+ */
-+ if (rc < 0) {
-+ dataex.current_request = NULL;
-+ goto queue_it;
-+ }
-+ } else {
-+queue_it:
-+ if (transmission_set_buffer(t, buf, count) == NULL) {
-+ transmission_free(t);
-+ rc = -ENOMEM;
-+ goto exit;
-+ }
-+ /*
-+			 * An error occurred. Don't even try
-+ * to send the current request. Just
-+ * queue it.
-+ */
-+ dataex.flags |= DATAEX_FLAG_QUEUED_ONLY;
-+ list_add_tail(&t->next,
-+ &dataex.queued_requests);
-+ rc = 0;
-+ }
-+ }
-+ } else {
-+ rc = -ENOMEM;
-+ }
-+
-+exit:
-+ spin_unlock_irqrestore(&dataex.req_list_lock, flags);
-+ return rc;
-+}
-+
-+static void tpm_xen_cancel(struct tpm_chip *chip)
-+{
-+ unsigned long flags;
-+ spin_lock_irqsave(&dataex.resp_list_lock,flags);
-+
-+ dataex.req_cancelled = dataex.current_request;
-+
-+ spin_unlock_irqrestore(&dataex.resp_list_lock,flags);
-+}
-+
-+static u8 tpm_xen_status(struct tpm_chip *chip)
-+{
-+ unsigned long flags;
-+ u8 rc = 0;
-+ spin_lock_irqsave(&dataex.resp_list_lock, flags);
-+ /*
-+ * Data are available if:
-+ * - there's a current response
-+ * - the last packet was queued only (this is fake, but necessary to
-+ * get the generic TPM layer to call the receive function.)
-+ */
-+ if (NULL != dataex.current_response ||
-+ 0 != (dataex.flags & DATAEX_FLAG_QUEUED_ONLY)) {
-+ rc = STATUS_DATA_AVAIL;
-+ }
-+ spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
-+ return rc;
-+}
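-+/*
-+ * Illustrative flow (sketch): tpm_xen_send() may queue a request
-+ * while the front-end is disconnected and set DATAEX_FLAG_QUEUED_ONLY;
-+ * the fake STATUS_DATA_AVAIL reported above then coaxes the generic
-+ * TPM layer into calling tpm_xen_recv() to collect the (empty) result.
-+ */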
-+
-+static struct file_operations tpm_xen_ops = {
-+ .owner = THIS_MODULE,
-+ .llseek = no_llseek,
-+ .open = tpm_open,
-+ .read = tpm_read,
-+ .write = tpm_write,
-+ .release = tpm_release,
-+};
-+
-+static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
-+static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
-+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
-+static DEVICE_ATTR(cancel, S_IWUSR |S_IWGRP, NULL, tpm_store_cancel);
-+
-+static struct attribute* xen_attrs[] = {
-+ &dev_attr_pubek.attr,
-+ &dev_attr_pcrs.attr,
-+ &dev_attr_caps.attr,
-+ &dev_attr_cancel.attr,
-+ NULL,
-+};
-+
-+static struct attribute_group xen_attr_grp = { .attrs = xen_attrs };
-+
-+static struct tpm_vendor_specific tpm_xen = {
-+ .recv = tpm_xen_recv,
-+ .send = tpm_xen_send,
-+ .cancel = tpm_xen_cancel,
-+ .status = tpm_xen_status,
-+ .req_complete_mask = STATUS_BUSY | STATUS_DATA_AVAIL,
-+ .req_complete_val = STATUS_DATA_AVAIL,
-+ .req_canceled = STATUS_READY,
-+ .base = 0,
-+ .attr_group = &xen_attr_grp,
-+ .miscdev.fops = &tpm_xen_ops,
-+ .buffersize = 64 * 1024,
-+};
-+
-+static struct platform_device *pdev;
-+
-+static struct tpmfe_device tpmfe = {
-+ .receive = tpm_recv,
-+ .status = tpm_fe_status,
-+};
-+
-+
-+static int __init init_xen(void)
-+{
-+ int rc;
-+
-+ if ((xen_start_info->flags & SIF_INITDOMAIN)) {
-+ return -EPERM;
-+ }
-+ /*
-+	 * Register the device with the low-level front-end
-+	 * driver.
-+ */
-+ if ((rc = tpm_fe_register_receiver(&tpmfe)) < 0) {
-+ goto err_exit;
-+ }
-+
-+ /*
-+ * Register our device with the system.
-+ */
-+ pdev = platform_device_register_simple("tpm_vtpm", -1, NULL, 0);
-+ if (IS_ERR(pdev)) {
-+ rc = PTR_ERR(pdev);
-+ goto err_unreg_fe;
-+ }
-+
-+ tpm_xen.buffersize = tpmfe.max_tx_size;
-+
-+ if ((rc = tpm_register_hardware(&pdev->dev, &tpm_xen)) < 0) {
-+ goto err_unreg_pdev;
-+ }
-+
-+ dataex.current_request = NULL;
-+ spin_lock_init(&dataex.req_list_lock);
-+ init_waitqueue_head(&dataex.req_wait_queue);
-+ INIT_LIST_HEAD(&dataex.queued_requests);
-+
-+ dataex.current_response = NULL;
-+ spin_lock_init(&dataex.resp_list_lock);
-+ init_waitqueue_head(&dataex.resp_wait_queue);
-+
-+ disconnect_time = jiffies;
-+
-+ return 0;
-+
-+
-+err_unreg_pdev:
-+ platform_device_unregister(pdev);
-+err_unreg_fe:
-+ tpm_fe_unregister_receiver();
-+
-+err_exit:
-+ return rc;
-+}
-+
-+static void __exit cleanup_xen(void)
-+{
-+ struct tpm_chip *chip = dev_get_drvdata(&pdev->dev);
-+ if (chip) {
-+ tpm_remove_hardware(chip->dev);
-+ platform_device_unregister(pdev);
-+ tpm_fe_unregister_receiver();
-+ }
-+}
-+
-+module_init(init_xen);
-+module_exit(cleanup_xen);
-+
-+MODULE_AUTHOR("Stefan Berger (stefanb@us.ibm.com)");
-+MODULE_DESCRIPTION("TPM Driver for XEN (shared memory)");
-+MODULE_VERSION("1.0");
-+MODULE_LICENSE("GPL");
-diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
-index 53d3d06..752834f 100644
---- a/drivers/char/tty_io.c
-+++ b/drivers/char/tty_io.c
-@@ -132,6 +132,8 @@ LIST_HEAD(tty_drivers); /* linked list
- vt.c for deeply disgusting hack reasons */
- DECLARE_MUTEX(tty_sem);
-
-+int console_use_vt = 1;
-+
- #ifdef CONFIG_UNIX98_PTYS
- extern struct tty_driver *ptm_driver; /* Unix98 pty masters; for /dev/ptmx */
- extern int pty_limit; /* Config limit on Unix98 ptys */
-@@ -2054,7 +2056,7 @@ retry_open:
- goto got_driver;
- }
- #ifdef CONFIG_VT
-- if (device == MKDEV(TTY_MAJOR,0)) {
-+ if (console_use_vt && (device == MKDEV(TTY_MAJOR,0))) {
- extern struct tty_driver *console_driver;
- driver = console_driver;
- index = fg_console;
-@@ -3237,6 +3239,8 @@ static int __init tty_init(void)
- #endif
-
- #ifdef CONFIG_VT
-+ if (!console_use_vt)
-+ goto out_vt;
- cdev_init(&vc0_cdev, &console_fops);
- if (cdev_add(&vc0_cdev, MKDEV(TTY_MAJOR, 0), 1) ||
- register_chrdev_region(MKDEV(TTY_MAJOR, 0), 1, "/dev/vc/0") < 0)
-@@ -3245,6 +3249,7 @@ static int __init tty_init(void)
- class_device_create(tty_class, NULL, MKDEV(TTY_MAJOR, 0), NULL, "tty0");
-
- vty_init();
-+ out_vt:
- #endif
- return 0;
- }
-diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
-index 1e371a5..f0dff5a 100644
---- a/drivers/firmware/Kconfig
-+++ b/drivers/firmware/Kconfig
-@@ -8,7 +8,7 @@ menu "Firmware Drivers"
- config EDD
- tristate "BIOS Enhanced Disk Drive calls determine boot disk (EXPERIMENTAL)"
- depends on EXPERIMENTAL
-- depends on !IA64
-+ depends on !IA64 && !XEN
- help
- Say Y or M here if you want to enable BIOS Enhanced Disk Drive
- Services real mode BIOS calls to determine which disk
-diff --git a/drivers/ide/legacy/hd.c b/drivers/ide/legacy/hd.c
-index 6439dec..835d108 100644
---- a/drivers/ide/legacy/hd.c
-+++ b/drivers/ide/legacy/hd.c
-@@ -718,7 +718,7 @@ static int __init hd_init(void)
- device_timer.function = hd_times_out;
- blk_queue_hardsect_size(hd_queue, 512);
-
--#ifdef __i386__
-+#if defined(__i386__) && !defined(CONFIG_XEN)
- if (!NR_HD) {
- extern struct drive_info drive_info;
- unsigned char *BIOS = (unsigned char *) &drive_info;
-diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
-index f187fd8..a47a5df 100644
---- a/drivers/pci/Kconfig
-+++ b/drivers/pci/Kconfig
-@@ -5,6 +5,7 @@ config PCI_MSI
- bool "Message Signaled Interrupts (MSI and MSI-X)"
- depends on PCI
- depends on (X86_LOCAL_APIC && X86_IO_APIC) || IA64
-+ depends on !XEN
- help
- This allows device drivers to enable MSI (Message Signaled
- Interrupts). Message Signaled Interrupts enable a device to
-diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
-index b3c561a..fa1fdb0 100644
---- a/drivers/serial/Kconfig
-+++ b/drivers/serial/Kconfig
-@@ -11,6 +11,7 @@ menu "Serial drivers"
- config SERIAL_8250
- tristate "8250/16550 and compatible serial support"
- depends on (BROKEN || !SPARC)
-+ depends on !XEN_DISABLE_SERIAL
- select SERIAL_CORE
- ---help---
- This selects whether you want to include the driver for the standard
-diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
-index f5079c7..cb5942d 100644
---- a/drivers/video/Kconfig
-+++ b/drivers/video/Kconfig
-@@ -495,7 +495,7 @@ config FB_HGA_ACCEL
-
- config VIDEO_SELECT
- bool
-- depends on (FB = y) && X86
-+ depends on (FB = y) && X86 && !XEN
- default y
-
- config FB_SGIVW
-diff --git a/drivers/video/fbmon.c b/drivers/video/fbmon.c
-index 7c74e73..73116e3 100644
---- a/drivers/video/fbmon.c
-+++ b/drivers/video/fbmon.c
-@@ -1281,7 +1281,7 @@ int fb_validate_mode(const struct fb_var
- -EINVAL : 0;
- }
-
--#if defined(__i386__)
-+#if defined(__i386__) && !defined(CONFIG_XEN)
- #include <linux/pci.h>
-
- /*
-diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
-new file mode 100644
-index 0000000..7a6a569
---- /dev/null
-+++ b/drivers/xen/Kconfig
-@@ -0,0 +1,200 @@
-+#
-+# This Kconfig describes xen options
-+#
-+
-+mainmenu "Xen Configuration"
-+
-+config XEN
-+ bool
-+ default y if X86_XEN || X86_64_XEN
-+ help
-+ This is the Linux Xen port.
-+
-+config NO_IDLE_HZ
-+ bool
-+ depends on XEN
-+ default y
-+
-+if XEN
-+menu "XEN"
-+
-+config XEN_PRIVILEGED_GUEST
-+ bool "Privileged Guest (domain 0)"
-+	depends on XEN
-+ default n
-+ help
-+ Support for privileged operation (domain 0)
-+
-+config XEN_UNPRIVILEGED_GUEST
-+ bool
-+ default !XEN_PRIVILEGED_GUEST
-+
-+config XEN_PCIDEV_BACKEND
-+ bool "PCI device backend driver"
-+ select PCI
-+ default y if XEN_PRIVILEGED_GUEST
-+ help
-+ The PCI device backend driver allows the kernel to export arbitrary
-+ PCI devices to other guests.
-+
-+choice
-+ prompt "PCI Backend Mode"
-+ depends on XEN_PCIDEV_BACKEND
-+ default XEN_PCIDEV_BACKEND_VPCI
-+
-+config XEN_PCIDEV_BACKEND_VPCI
-+ bool "Virtual PCI"
-+ ---help---
-+ This PCI Backend hides the true PCI topology and makes the frontend
-+ think there is a single PCI bus with only the exported devices on it.
-+ For example, a device at 03:05.0 will be re-assigned to 00:00.0. A
-+ second device at 02:1a.0 will be re-assigned to 00:01.0.
-+
-+config XEN_PCIDEV_BACKEND_PASS
-+ bool "Passthrough"
-+ ---help---
-+ This PCI Backend provides a real view of the PCI topology to the
-+ frontend (for example, a device at 06:01.b will still appear at
-+ 06:01.b to the frontend). This is similar to how Xen 2.0.x exposed
-+ PCI devices to its driver domains. This may be required for drivers
-+	  which depend on finding their hardware in certain bus/slot
-+ locations.
-+
-+endchoice
-+
-+config XEN_PCIDEV_BE_DEBUG
-+ bool "PCI Backend Debugging"
-+ depends on XEN_PCIDEV_BACKEND
-+ default n
-+
-+config XEN_BLKDEV_BACKEND
-+ tristate "Block-device backend driver"
-+ default y
-+ help
-+ The block-device backend driver allows the kernel to export its
-+ block devices to other guests via a high-performance shared-memory
-+ interface.
-+
-+config XEN_BLKDEV_TAP_BE
-+ tristate "Block Tap support for backend driver (DANGEROUS)"
-+ depends on XEN_BLKDEV_BACKEND
-+ default n
-+ help
-+ If you intend to use the block tap driver, the backend domain will
-+ not know the domain id of the real frontend, and so will not be able
-+ to map its data pages. This modifies the backend to attempt to map
-+ from both the tap domain and the real frontend. This presents a
-+ security risk, and so should ONLY be used for development
-+ with the blktap. This option will be removed as the block drivers are
-+ modified to use grant tables.
-+
-+config XEN_NETDEV_BACKEND
-+ tristate "Network-device backend driver"
-+ default y
-+ help
-+ The network-device backend driver allows the kernel to export its
-+ network devices to other guests via a high-performance shared-memory
-+ interface.
-+
-+config XEN_NETDEV_PIPELINED_TRANSMITTER
-+ bool "Pipelined transmitter (DANGEROUS)"
-+ depends on XEN_NETDEV_BACKEND
-+ default n
-+ help
-+ If the net backend is a dumb domain, such as a transparent Ethernet
-+ bridge with no local IP interface, it is safe to say Y here to get
-+ slightly lower network overhead.
-+ If the backend has a local IP interface; or may be doing smart things
-+ like reassembling packets to perform firewall filtering; or if you
-+ are unsure; or if you experience network hangs when this option is
-+ enabled; then you must say N here.
-+
-+config XEN_NETDEV_LOOPBACK
-+ tristate "Network-device loopback driver"
-+ depends on XEN_NETDEV_BACKEND
-+ default y
-+ help
-+ A two-interface loopback device to emulate a local netfront-netback
-+ connection.
-+
-+config XEN_TPMDEV_BACKEND
-+ tristate "TPM-device backend driver"
-+ default n
-+ help
-+	  The TPM-device backend driver allows the kernel to export
-+	  virtual TPM devices to other guests.
-+
-+config XEN_TPMDEV_CLOSE_IF_VTPM_FAILS
-+ bool "TPM backend closes upon vTPM failure"
-+ depends on XEN_TPMDEV_BACKEND
-+ default n
-+ help
-+ The TPM backend closes the channel if the vTPM in userspace indicates
-+ a failure. The corresponding domain's channel will be closed.
-+ Say Y if you want this feature.
-+
-+config XEN_BLKDEV_FRONTEND
-+ tristate "Block-device frontend driver"
-+ depends on XEN
-+ default y
-+ help
-+ The block-device frontend driver allows the kernel to access block
-+	  devices mounted within another guest OS. Unless you are building a
-+	  dedicated device-driver domain or a master control domain
-+	  (domain 0), you almost certainly want to say Y here.
-+
-+config XEN_NETDEV_FRONTEND
-+ tristate "Network-device frontend driver"
-+ depends on XEN
-+ default y
-+ help
-+ The network-device frontend driver allows the kernel to access
-+ network interfaces within another guest OS. Unless you are building a
-+	  dedicated device-driver domain or a master control domain
-+	  (domain 0), you almost certainly want to say Y here.
-+
-+config XEN_BLKDEV_TAP
-+ tristate "Block device tap driver"
-+ default n
-+ help
-+ This driver allows a VM to interact on block device channels
-+ to other VMs. Block messages may be passed through or redirected
-+ to a character device, allowing device prototyping in application
-+ space. Odds are that you want to say N here.
-+
-+config XEN_TPMDEV_FRONTEND
-+ tristate "TPM-device frontend driver"
-+ default n
-+ select TCG_TPM
-+ select TCG_XEN
-+ help
-+	  The TPM-device frontend driver allows the kernel to access a
-+	  virtual TPM provided by a backend domain.
-+
-+config XEN_SCRUB_PAGES
-+ bool "Scrub memory before freeing it to Xen"
-+ default y
-+ help
-+ Erase memory contents before freeing it back to Xen's global
-+ pool. This ensures that any secrets contained within that
-+ memory (e.g., private keys) cannot be found by other guests that
-+ may be running on the machine. Most people will want to say Y here.
-+ If security is not a concern then you may increase performance by
-+ saying N.
-+
-+config XEN_DISABLE_SERIAL
-+ bool "Disable serial port drivers"
-+ default y
-+ help
-+ Disable serial port drivers, allowing the Xen console driver
-+ to provide a serial console at ttyS0.
-+
-+endmenu
-+
-+config HAVE_ARCH_ALLOC_SKB
-+ bool
-+ default y
-+
-+config HAVE_ARCH_DEV_ALLOC_SKB
-+ bool
-+ default y
-+
-+endif
-diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
-new file mode 100644
-index 0000000..657e999
---- /dev/null
-+++ b/drivers/xen/Makefile
-@@ -0,0 +1,22 @@
-+
-+obj-y += net_driver_util.o
-+obj-y += util.o
-+
-+obj-y += core/
-+obj-y += char/
-+obj-y += console/
-+obj-y += evtchn/
-+obj-y += balloon/
-+obj-y += privcmd/
-+obj-y += xenbus/
-+
-+obj-$(CONFIG_XEN_BLKDEV_BACKEND) += blkback/
-+obj-$(CONFIG_XEN_NETDEV_BACKEND) += netback/
-+obj-$(CONFIG_XEN_TPMDEV_BACKEND) += tpmback/
-+obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += blkfront/
-+obj-$(CONFIG_XEN_NETDEV_FRONTEND) += netfront/
-+obj-$(CONFIG_XEN_BLKDEV_TAP) += blktap/
-+obj-$(CONFIG_XEN_TPMDEV_FRONTEND) += tpmfront/
-+obj-$(CONFIG_XEN_PCIDEV_BACKEND) += pciback/
-+obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += pcifront/
-+
-diff --git a/drivers/xen/balloon/Makefile b/drivers/xen/balloon/Makefile
-new file mode 100644
-index 0000000..0e3a348
---- /dev/null
-+++ b/drivers/xen/balloon/Makefile
-@@ -0,0 +1,2 @@
-+
-+obj-y += balloon.o
-diff --git a/drivers/xen/balloon/balloon.c b/drivers/xen/balloon/balloon.c
-new file mode 100644
-index 0000000..6a70087
---- /dev/null
-+++ b/drivers/xen/balloon/balloon.c
-@@ -0,0 +1,585 @@
-+/******************************************************************************
-+ * balloon.c
-+ *
-+ * Xen balloon driver - enables returning/claiming memory to/from Xen.
-+ *
-+ * Copyright (c) 2003, B Dragovic
-+ * Copyright (c) 2003-2004, M Williamson, K Fraser
-+ * Copyright (c) 2005 Dan M. Smith, IBM Corporation
-+ *
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/sched.h>
-+#include <linux/errno.h>
-+#include <linux/mm.h>
-+#include <linux/mman.h>
-+#include <linux/smp_lock.h>
-+#include <linux/pagemap.h>
-+#include <linux/bootmem.h>
-+#include <linux/highmem.h>
-+#include <linux/vmalloc.h>
-+#include <xen/xen_proc.h>
-+#include <asm/hypervisor.h>
-+#include <xen/balloon.h>
-+#include <xen/interface/memory.h>
-+#include <asm/pgalloc.h>
-+#include <asm/pgtable.h>
-+#include <asm/uaccess.h>
-+#include <asm/tlb.h>
-+#include <linux/list.h>
-+
-+#include <xen/xenbus.h>
-+
-+#define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
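-+/* Worked example (assuming 4 KiB pages, i.e. PAGE_SHIFT == 12):
-+ * PAGES2KB(256) == 256 << 2 == 1024 kB. */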
-+
-+static struct proc_dir_entry *balloon_pde;
-+
-+static DECLARE_MUTEX(balloon_mutex);
-+
-+/*
-+ * Protects atomic reservation decrease/increase against concurrent increases.
-+ * Also protects non-atomic updates of current_pages and driver_pages, and
-+ * balloon lists.
-+ */
-+spinlock_t balloon_lock = SPIN_LOCK_UNLOCKED;
-+
-+/* We aim for 'current allocation' == 'target allocation'. */
-+static unsigned long current_pages;
-+static unsigned long target_pages;
-+
-+/* VM /proc information for memory */
-+extern unsigned long totalram_pages;
-+
-+/* We may hit the hard limit in Xen. If we do, we remember it. */
-+static unsigned long hard_limit;
-+
-+/*
-+ * Drivers may alter the memory reservation independently, but they must
-+ * inform the balloon driver so that we can avoid hitting the hard limit.
-+ */
-+static unsigned long driver_pages;
-+
-+/* List of ballooned pages, threaded through the mem_map array. */
-+static LIST_HEAD(ballooned_pages);
-+static unsigned long balloon_low, balloon_high;
-+
-+/* Main work function, always executed in process context. */
-+static void balloon_process(void *unused);
-+static DECLARE_WORK(balloon_worker, balloon_process, NULL);
-+static struct timer_list balloon_timer;
-+
-+#define PAGE_TO_LIST(p) (&(p)->ballooned)
-+#define LIST_TO_PAGE(l) list_entry((l), struct page, ballooned)
-+#define UNLIST_PAGE(p) \
-+ do { \
-+ list_del(PAGE_TO_LIST(p)); \
-+ PAGE_TO_LIST(p)->next = NULL; \
-+ PAGE_TO_LIST(p)->prev = NULL; \
-+ } while(0)
-+
-+#define IPRINTK(fmt, args...) \
-+ printk(KERN_INFO "xen_mem: " fmt, ##args)
-+#define WPRINTK(fmt, args...) \
-+ printk(KERN_WARNING "xen_mem: " fmt, ##args)
-+
-+/* balloon_append: add the given page to the balloon. */
-+static void balloon_append(struct page *page)
-+{
-+ /* Lowmem is re-populated first, so highmem pages go at list tail. */
-+ if (PageHighMem(page)) {
-+ list_add_tail(PAGE_TO_LIST(page), &ballooned_pages);
-+ balloon_high++;
-+ } else {
-+ list_add(PAGE_TO_LIST(page), &ballooned_pages);
-+ balloon_low++;
-+ }
-+}
-+
-+/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
-+static struct page *balloon_retrieve(void)
-+{
-+ struct page *page;
-+
-+ if (list_empty(&ballooned_pages))
-+ return NULL;
-+
-+ page = LIST_TO_PAGE(ballooned_pages.next);
-+ UNLIST_PAGE(page);
-+
-+ if (PageHighMem(page))
-+ balloon_high--;
-+ else
-+ balloon_low--;
-+
-+ return page;
-+}
-+
-+static struct page *balloon_first_page(void)
-+{
-+ if (list_empty(&ballooned_pages))
-+ return NULL;
-+ return LIST_TO_PAGE(ballooned_pages.next);
-+}
-+
-+static struct page *balloon_next_page(struct page *page)
-+{
-+ struct list_head *next = PAGE_TO_LIST(page)->next;
-+ if (next == &ballooned_pages)
-+ return NULL;
-+ return LIST_TO_PAGE(next);
-+}
-+
-+static void balloon_alarm(unsigned long unused)
-+{
-+ schedule_work(&balloon_worker);
-+}
-+
-+static unsigned long current_target(void)
-+{
-+ unsigned long target = min(target_pages, hard_limit);
-+ if (target > (current_pages + balloon_low + balloon_high))
-+ target = current_pages + balloon_low + balloon_high;
-+ return target;
-+}
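-+/*
-+ * Illustrative example: with target_pages = 200000 but only
-+ * current_pages + balloon_low + balloon_high = 150000 pages known to
-+ * us, the target is clamped to 150000 -- we can never go beyond the
-+ * pages we currently own plus those parked in the balloon.
-+ */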
-+
-+static int increase_reservation(unsigned long nr_pages)
-+{
-+ unsigned long *frame_list, pfn, i, flags;
-+ struct page *page;
-+ long rc;
-+ struct xen_memory_reservation reservation = {
-+ .address_bits = 0,
-+ .extent_order = 0,
-+ .domid = DOMID_SELF
-+ };
-+
-+ if (nr_pages > (PAGE_SIZE / sizeof(unsigned long)))
-+ nr_pages = PAGE_SIZE / sizeof(unsigned long);
-+
-+ frame_list = (unsigned long *)__get_free_page(GFP_KERNEL);
-+ if (frame_list == NULL)
-+ return -ENOMEM;
-+
-+ balloon_lock(flags);
-+
-+ page = balloon_first_page();
-+ for (i = 0; i < nr_pages; i++) {
-+ BUG_ON(page == NULL);
-+		frame_list[i] = page_to_pfn(page);
-+ page = balloon_next_page(page);
-+ }
-+
-+ reservation.extent_start = frame_list;
-+ reservation.nr_extents = nr_pages;
-+ rc = HYPERVISOR_memory_op(
-+ XENMEM_populate_physmap, &reservation);
-+ if (rc < nr_pages) {
-+ int ret;
-+ /* We hit the Xen hard limit: reprobe. */
-+ reservation.extent_start = frame_list;
-+ reservation.nr_extents = rc;
-+ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
-+ &reservation);
-+ BUG_ON(ret != rc);
-+ hard_limit = current_pages + rc - driver_pages;
-+ goto out;
-+ }
-+
-+ for (i = 0; i < nr_pages; i++) {
-+ page = balloon_retrieve();
-+ BUG_ON(page == NULL);
-+
-+ pfn = page_to_pfn(page);
-+ BUG_ON(phys_to_machine_mapping_valid(pfn));
-+
-+ /* Update P->M and M->P tables. */
-+ set_phys_to_machine(pfn, frame_list[i]);
-+ xen_machphys_update(frame_list[i], pfn);
-+
-+ /* Link back into the page tables if not highmem. */
-+ if (pfn < max_low_pfn) {
-+ int ret;
-+ ret = HYPERVISOR_update_va_mapping(
-+ (unsigned long)__va(pfn << PAGE_SHIFT),
-+ pfn_pte_ma(frame_list[i], PAGE_KERNEL),
-+ 0);
-+ BUG_ON(ret);
-+ }
-+
-+ /* Relinquish the page back to the allocator. */
-+ ClearPageReserved(page);
-+ set_page_count(page, 1);
-+ __free_page(page);
-+ }
-+
-+ current_pages += nr_pages;
-+ totalram_pages = current_pages;
-+
-+ out:
-+ balloon_unlock(flags);
-+
-+ free_page((unsigned long)frame_list);
-+
-+ return 0;
-+}
-+
-+static int decrease_reservation(unsigned long nr_pages)
-+{
-+ unsigned long *frame_list, pfn, i, flags;
-+ struct page *page;
-+ void *v;
-+ int need_sleep = 0;
-+ int ret;
-+ struct xen_memory_reservation reservation = {
-+ .address_bits = 0,
-+ .extent_order = 0,
-+ .domid = DOMID_SELF
-+ };
-+
-+ if (nr_pages > (PAGE_SIZE / sizeof(unsigned long)))
-+ nr_pages = PAGE_SIZE / sizeof(unsigned long);
-+
-+ frame_list = (unsigned long *)__get_free_page(GFP_KERNEL);
-+ if (frame_list == NULL)
-+ return -ENOMEM;
-+
-+ for (i = 0; i < nr_pages; i++) {
-+ if ((page = alloc_page(GFP_HIGHUSER)) == NULL) {
-+ nr_pages = i;
-+ need_sleep = 1;
-+ break;
-+ }
-+
-+ pfn = page_to_pfn(page);
-+ frame_list[i] = pfn_to_mfn(pfn);
-+
-+ if (!PageHighMem(page)) {
-+ v = phys_to_virt(pfn << PAGE_SHIFT);
-+ scrub_pages(v, 1);
-+ ret = HYPERVISOR_update_va_mapping(
-+ (unsigned long)v, __pte_ma(0), 0);
-+ BUG_ON(ret);
-+ }
-+#ifdef CONFIG_XEN_SCRUB_PAGES
-+ else {
-+ v = kmap(page);
-+ scrub_pages(v, 1);
-+ kunmap(page);
-+ }
-+#endif
-+ }
-+
-+ /* Ensure that ballooned highmem pages don't have kmaps. */
-+ kmap_flush_unused();
-+ flush_tlb_all();
-+
-+ balloon_lock(flags);
-+
-+ /* No more mappings: invalidate P2M and add to balloon. */
-+ for (i = 0; i < nr_pages; i++) {
-+ pfn = mfn_to_pfn(frame_list[i]);
-+ set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
-+ balloon_append(pfn_to_page(pfn));
-+ }
-+
-+ reservation.extent_start = frame_list;
-+ reservation.nr_extents = nr_pages;
-+ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
-+ BUG_ON(ret != nr_pages);
-+
-+ current_pages -= nr_pages;
-+ totalram_pages = current_pages;
-+
-+ balloon_unlock(flags);
-+
-+ free_page((unsigned long)frame_list);
-+
-+ return need_sleep;
-+}
-+
-+/*
-+ * We avoid multiple worker processes conflicting via the balloon mutex.
-+ * We may of course race with updates of the target counts (which are
-+ * protected by the balloon lock), or with changes to the Xen hard
-+ * limit, but we will recover from these in time.
-+ */
-+static void balloon_process(void *unused)
-+{
-+ int need_sleep = 0;
-+ long credit;
-+
-+ down(&balloon_mutex);
-+
-+ do {
-+ credit = current_target() - current_pages;
-+ if (credit > 0)
-+ need_sleep = (increase_reservation(credit) != 0);
-+ if (credit < 0)
-+ need_sleep = (decrease_reservation(-credit) != 0);
-+
-+#ifndef CONFIG_PREEMPT
-+ if (need_resched())
-+ schedule();
-+#endif
-+ } while ((credit != 0) && !need_sleep);
-+
-+ /* Schedule more work if there is some still to be done. */
-+ if (current_target() != current_pages)
-+ mod_timer(&balloon_timer, jiffies + HZ);
-+
-+ up(&balloon_mutex);
-+}
-+
-+/* Resets the Xen limit, sets new target, and kicks off processing. */
-+static void set_new_target(unsigned long target)
-+{
-+	/* No need for locking: these are not read-modify-write updates. */
-+ hard_limit = ~0UL;
-+ target_pages = target;
-+ schedule_work(&balloon_worker);
-+}
-+
-+static struct xenbus_watch target_watch =
-+{
-+ .node = "memory/target"
-+};
-+
-+/* React to a change in the target key */
-+static void watch_target(struct xenbus_watch *watch,
-+ const char **vec, unsigned int len)
-+{
-+ unsigned long long new_target;
-+ int err;
-+
-+ err = xenbus_scanf(XBT_NULL, "memory", "target", "%llu", &new_target);
-+ if (err != 1) {
-+ /* This is ok (for domain0 at least) - so just return */
-+ return;
-+ }
-+
-+	/* The given memory/target value is in KiB, so it needs converting
-+	   to pages. Shifting by PAGE_SHIFT converts bytes to pages; since
-+	   the value is in KiB (2^10 bytes), we shift by PAGE_SHIFT - 10.
-+	*/
-+ set_new_target(new_target >> (PAGE_SHIFT - 10));
-+
-+}
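-+/*
-+ * Worked example (assuming PAGE_SHIFT == 12, i.e. 4 KiB pages): a
-+ * memory/target value of 524288 KiB yields 524288 >> (12 - 10) =
-+ * 131072 pages, i.e. 512 MiB.
-+ */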
-+
-+static int balloon_init_watcher(struct notifier_block *notifier,
-+ unsigned long event,
-+ void *data)
-+{
-+ int err;
-+
-+ err = register_xenbus_watch(&target_watch);
-+ if (err)
-+ printk(KERN_ERR "Failed to set balloon watcher\n");
-+
-+ return NOTIFY_DONE;
-+
-+}
-+
-+static int balloon_write(struct file *file, const char __user *buffer,
-+ unsigned long count, void *data)
-+{
-+ char memstring[64], *endchar;
-+ unsigned long long target_bytes;
-+
-+ if (!capable(CAP_SYS_ADMIN))
-+ return -EPERM;
-+
-+ if (count <= 1)
-+ return -EBADMSG; /* runt */
-+ if (count > sizeof(memstring))
-+ return -EFBIG; /* too long */
-+
-+ if (copy_from_user(memstring, buffer, count))
-+ return -EFAULT;
-+ memstring[sizeof(memstring)-1] = '\0';
-+
-+ target_bytes = memparse(memstring, &endchar);
-+ set_new_target(target_bytes >> PAGE_SHIFT);
-+
-+ return count;
-+}
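-+/*
-+ * Illustrative usage: writing "512M" to /proc/xen/balloon makes
-+ * memparse() return 536870912; with 4 KiB pages (PAGE_SHIFT == 12)
-+ * the new target becomes 536870912 >> 12 = 131072 pages.
-+ */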
-+
-+static int balloon_read(char *page, char **start, off_t off,
-+ int count, int *eof, void *data)
-+{
-+ int len;
-+
-+ len = sprintf(
-+ page,
-+ "Current allocation: %8lu kB\n"
-+ "Requested target: %8lu kB\n"
-+ "Low-mem balloon: %8lu kB\n"
-+ "High-mem balloon: %8lu kB\n"
-+ "Xen hard limit: ",
-+ PAGES2KB(current_pages), PAGES2KB(target_pages),
-+ PAGES2KB(balloon_low), PAGES2KB(balloon_high));
-+
-+ if (hard_limit != ~0UL) {
-+ len += sprintf(
-+ page + len,
-+ "%8lu kB (inc. %8lu kB driver headroom)\n",
-+ PAGES2KB(hard_limit), PAGES2KB(driver_pages));
-+ } else {
-+ len += sprintf(
-+ page + len,
-+ " ??? kB\n");
-+ }
-+
-+ *eof = 1;
-+ return len;
-+}
-+
-+static struct notifier_block xenstore_notifier;
-+
-+static int __init balloon_init(void)
-+{
-+ unsigned long pfn;
-+ struct page *page;
-+
-+ IPRINTK("Initialising balloon driver.\n");
-+
-+ if (xen_init() < 0)
-+		return -ENODEV;
-+
-+ current_pages = min(xen_start_info->nr_pages, max_pfn);
-+ target_pages = current_pages;
-+ balloon_low = 0;
-+ balloon_high = 0;
-+ driver_pages = 0UL;
-+ hard_limit = ~0UL;
-+
-+ init_timer(&balloon_timer);
-+ balloon_timer.data = 0;
-+ balloon_timer.function = balloon_alarm;
-+
-+ if ((balloon_pde = create_xen_proc_entry("balloon", 0644)) == NULL) {
-+ WPRINTK("Unable to create /proc/xen/balloon.\n");
-+		return -ENOMEM;
-+ }
-+
-+ balloon_pde->read_proc = balloon_read;
-+ balloon_pde->write_proc = balloon_write;
-+
-+ /* Initialise the balloon with excess memory space. */
-+ for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) {
-+ page = pfn_to_page(pfn);
-+ if (!PageReserved(page))
-+ balloon_append(page);
-+ }
-+
-+ target_watch.callback = watch_target;
-+ xenstore_notifier.notifier_call = balloon_init_watcher;
-+
-+ register_xenstore_notifier(&xenstore_notifier);
-+
-+ return 0;
-+}
-+
-+subsys_initcall(balloon_init);
-+
-+void balloon_update_driver_allowance(long delta)
-+{
-+ unsigned long flags;
-+
-+ balloon_lock(flags);
-+ driver_pages += delta;
-+ balloon_unlock(flags);
-+}
-+
-+static int dealloc_pte_fn(
-+ pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
-+{
-+ unsigned long mfn = pte_mfn(*pte);
-+ int ret;
-+ struct xen_memory_reservation reservation = {
-+ .extent_start = &mfn,
-+ .nr_extents = 1,
-+ .extent_order = 0,
-+ .domid = DOMID_SELF
-+ };
-+ set_pte_at(&init_mm, addr, pte, __pte_ma(0));
-+ set_phys_to_machine(__pa(addr) >> PAGE_SHIFT, INVALID_P2M_ENTRY);
-+ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
-+ BUG_ON(ret != 1);
-+ return 0;
-+}
-+
-+struct page *balloon_alloc_empty_page_range(unsigned long nr_pages)
-+{
-+ unsigned long vstart, flags;
-+ unsigned int order = get_order(nr_pages * PAGE_SIZE);
-+ int ret;
-+
-+ vstart = __get_free_pages(GFP_KERNEL, order);
-+ if (vstart == 0)
-+ return NULL;
-+
-+ scrub_pages(vstart, 1 << order);
-+
-+ balloon_lock(flags);
-+ ret = generic_page_range(
-+ &init_mm, vstart, PAGE_SIZE << order, dealloc_pte_fn, NULL);
-+ BUG_ON(ret);
-+ current_pages -= 1UL << order;
-+ balloon_unlock(flags);
-+
-+ schedule_work(&balloon_worker);
-+
-+ flush_tlb_all();
-+
-+ return virt_to_page(vstart);
-+}
-+
-+void balloon_dealloc_empty_page_range(
-+ struct page *page, unsigned long nr_pages)
-+{
-+ unsigned long i, flags;
-+ unsigned int order = get_order(nr_pages * PAGE_SIZE);
-+
-+ balloon_lock(flags);
-+ for (i = 0; i < (1UL << order); i++)
-+ balloon_append(page + i);
-+ balloon_unlock(flags);
-+
-+ schedule_work(&balloon_worker);
-+}
-+
-+EXPORT_SYMBOL(balloon_update_driver_allowance);
-+EXPORT_SYMBOL(balloon_alloc_empty_page_range);
-+EXPORT_SYMBOL(balloon_dealloc_empty_page_range);
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/blkback/Makefile b/drivers/xen/blkback/Makefile
-new file mode 100644
-index 0000000..8bab63d
---- /dev/null
-+++ b/drivers/xen/blkback/Makefile
-@@ -0,0 +1,3 @@
-+obj-$(CONFIG_XEN_BLKDEV_BACKEND) := blkbk.o
-+
-+blkbk-y := blkback.o xenbus.o interface.o vbd.o
-diff --git a/drivers/xen/blkback/blkback.c b/drivers/xen/blkback/blkback.c
-new file mode 100644
-index 0000000..d624caf
---- /dev/null
-+++ b/drivers/xen/blkback/blkback.c
-@@ -0,0 +1,596 @@
-+/******************************************************************************
-+ * arch/xen/drivers/blkif/backend/main.c
-+ *
-+ * Back-end of the driver for virtual block devices. This portion of the
-+ * driver exports a 'unified' block-device interface that can be accessed
-+ * by any operating system that implements a compatible front end. A
-+ * reference front-end implementation can be found in:
-+ * arch/xen/drivers/blkif/frontend
-+ *
-+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
-+ * Copyright (c) 2005, Christopher Clark
-+ */
-+
-+#include <linux/spinlock.h>
-+#include <linux/kthread.h>
-+#include <linux/list.h>
-+#include <xen/balloon.h>
-+#include <asm/hypervisor.h>
-+#include "common.h"
-+
-+/*
-+ * These are rather arbitrary. They are fairly large because adjacent requests
-+ * pulled from a communication ring are quite likely to end up being part of
-+ * the same scatter/gather request at the disc.
-+ *
-+ * ** TRY INCREASING 'blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
-+ *
-+ * This will increase the chances of being able to write whole tracks.
-+ * 64 should be enough to keep us competitive with Linux.
-+ */
-+static int blkif_reqs = 64;
-+module_param_named(reqs, blkif_reqs, int, 0);
-+MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");
-+
-+static int mmap_pages;
-+
-+/* Run-time switchable: /sys/module/blkback/parameters/ */
-+static unsigned int log_stats = 0;
-+static unsigned int debug_lvl = 0;
-+module_param(log_stats, int, 0644);
-+module_param(debug_lvl, int, 0644);
-+
-+/*
-+ * Each outstanding request that we've passed to the lower device layers has a
-+ * 'pending_req' allocated to it. Each bio that completes decrements
-+ * the pendcnt towards zero. When it hits zero, the specified domain has a
-+ * response queued for it, with the saved 'id' passed back.
-+ */
-+typedef struct {
-+ blkif_t *blkif;
-+ unsigned long id;
-+ int nr_pages;
-+ atomic_t pendcnt;
-+ unsigned short operation;
-+ int status;
-+ struct list_head free_list;
-+} pending_req_t;
-+
-+static pending_req_t *pending_reqs;
-+static struct list_head pending_free;
-+static spinlock_t pending_free_lock = SPIN_LOCK_UNLOCKED;
-+static DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);
-+
-+#define BLKBACK_INVALID_HANDLE (~0)
-+
-+static unsigned long mmap_vstart;
-+static unsigned long *pending_vaddrs;
-+static grant_handle_t *pending_grant_handles;
-+
-+static inline int vaddr_pagenr(pending_req_t *req, int seg)
-+{
-+ return (req - pending_reqs) * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
-+}
-+
-+static inline unsigned long vaddr(pending_req_t *req, int seg)
-+{
-+ return pending_vaddrs[vaddr_pagenr(req, seg)];
-+}
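-+/*
-+ * Worked example (assuming BLKIF_MAX_SEGMENTS_PER_REQUEST == 11):
-+ * segment 2 of the pending_req at index 3 maps to page number
-+ * 3 * 11 + 2 = 35, i.e. pending_vaddrs[35].
-+ */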
-+
-+#define pending_handle(_req, _seg) \
-+ (pending_grant_handles[vaddr_pagenr(_req, _seg)])
-+
-+
-+#ifdef CONFIG_XEN_BLKDEV_TAP_BE
-+/*
-+ * If the tap driver is used, we may get pages belonging to either the tap
-+ * or (more likely) the real frontend. The backend must specify which domain
-+ * a given page belongs to in update_va_mapping though. For the moment,
-+ * the tap rewrites the ID field of the request to contain the request index
-+ * and the id of the real front end domain.
-+ */
-+#define BLKTAP_COOKIE 0xbeadfeed
-+static inline domid_t ID_TO_DOM(unsigned long id) { return (id >> 16); }
-+#endif
-+
-+static int do_block_io_op(blkif_t *blkif);
-+static void dispatch_rw_block_io(blkif_t *blkif,
-+ blkif_request_t *req,
-+ pending_req_t *pending_req);
-+static void make_response(blkif_t *blkif, unsigned long id,
-+ unsigned short op, int st);
-+
-+/******************************************************************
-+ * misc small helpers
-+ */
-+static pending_req_t* alloc_req(void)
-+{
-+ pending_req_t *req = NULL;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&pending_free_lock, flags);
-+ if (!list_empty(&pending_free)) {
-+ req = list_entry(pending_free.next, pending_req_t, free_list);
-+ list_del(&req->free_list);
-+ }
-+ spin_unlock_irqrestore(&pending_free_lock, flags);
-+ return req;
-+}
-+
-+static void free_req(pending_req_t *req)
-+{
-+ unsigned long flags;
-+ int was_empty;
-+
-+ spin_lock_irqsave(&pending_free_lock, flags);
-+ was_empty = list_empty(&pending_free);
-+ list_add(&req->free_list, &pending_free);
-+ spin_unlock_irqrestore(&pending_free_lock, flags);
-+ if (was_empty)
-+ wake_up(&pending_free_wq);
-+}
-+
-+static void unplug_queue(blkif_t *blkif)
-+{
-+ if (blkif->plug == NULL)
-+ return;
-+ if (blkif->plug->unplug_fn)
-+ blkif->plug->unplug_fn(blkif->plug);
-+ blk_put_queue(blkif->plug);
-+ blkif->plug = NULL;
-+}
-+
-+static void plug_queue(blkif_t *blkif, struct bio *bio)
-+{
-+ request_queue_t *q = bdev_get_queue(bio->bi_bdev);
-+
-+ if (q == blkif->plug)
-+ return;
-+ unplug_queue(blkif);
-+ blk_get_queue(q);
-+ blkif->plug = q;
-+}
-+
-+static void fast_flush_area(pending_req_t *req)
-+{
-+ struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-+ unsigned int i, invcount = 0;
-+ grant_handle_t handle;
-+ int ret;
-+
-+ for (i = 0; i < req->nr_pages; i++) {
-+ handle = pending_handle(req, i);
-+ if (handle == BLKBACK_INVALID_HANDLE)
-+ continue;
-+ unmap[invcount].host_addr = vaddr(req, i);
-+ unmap[invcount].dev_bus_addr = 0;
-+ unmap[invcount].handle = handle;
-+ pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
-+ invcount++;
-+ }
-+
-+ ret = HYPERVISOR_grant_table_op(
-+ GNTTABOP_unmap_grant_ref, unmap, invcount);
-+ BUG_ON(ret);
-+}
-+
-+/******************************************************************
-+ * SCHEDULER FUNCTIONS
-+ */
-+
-+static void print_stats(blkif_t *blkif)
-+{
-+ printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d\n",
-+ current->comm, blkif->st_oo_req,
-+ blkif->st_rd_req, blkif->st_wr_req);
-+ blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
-+ blkif->st_rd_req = 0;
-+ blkif->st_wr_req = 0;
-+ blkif->st_oo_req = 0;
-+}
-+
-+int blkif_schedule(void *arg)
-+{
-+ blkif_t *blkif = arg;
-+
-+ blkif_get(blkif);
-+ if (debug_lvl)
-+ printk(KERN_DEBUG "%s: started\n", current->comm);
-+ for (;;) {
-+ if (kthread_should_stop()) {
-+ /* asked to quit? */
-+ if (!atomic_read(&blkif->io_pending))
-+ break;
-+ if (debug_lvl)
-+ printk(KERN_DEBUG "%s: I/O pending, "
-+ "delaying exit\n", current->comm);
-+ }
-+
-+ if (!atomic_read(&blkif->io_pending)) {
-+ /* Wait for work to do. */
-+ wait_event_interruptible(
-+ blkif->wq,
-+ (atomic_read(&blkif->io_pending) ||
-+ kthread_should_stop()));
-+ } else if (list_empty(&pending_free)) {
-+ /* Wait for pending_req becoming available. */
-+ wait_event_interruptible(
-+ pending_free_wq,
-+ !list_empty(&pending_free));
-+ }
-+
-+ if (blkif->status != CONNECTED) {
-+ /* make sure we are connected */
-+ if (debug_lvl)
-+ printk(KERN_DEBUG "%s: not connected "
-+ "(%d pending)\n",
-+ current->comm,
-+ atomic_read(&blkif->io_pending));
-+ wait_event_interruptible(
-+ blkif->wq,
-+ (blkif->status == CONNECTED ||
-+ kthread_should_stop()));
-+ continue;
-+ }
-+
-+ /* Schedule I/O */
-+ atomic_set(&blkif->io_pending, 0);
-+ if (do_block_io_op(blkif))
-+ atomic_inc(&blkif->io_pending);
-+ unplug_queue(blkif);
-+
-+ if (log_stats && time_after(jiffies, blkif->st_print))
-+ print_stats(blkif);
-+ }
-+
-+ if (log_stats)
-+ print_stats(blkif);
-+ if (debug_lvl)
-+ printk(KERN_DEBUG "%s: exiting\n", current->comm);
-+ blkif->xenblkd = NULL;
-+ blkif_put(blkif);
-+ return 0;
-+}
-+
-+/******************************************************************
-+ * COMPLETION CALLBACK -- Called as bh->b_end_io()
-+ */
-+
-+static void __end_block_io_op(pending_req_t *pending_req, int uptodate)
-+{
-+ /* An error fails the entire request. */
-+ if (!uptodate) {
-+ DPRINTK("Buffer not up-to-date at end of operation\n");
-+ pending_req->status = BLKIF_RSP_ERROR;
-+ }
-+
-+ if (atomic_dec_and_test(&pending_req->pendcnt)) {
-+ fast_flush_area(pending_req);
-+ make_response(pending_req->blkif, pending_req->id,
-+ pending_req->operation, pending_req->status);
-+ blkif_put(pending_req->blkif);
-+ free_req(pending_req);
-+ }
-+}
-+
-+static int end_block_io_op(struct bio *bio, unsigned int done, int error)
-+{
-+ if (bio->bi_size != 0)
-+ return 1;
-+ __end_block_io_op(bio->bi_private, !error);
-+ bio_put(bio);
-+ return error;
-+}
-+
-+
-+/******************************************************************************
-+ * NOTIFICATION FROM GUEST OS.
-+ */
-+
-+irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
-+{
-+ blkif_t *blkif = dev_id;
-+
-+ atomic_inc(&blkif->io_pending);
-+ wake_up(&blkif->wq);
-+ return IRQ_HANDLED;
-+}
-+
-+
-+
-+/******************************************************************
-+ * DOWNWARD CALLS -- These interface with the block-device layer proper.
-+ */
-+
-+static int do_block_io_op(blkif_t *blkif)
-+{
-+ blkif_back_ring_t *blk_ring = &blkif->blk_ring;
-+ blkif_request_t *req;
-+ pending_req_t *pending_req;
-+ RING_IDX rc, rp;
-+ int more_to_do = 0;
-+
-+ rc = blk_ring->req_cons;
-+ rp = blk_ring->sring->req_prod;
-+ rmb(); /* Ensure we see queued requests up to 'rp'. */
-+
-+ while ((rc != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, rc)) {
-+
-+ pending_req = alloc_req();
-+ if (NULL == pending_req) {
-+ blkif->st_oo_req++;
-+ more_to_do = 1;
-+ break;
-+ }
-+
-+ req = RING_GET_REQUEST(blk_ring, rc);
-+ blk_ring->req_cons = ++rc; /* before make_response() */
-+
-+ switch (req->operation) {
-+ case BLKIF_OP_READ:
-+ blkif->st_rd_req++;
-+ dispatch_rw_block_io(blkif, req, pending_req);
-+ break;
-+ case BLKIF_OP_WRITE:
-+ blkif->st_wr_req++;
-+ dispatch_rw_block_io(blkif, req, pending_req);
-+ break;
-+ default:
-+ DPRINTK("error: unknown block io operation [%d]\n",
-+ req->operation);
-+ make_response(blkif, req->id, req->operation,
-+ BLKIF_RSP_ERROR);
-+ free_req(pending_req);
-+ break;
-+ }
-+ }
-+ return more_to_do;
-+}
-+
-+static void dispatch_rw_block_io(blkif_t *blkif,
-+ blkif_request_t *req,
-+ pending_req_t *pending_req)
-+{
-+ extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
-+ int operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
-+ struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-+ struct phys_req preq;
-+ struct {
-+ unsigned long buf; unsigned int nsec;
-+ } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-+ unsigned int nseg;
-+ struct bio *bio = NULL, *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-+ int ret, i, nbio = 0;
-+
-+ /* Check that number of segments is sane. */
-+ nseg = req->nr_segments;
-+ if (unlikely(nseg == 0) ||
-+ unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
-+ DPRINTK("Bad number of segments in request (%d)\n", nseg);
-+ goto fail_response;
-+ }
-+
-+ preq.dev = req->handle;
-+ preq.sector_number = req->sector_number;
-+ preq.nr_sects = 0;
-+
-+ pending_req->blkif = blkif;
-+ pending_req->id = req->id;
-+ pending_req->operation = operation;
-+ pending_req->status = BLKIF_RSP_OKAY;
-+ pending_req->nr_pages = nseg;
-+
-+ for (i = 0; i < nseg; i++) {
-+ seg[i].nsec = req->seg[i].last_sect -
-+ req->seg[i].first_sect + 1;
-+
-+ if ((req->seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
-+ (seg[i].nsec <= 0))
-+ goto fail_response;
-+ preq.nr_sects += seg[i].nsec;
-+
-+ map[i].host_addr = vaddr(pending_req, i);
-+ map[i].dom = blkif->domid;
-+ map[i].ref = req->seg[i].gref;
-+ map[i].flags = GNTMAP_host_map;
-+ if ( operation == WRITE )
-+ map[i].flags |= GNTMAP_readonly;
-+ }
-+
-+ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
-+ BUG_ON(ret);
-+
-+ for (i = 0; i < nseg; i++) {
-+ if (unlikely(map[i].status != 0)) {
-+ DPRINTK("invalid buffer -- could not remap it\n");
-+ goto fail_flush;
-+ }
-+
-+ pending_handle(pending_req, i) = map[i].handle;
-+#ifdef __ia64__
-+ pending_vaddrs[vaddr_pagenr(pending_req, i)] =
-+ (unsigned long)gnttab_map_vaddr(map[i]);
-+#else
-+ set_phys_to_machine(__pa(vaddr(
-+ pending_req, i)) >> PAGE_SHIFT,
-+ FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
-+#endif
-+ seg[i].buf = map[i].dev_bus_addr |
-+ (req->seg[i].first_sect << 9);
-+ }
-+
-+ if (vbd_translate(&preq, blkif, operation) != 0) {
-+ DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
-+ operation == READ ? "read" : "write",
-+ preq.sector_number,
-+ preq.sector_number + preq.nr_sects, preq.dev);
-+ goto fail_flush;
-+ }
-+
-+ for (i = 0; i < nseg; i++) {
-+ if (((int)preq.sector_number|(int)seg[i].nsec) &
-+ ((bdev_hardsect_size(preq.bdev) >> 9) - 1)) {
-+ DPRINTK("Misaligned I/O request from domain %d",
-+ blkif->domid);
-+ goto fail_put_bio;
-+ }
-+
-+ while ((bio == NULL) ||
-+ (bio_add_page(bio,
-+ virt_to_page(vaddr(pending_req, i)),
-+ seg[i].nsec << 9,
-+ seg[i].buf & ~PAGE_MASK) == 0)) {
-+ bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, nseg-i);
-+ if (unlikely(bio == NULL))
-+ goto fail_put_bio;
-+
-+ bio->bi_bdev = preq.bdev;
-+ bio->bi_private = pending_req;
-+ bio->bi_end_io = end_block_io_op;
-+ bio->bi_sector = preq.sector_number;
-+ }
-+
-+ preq.sector_number += seg[i].nsec;
-+ }
-+
-+ plug_queue(blkif, bio);
-+ atomic_set(&pending_req->pendcnt, nbio);
-+ blkif_get(blkif);
-+
-+ for (i = 0; i < nbio; i++)
-+ submit_bio(operation, biolist[i]);
-+
-+ return;
-+
-+ fail_put_bio:
-+ for (i = 0; i < (nbio-1); i++)
-+ bio_put(biolist[i]);
-+ fail_flush:
-+ fast_flush_area(pending_req);
-+ fail_response:
-+ make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
-+ free_req(pending_req);
-+}
-+
-+
-+
-+/******************************************************************
-+ * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
-+ */
-+
-+
-+static void make_response(blkif_t *blkif, unsigned long id,
-+ unsigned short op, int st)
-+{
-+ blkif_response_t *resp;
-+ unsigned long flags;
-+ blkif_back_ring_t *blk_ring = &blkif->blk_ring;
-+ int more_to_do = 0;
-+ int notify;
-+
-+ spin_lock_irqsave(&blkif->blk_ring_lock, flags);
-+
-+ /* Place on the response ring for the relevant domain. */
-+ resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
-+ resp->id = id;
-+ resp->operation = op;
-+ resp->status = st;
-+ blk_ring->rsp_prod_pvt++;
-+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(blk_ring, notify);
-+
-+ if (blk_ring->rsp_prod_pvt == blk_ring->req_cons) {
-+ /*
-+ * Tail check for pending requests. Allows frontend to avoid
-+ * notifications if requests are already in flight (lower
-+ * overheads and promotes batching).
-+ */
-+ RING_FINAL_CHECK_FOR_REQUESTS(blk_ring, more_to_do);
-+
-+ } else if (RING_HAS_UNCONSUMED_REQUESTS(blk_ring)) {
-+ more_to_do = 1;
-+
-+ }
-+ spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
-+
-+ if (more_to_do) {
-+ atomic_inc(&blkif->io_pending);
-+ wake_up(&blkif->wq);
-+ }
-+ if (notify)
-+ notify_remote_via_irq(blkif->irq);
-+}
-+
-+static int __init blkif_init(void)
-+{
-+ struct page *page;
-+ int i;
-+
-+ if (xen_init() < 0)
-+ return -ENODEV;
-+
-+ mmap_pages = blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;
-+ pending_reqs = kmalloc(sizeof(pending_reqs[0]) *
-+ blkif_reqs, GFP_KERNEL);
-+ pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
-+ mmap_pages, GFP_KERNEL);
-+ pending_vaddrs = kmalloc(sizeof(pending_vaddrs[0]) *
-+ mmap_pages, GFP_KERNEL);
-+ if (!pending_reqs || !pending_grant_handles || !pending_vaddrs) {
-+ kfree(pending_reqs);
-+ kfree(pending_grant_handles);
-+ kfree(pending_vaddrs);
-+ printk("%s: out of memory\n", __FUNCTION__);
-+ return -ENOMEM;
-+ }
-+
-+ blkif_interface_init();
-+
-+#ifdef __ia64__
-+ extern unsigned long alloc_empty_foreign_map_page_range(
-+ unsigned long pages);
-+ mmap_vstart = (unsigned long)
-+ alloc_empty_foreign_map_page_range(mmap_pages);
-+#else /* ! ia64 */
-+ page = balloon_alloc_empty_page_range(mmap_pages);
-+ BUG_ON(page == NULL);
-+ mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
-+#endif
-+ printk("%s: reqs=%d, pages=%d, mmap_vstart=0x%lx\n",
-+ __FUNCTION__, blkif_reqs, mmap_pages, mmap_vstart);
-+ BUG_ON(mmap_vstart == 0);
-+ for (i = 0; i < mmap_pages; i++) {
-+ pending_vaddrs[i] = mmap_vstart + (i << PAGE_SHIFT);
-+ pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
-+ }
-+
-+	memset(pending_reqs, 0, blkif_reqs * sizeof(pending_reqs[0]));
-+ INIT_LIST_HEAD(&pending_free);
-+
-+ for (i = 0; i < blkif_reqs; i++)
-+ list_add_tail(&pending_reqs[i].free_list, &pending_free);
-+
-+ blkif_xenbus_init();
-+ __unsafe(THIS_MODULE);
-+ return 0;
-+}
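-+/*
-+ * Sizing example (assuming BLKIF_MAX_SEGMENTS_PER_REQUEST == 11): the
-+ * default blkif_reqs = 64 gives mmap_pages = 64 * 11 = 704 reserved
-+ * pages, i.e. 2816 KiB of empty address space for grant mappings.
-+ */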
-+
-+module_init(blkif_init);
-+
-+static void blkif_exit(void)
-+{
-+ BUG();
-+}
-+
-+module_exit(blkif_exit);
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/blkback/common.h b/drivers/xen/blkback/common.h
-new file mode 100644
-index 0000000..aab5a9f
---- /dev/null
-+++ b/drivers/xen/blkback/common.h
-@@ -0,0 +1,123 @@
-+
-+#ifndef __BLKIF__BACKEND__COMMON_H__
-+#define __BLKIF__BACKEND__COMMON_H__
-+
-+#include <linux/config.h>
-+#include <linux/version.h>
-+#include <linux/module.h>
-+#include <linux/interrupt.h>
-+#include <linux/slab.h>
-+#include <linux/blkdev.h>
-+#include <linux/vmalloc.h>
-+#include <asm/io.h>
-+#include <asm/setup.h>
-+#include <asm/pgalloc.h>
-+#include <xen/evtchn.h>
-+#include <asm/hypervisor.h>
-+#include <xen/interface/io/blkif.h>
-+#include <xen/interface/io/ring.h>
-+#include <xen/gnttab.h>
-+#include <xen/driver_util.h>
-+
-+#define DPRINTK(_f, _a...) pr_debug("(file=%s, line=%d) " _f, \
-+ __FILE__ , __LINE__ , ## _a )
-+
-+struct vbd {
-+ blkif_vdev_t handle; /* what the domain refers to this vbd as */
-+ unsigned char readonly; /* Non-zero -> read-only */
-+ unsigned char type; /* VDISK_xxx */
-+ u32 pdevice; /* phys device that this vbd maps to */
-+ struct block_device *bdev;
-+};
-+
-+struct backend_info;
-+
-+typedef struct blkif_st {
-+ /* Unique identifier for this interface. */
-+ domid_t domid;
-+ unsigned int handle;
-+ /* Physical parameters of the comms window. */
-+ unsigned int evtchn;
-+ unsigned int irq;
-+ /* Comms information. */
-+ blkif_back_ring_t blk_ring;
-+ struct vm_struct *blk_ring_area;
-+ /* The VBD attached to this interface. */
-+ struct vbd vbd;
-+ /* Back pointer to the backend_info. */
-+ struct backend_info *be;
-+ /* Private fields. */
-+ enum { DISCONNECTED, CONNECTED } status;
-+#ifdef CONFIG_XEN_BLKDEV_TAP_BE
-+ /* Is this a blktap frontend */
-+ unsigned int is_blktap;
-+#endif
-+ spinlock_t blk_ring_lock;
-+ atomic_t refcnt;
-+
-+ wait_queue_head_t wq;
-+ struct task_struct *xenblkd;
-+ atomic_t io_pending;
-+ request_queue_t *plug;
-+
-+ /* statistics */
-+ unsigned long st_print;
-+ int st_rd_req;
-+ int st_wr_req;
-+ int st_oo_req;
-+
-+ struct work_struct free_work;
-+
-+ grant_handle_t shmem_handle;
-+ grant_ref_t shmem_ref;
-+} blkif_t;
-+
-+blkif_t *alloc_blkif(domid_t domid);
-+void free_blkif_callback(blkif_t *blkif);
-+int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn);
-+
-+#define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
-+#define blkif_put(_b) \
-+ do { \
-+ if (atomic_dec_and_test(&(_b)->refcnt)) \
-+ free_blkif_callback(_b); \
-+ } while (0)
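-+/* Typical lifecycle (sketch): alloc_blkif() starts the refcount at 1;
-+ * each in-flight I/O takes a blkif_get() and drops it via blkif_put()
-+ * on completion, so the final put schedules free_blkif_callback(). */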
-+
-+/* Create a vbd. */
-+int vbd_create(blkif_t *blkif, blkif_vdev_t vdevice, unsigned major,
-+ unsigned minor, int readonly);
-+void vbd_free(struct vbd *vbd);
-+
-+unsigned long vbd_size(struct vbd *vbd);
-+unsigned int vbd_info(struct vbd *vbd);
-+unsigned long vbd_secsize(struct vbd *vbd);
-+
-+struct phys_req {
-+ unsigned short dev;
-+ unsigned short nr_sects;
-+ struct block_device *bdev;
-+ blkif_sector_t sector_number;
-+};
-+
-+int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation);
-+
-+void blkif_interface_init(void);
-+
-+void blkif_xenbus_init(void);
-+
-+irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
-+int blkif_schedule(void *arg);
-+
-+void update_blkif_status(blkif_t *blkif);
-+
-+#endif /* __BLKIF__BACKEND__COMMON_H__ */
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/blkback/interface.c b/drivers/xen/blkback/interface.c
-new file mode 100644
-index 0000000..090d107
---- /dev/null
-+++ b/drivers/xen/blkback/interface.c
-@@ -0,0 +1,164 @@
-+/******************************************************************************
-+ * arch/xen/drivers/blkif/backend/interface.c
-+ *
-+ * Block-device interface management.
-+ *
-+ * Copyright (c) 2004, Keir Fraser
-+ */
-+
-+#include "common.h"
-+#include <xen/evtchn.h>
-+
-+static kmem_cache_t *blkif_cachep;
-+
-+blkif_t *alloc_blkif(domid_t domid)
-+{
-+ blkif_t *blkif;
-+
-+ blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL);
-+ if (!blkif)
-+ return ERR_PTR(-ENOMEM);
-+
-+ memset(blkif, 0, sizeof(*blkif));
-+ blkif->domid = domid;
-+ blkif->status = DISCONNECTED;
-+ spin_lock_init(&blkif->blk_ring_lock);
-+ atomic_set(&blkif->refcnt, 1);
-+ init_waitqueue_head(&blkif->wq);
-+ blkif->st_print = jiffies;
-+
-+ return blkif;
-+}
-+
-+static int map_frontend_page(blkif_t *blkif, unsigned long shared_page)
-+{
-+ struct gnttab_map_grant_ref op;
-+ int ret;
-+
-+ op.host_addr = (unsigned long)blkif->blk_ring_area->addr;
-+ op.flags = GNTMAP_host_map;
-+ op.ref = shared_page;
-+ op.dom = blkif->domid;
-+
-+ lock_vm_area(blkif->blk_ring_area);
-+ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
-+ unlock_vm_area(blkif->blk_ring_area);
-+ BUG_ON(ret);
-+
-+ if (op.status) {
-+ DPRINTK(" Grant table operation failure !\n");
-+ return op.status;
-+ }
-+
-+ blkif->shmem_ref = shared_page;
-+ blkif->shmem_handle = op.handle;
-+
-+#ifdef __ia64__
-+	/* on some architectures, map_grant_ref behaves like mmap, in that the
-+ * passed address is a hint and a different address may be returned */
-+ blkif->blk_ring_area->addr = gnttab_map_vaddr(op);
-+#endif
-+
-+ return 0;
-+}
-+
-+static void unmap_frontend_page(blkif_t *blkif)
-+{
-+ struct gnttab_unmap_grant_ref op;
-+ int ret;
-+
-+ op.host_addr = (unsigned long)blkif->blk_ring_area->addr;
-+ op.handle = blkif->shmem_handle;
-+ op.dev_bus_addr = 0;
-+
-+ lock_vm_area(blkif->blk_ring_area);
-+ ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
-+ unlock_vm_area(blkif->blk_ring_area);
-+ BUG_ON(ret);
-+}
-+
-+int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
-+{
-+ blkif_sring_t *sring;
-+ int err;
-+ evtchn_op_t op = {
-+ .cmd = EVTCHNOP_bind_interdomain,
-+ .u.bind_interdomain.remote_dom = blkif->domid,
-+ .u.bind_interdomain.remote_port = evtchn };
-+
-+ /* Already connected through? */
-+ if (blkif->irq)
-+ return 0;
-+
-+ if ( (blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL )
-+ return -ENOMEM;
-+
-+ err = map_frontend_page(blkif, shared_page);
-+ if (err) {
-+ free_vm_area(blkif->blk_ring_area);
-+ return err;
-+ }
-+
-+ err = HYPERVISOR_event_channel_op(&op);
-+ if (err) {
-+ unmap_frontend_page(blkif);
-+ free_vm_area(blkif->blk_ring_area);
-+ return err;
-+ }
-+
-+ blkif->evtchn = op.u.bind_interdomain.local_port;
-+
-+ sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
-+ BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
-+
-+ blkif->irq = bind_evtchn_to_irqhandler(
-+ blkif->evtchn, blkif_be_int, 0, "blkif-backend", blkif);
-+
-+ /* We're potentially connected now */
-+ update_blkif_status(blkif);
-+
-+ return 0;
-+}
-+
-+static void free_blkif(void *arg)
-+{
-+ blkif_t *blkif = (blkif_t *)arg;
-+
-+ /* Already disconnected? */
-+ if (blkif->irq) {
-+ unbind_from_irqhandler(blkif->irq, blkif);
-+ blkif->irq = 0;
-+ }
-+
-+ vbd_free(&blkif->vbd);
-+
-+ if (blkif->blk_ring.sring) {
-+ unmap_frontend_page(blkif);
-+ free_vm_area(blkif->blk_ring_area);
-+ blkif->blk_ring.sring = NULL;
-+ }
-+
-+ kmem_cache_free(blkif_cachep, blkif);
-+}
-+
-+void free_blkif_callback(blkif_t *blkif)
-+{
-+ INIT_WORK(&blkif->free_work, free_blkif, (void *)blkif);
-+ schedule_work(&blkif->free_work);
-+}
-+
-+void __init blkif_interface_init(void)
-+{
-+ blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t),
-+ 0, 0, NULL, NULL);
-+}
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/blkback/vbd.c b/drivers/xen/blkback/vbd.c
-new file mode 100644
-index 0000000..b2d0939
---- /dev/null
-+++ b/drivers/xen/blkback/vbd.c
-@@ -0,0 +1,102 @@
-+/******************************************************************************
-+ * blkback/vbd.c
-+ *
-+ * Routines for managing virtual block devices (VBDs).
-+ *
-+ * Copyright (c) 2003-2005, Keir Fraser & Steve Hand
-+ */
-+
-+#include "common.h"
-+#include <xen/xenbus.h>
-+
-+#define vbd_sz(_v) ((_v)->bdev->bd_part ? \
-+ (_v)->bdev->bd_part->nr_sects : (_v)->bdev->bd_disk->capacity)
-+
-+unsigned long vbd_size(struct vbd *vbd)
-+{
-+ return vbd_sz(vbd);
-+}
-+
-+unsigned int vbd_info(struct vbd *vbd)
-+{
-+ return vbd->type | (vbd->readonly?VDISK_READONLY:0);
-+}
-+
-+unsigned long vbd_secsize(struct vbd *vbd)
-+{
-+ return bdev_hardsect_size(vbd->bdev);
-+}
-+
-+int vbd_create(blkif_t *blkif, blkif_vdev_t handle, unsigned major,
-+ unsigned minor, int readonly)
-+{
-+ struct vbd *vbd;
-+
-+ vbd = &blkif->vbd;
-+ vbd->handle = handle;
-+ vbd->readonly = readonly;
-+ vbd->type = 0;
-+
-+ vbd->pdevice = MKDEV(major, minor);
-+
-+ vbd->bdev = open_by_devnum(
-+ vbd->pdevice,
-+ vbd->readonly ? FMODE_READ : FMODE_WRITE);
-+ if (IS_ERR(vbd->bdev)) {
-+		DPRINTK("vbd_create: device %08x doesn't exist.\n",
-+ vbd->pdevice);
-+ return -ENOENT;
-+ }
-+
-+ if (vbd->bdev->bd_disk == NULL) {
-+		DPRINTK("vbd_create: device %08x doesn't exist.\n",
-+ vbd->pdevice);
-+ vbd_free(vbd);
-+ return -ENOENT;
-+ }
-+
-+ if (vbd->bdev->bd_disk->flags & GENHD_FL_CD)
-+ vbd->type |= VDISK_CDROM;
-+ if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
-+ vbd->type |= VDISK_REMOVABLE;
-+
-+ DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
-+ handle, blkif->domid);
-+ return 0;
-+}
-+
-+void vbd_free(struct vbd *vbd)
-+{
-+ if (vbd->bdev)
-+ blkdev_put(vbd->bdev);
-+ vbd->bdev = NULL;
-+}
-+
-+int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation)
-+{
-+ struct vbd *vbd = &blkif->vbd;
-+ int rc = -EACCES;
-+
-+ if ((operation == WRITE) && vbd->readonly)
-+ goto out;
-+
-+ if (unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd)))
-+ goto out;
-+
-+ req->dev = vbd->pdevice;
-+ req->bdev = vbd->bdev;
-+ rc = 0;
-+
-+ out:
-+ return rc;
-+}
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/blkback/xenbus.c b/drivers/xen/blkback/xenbus.c
-new file mode 100644
-index 0000000..b1934d1
---- /dev/null
-+++ b/drivers/xen/blkback/xenbus.c
-@@ -0,0 +1,421 @@
-+/* Xenbus code for blkif backend
-+ Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
-+ Copyright (C) 2005 XenSource Ltd
-+
-+ This program is free software; you can redistribute it and/or modify
-+ it under the terms of the GNU General Public License as published by
-+ the Free Software Foundation; either version 2 of the License, or
-+ (at your option) any later version.
-+
-+ This program is distributed in the hope that it will be useful,
-+ but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ GNU General Public License for more details.
-+
-+ You should have received a copy of the GNU General Public License
-+ along with this program; if not, write to the Free Software
-+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+*/
-+
-+
-+#include <stdarg.h>
-+#include <linux/module.h>
-+#include <linux/kthread.h>
-+#include <xen/xenbus.h>
-+#include "common.h"
-+
-+#undef DPRINTK
-+#define DPRINTK(fmt, args...) \
-+ pr_debug("blkback/xenbus (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args)
-+
-+
-+struct backend_info
-+{
-+ struct xenbus_device *dev;
-+ blkif_t *blkif;
-+ struct xenbus_watch backend_watch;
-+
-+ unsigned major;
-+ unsigned minor;
-+ char *mode;
-+};
-+
-+
-+static void maybe_connect(struct backend_info *);
-+static void connect(struct backend_info *);
-+static int connect_ring(struct backend_info *);
-+static void backend_changed(struct xenbus_watch *, const char **,
-+ unsigned int);
-+
-+
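-+/* Called whenever either half of the connection may have come up: once
-+ * both the event channel (blkif->irq) and the backing device (vbd.bdev)
-+ * exist, mark the interface CONNECTED, kick the handler once to pick up
-+ * any requests queued early, and let xenbus advance the state. */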
-+void update_blkif_status(blkif_t *blkif)
-+{
-+	if (blkif->irq && blkif->vbd.bdev) {
-+ blkif->status = CONNECTED;
-+ (void)blkif_be_int(0, blkif, NULL);
-+ }
-+ maybe_connect(blkif->be);
-+}
-+
-+
-+static ssize_t show_physical_device(struct device *_dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ struct xenbus_device *dev = to_xenbus_device(_dev);
-+ struct backend_info *be = dev->data;
-+ return sprintf(buf, "%x:%x\n", be->major, be->minor);
-+}
-+DEVICE_ATTR(physical_device, S_IRUSR | S_IRGRP | S_IROTH,
-+ show_physical_device, NULL);
-+
-+
-+static ssize_t show_mode(struct device *_dev, struct device_attribute *attr,
-+ char *buf)
-+{
-+ struct xenbus_device *dev = to_xenbus_device(_dev);
-+ struct backend_info *be = dev->data;
-+ return sprintf(buf, "%s\n", be->mode);
-+}
-+DEVICE_ATTR(mode, S_IRUSR | S_IRGRP | S_IROTH, show_mode, NULL);
-+
-+
-+static int blkback_remove(struct xenbus_device *dev)
-+{
-+ struct backend_info *be = dev->data;
-+
-+ DPRINTK("");
-+
-+ if (be->backend_watch.node) {
-+ unregister_xenbus_watch(&be->backend_watch);
-+ kfree(be->backend_watch.node);
-+ be->backend_watch.node = NULL;
-+ }
-+ if (be->blkif) {
-+ be->blkif->status = DISCONNECTED;
-+ if (be->blkif->xenblkd)
-+ kthread_stop(be->blkif->xenblkd);
-+ blkif_put(be->blkif);
-+ be->blkif = NULL;
-+ }
-+
-+ device_remove_file(&dev->dev, &dev_attr_physical_device);
-+ device_remove_file(&dev->dev, &dev_attr_mode);
-+
-+ kfree(be);
-+ dev->data = NULL;
-+ return 0;
-+}
-+
-+
-+/**
-+ * Entry point to this code when a new device is created. Allocate the basic
-+ * structures, and watch the store waiting for the hotplug scripts to tell us
-+ * the device's physical major and minor numbers. Switch to InitWait.
-+ */
-+static int blkback_probe(struct xenbus_device *dev,
-+ const struct xenbus_device_id *id)
-+{
-+ int err;
-+ struct backend_info *be = kmalloc(sizeof(struct backend_info),
-+ GFP_KERNEL);
-+ if (!be) {
-+ xenbus_dev_fatal(dev, -ENOMEM,
-+ "allocating backend structure");
-+ return -ENOMEM;
-+ }
-+ memset(be, 0, sizeof(*be));
-+
-+ be->dev = dev;
-+ dev->data = be;
-+
-+ be->blkif = alloc_blkif(dev->otherend_id);
-+ if (IS_ERR(be->blkif)) {
-+ err = PTR_ERR(be->blkif);
-+ be->blkif = NULL;
-+ xenbus_dev_fatal(dev, err, "creating block interface");
-+ goto fail;
-+ }
-+
-+ /* setup back pointer */
-+ be->blkif->be = be;
-+
-+ err = xenbus_watch_path2(dev, dev->nodename, "physical-device",
-+ &be->backend_watch, backend_changed);
-+ if (err)
-+ goto fail;
-+
-+ err = xenbus_switch_state(dev, XBT_NULL, XenbusStateInitWait);
-+ if (err)
-+ goto fail;
-+
-+ return 0;
-+
-+fail:
-+ DPRINTK("failed");
-+ blkback_remove(dev);
-+ return err;
-+}
-+
-+
-+/**
-+ * Callback received when the hotplug scripts have placed the physical-device
-+ * node. Read it and the mode node, and create a vbd. If the frontend is
-+ * ready, connect.
-+ */
-+static void backend_changed(struct xenbus_watch *watch,
-+ const char **vec, unsigned int len)
-+{
-+ int err;
-+ unsigned major;
-+ unsigned minor;
-+ struct backend_info *be
-+ = container_of(watch, struct backend_info, backend_watch);
-+ struct xenbus_device *dev = be->dev;
-+
-+ DPRINTK("");
-+
-+ err = xenbus_scanf(XBT_NULL, dev->nodename, "physical-device", "%x:%x",
-+ &major, &minor);
-+ if (XENBUS_EXIST_ERR(err)) {
-+ /* Since this watch will fire once immediately after it is
-+ registered, we expect this. Ignore it, and wait for the
-+ hotplug scripts. */
-+ return;
-+ }
-+ if (err != 2) {
-+ xenbus_dev_fatal(dev, err, "reading physical-device");
-+ return;
-+ }
-+
-+ if (be->major && be->minor &&
-+ (be->major != major || be->minor != minor)) {
-+ printk(KERN_WARNING
-+ "blkback: changing physical device (from %x:%x to "
-+ "%x:%x) not supported.\n", be->major, be->minor,
-+ major, minor);
-+ return;
-+ }
-+
-+ be->mode = xenbus_read(XBT_NULL, dev->nodename, "mode", NULL);
-+ if (IS_ERR(be->mode)) {
-+ err = PTR_ERR(be->mode);
-+ be->mode = NULL;
-+ xenbus_dev_fatal(dev, err, "reading mode");
-+ return;
-+ }
-+
-+ if (be->major == 0 && be->minor == 0) {
-+ /* Front end dir is a number, which is used as the handle. */
-+
-+ char *p = strrchr(dev->otherend, '/') + 1;
-+ long handle = simple_strtoul(p, NULL, 0);
-+
-+ be->major = major;
-+ be->minor = minor;
-+
-+ err = vbd_create(be->blkif, handle, major, minor,
-+ (NULL == strchr(be->mode, 'w')));
-+ if (err) {
-+ be->major = 0;
-+ be->minor = 0;
-+ xenbus_dev_fatal(dev, err, "creating vbd structure");
-+ return;
-+ }
-+
-+ be->blkif->xenblkd = kthread_run(blkif_schedule, be->blkif,
-+ "xvd %d %02x:%02x",
-+ be->blkif->domid,
-+ be->major, be->minor);
-+ if (IS_ERR(be->blkif->xenblkd)) {
-+ err = PTR_ERR(be->blkif->xenblkd);
-+ be->blkif->xenblkd = NULL;
-+ xenbus_dev_error(dev, err, "start xenblkd");
-+ return;
-+ }
-+
-+ device_create_file(&dev->dev, &dev_attr_physical_device);
-+ device_create_file(&dev->dev, &dev_attr_mode);
-+
-+ /* We're potentially connected now */
-+ update_blkif_status(be->blkif);
-+ }
-+}
-+
-+
-+/**
-+ * Callback received when the frontend's state changes.
-+ */
-+static void frontend_changed(struct xenbus_device *dev,
-+ XenbusState frontend_state)
-+{
-+ struct backend_info *be = dev->data;
-+ int err;
-+
-+ DPRINTK("");
-+
-+ switch (frontend_state) {
-+ case XenbusStateInitialising:
-+ case XenbusStateConnected:
-+ break;
-+
-+ case XenbusStateInitialised:
-+ err = connect_ring(be);
-+ if (err) {
-+ return;
-+ }
-+ update_blkif_status(be->blkif);
-+ break;
-+
-+ case XenbusStateClosing:
-+ xenbus_switch_state(dev, XBT_NULL, XenbusStateClosing);
-+ break;
-+
-+ case XenbusStateClosed:
-+ device_unregister(&dev->dev);
-+ break;
-+
-+ case XenbusStateUnknown:
-+ case XenbusStateInitWait:
-+ default:
-+ xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
-+ frontend_state);
-+ break;
-+ }
-+}
-+
-+
-+/* ** Connection ** */
-+
-+
-+static void maybe_connect(struct backend_info *be)
-+{
-+ if ((be->major != 0 || be->minor != 0) &&
-+ be->blkif->status == CONNECTED)
-+ connect(be);
-+}
-+
-+
-+/**
-+ * Write the physical details regarding the block device to the store, and
-+ * switch to Connected state.
-+ */
-+static void connect(struct backend_info *be)
-+{
-+ xenbus_transaction_t xbt;
-+ int err;
-+ struct xenbus_device *dev = be->dev;
-+
-+ DPRINTK("%s", dev->otherend);
-+
-+ /* Supply the information about the device the frontend needs */
-+again:
-+ err = xenbus_transaction_start(&xbt);
-+
-+ if (err) {
-+ xenbus_dev_fatal(dev, err, "starting transaction");
-+ return;
-+ }
-+
-+ err = xenbus_printf(xbt, dev->nodename, "sectors", "%lu",
-+ vbd_size(&be->blkif->vbd));
-+ if (err) {
-+ xenbus_dev_fatal(dev, err, "writing %s/sectors",
-+ dev->nodename);
-+ goto abort;
-+ }
-+
-+ /* FIXME: use a typename instead */
-+ err = xenbus_printf(xbt, dev->nodename, "info", "%u",
-+ vbd_info(&be->blkif->vbd));
-+ if (err) {
-+ xenbus_dev_fatal(dev, err, "writing %s/info",
-+ dev->nodename);
-+ goto abort;
-+ }
-+ err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu",
-+ vbd_secsize(&be->blkif->vbd));
-+ if (err) {
-+ xenbus_dev_fatal(dev, err, "writing %s/sector-size",
-+ dev->nodename);
-+ goto abort;
-+ }
-+
-+ err = xenbus_switch_state(dev, xbt, XenbusStateConnected);
-+ if (err)
-+ goto abort;
-+
-+ err = xenbus_transaction_end(xbt, 0);
-+ if (err == -EAGAIN)
-+ goto again;
-+ if (err)
-+ xenbus_dev_fatal(dev, err, "ending transaction");
-+ return;
-+ abort:
-+ xenbus_transaction_end(xbt, 1);
-+}
-+
-+
-+static int connect_ring(struct backend_info *be)
-+{
-+ struct xenbus_device *dev = be->dev;
-+ unsigned long ring_ref;
-+ unsigned int evtchn;
-+ int err;
-+
-+ DPRINTK("%s", dev->otherend);
-+
-+ err = xenbus_gather(XBT_NULL, dev->otherend, "ring-ref", "%lu", &ring_ref,
-+ "event-channel", "%u", &evtchn, NULL);
-+ if (err) {
-+ xenbus_dev_fatal(dev, err,
-+ "reading %s/ring-ref and event-channel",
-+ dev->otherend);
-+ return err;
-+ }
-+
-+ /* Map the shared frame, irq etc. */
-+ err = blkif_map(be->blkif, ring_ref, evtchn);
-+ if (err) {
-+ xenbus_dev_fatal(dev, err, "mapping ring-ref %lu port %u",
-+ ring_ref, evtchn);
-+ return err;
-+ }
-+
-+ return 0;
-+}
-+
-+
-+/* ** Driver Registration ** */
-+
-+
-+static struct xenbus_device_id blkback_ids[] = {
-+ { "vbd" },
-+ { "" }
-+};
-+
-+
-+static struct xenbus_driver blkback = {
-+ .name = "vbd",
-+ .owner = THIS_MODULE,
-+ .ids = blkback_ids,
-+ .probe = blkback_probe,
-+ .remove = blkback_remove,
-+ .otherend_changed = frontend_changed
-+};
-+
-+
-+void blkif_xenbus_init(void)
-+{
-+ xenbus_register_backend(&blkback);
-+}
-+
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/blkfront/Kconfig b/drivers/xen/blkfront/Kconfig
-new file mode 100644
-index 0000000..edde837
---- /dev/null
-+++ b/drivers/xen/blkfront/Kconfig
-@@ -0,0 +1,6 @@
-+
-+config XENBLOCK
-+ tristate "Block device driver"
-+ depends on ARCH_XEN
-+ help
-+ Block device driver for Xen
-diff --git a/drivers/xen/blkfront/Makefile b/drivers/xen/blkfront/Makefile
-new file mode 100644
-index 0000000..182ef65
---- /dev/null
-+++ b/drivers/xen/blkfront/Makefile
-@@ -0,0 +1,5 @@
-+
-+obj-$(CONFIG_XEN_BLKDEV_FRONTEND) := xenblk.o
-+
-+xenblk-objs := blkfront.o vbd.o
-+
-diff --git a/drivers/xen/blkfront/blkfront.c b/drivers/xen/blkfront/blkfront.c
-new file mode 100644
-index 0000000..c8c9dec
---- /dev/null
-+++ b/drivers/xen/blkfront/blkfront.c
-@@ -0,0 +1,831 @@
-+/******************************************************************************
-+ * blkfront.c
-+ *
-+ * XenLinux virtual block-device driver.
-+ *
-+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
-+ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
-+ * Copyright (c) 2004, Christian Limpach
-+ * Copyright (c) 2004, Andrew Warfield
-+ * Copyright (c) 2005, Christopher Clark
-+ * Copyright (c) 2005, XenSource Ltd
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/version.h>
-+#include "block.h"
-+#include <linux/cdrom.h>
-+#include <linux/sched.h>
-+#include <linux/interrupt.h>
-+#include <scsi/scsi.h>
-+#include <xen/evtchn.h>
-+#include <xen/xenbus.h>
-+#include <xen/interface/grant_table.h>
-+#include <xen/gnttab.h>
-+#include <asm/hypervisor.h>
-+
-+#define BLKIF_STATE_DISCONNECTED 0
-+#define BLKIF_STATE_CONNECTED 1
-+#define BLKIF_STATE_SUSPENDED 2
-+
-+#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
-+ (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
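-+/* Illustrative sizing, not part of the original source: with 4 KiB pages
-+ * the shared ring typically holds 32 requests, and each request carries up
-+ * to BLKIF_MAX_SEGMENTS_PER_REQUEST (commonly 11) segments, giving on the
-+ * order of 32 * 11 = 352 outstanding block segments. */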
-+#define GRANT_INVALID_REF 0
-+
-+static void connect(struct blkfront_info *);
-+static void blkfront_closing(struct xenbus_device *);
-+static int blkfront_remove(struct xenbus_device *);
-+static int talk_to_backend(struct xenbus_device *, struct blkfront_info *);
-+static int setup_blkring(struct xenbus_device *, struct blkfront_info *);
-+
-+static void kick_pending_request_queues(struct blkfront_info *);
-+
-+static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs);
-+static void blkif_restart_queue(void *arg);
-+static void blkif_recover(struct blkfront_info *);
-+static void blkif_completion(struct blk_shadow *);
-+static void blkif_free(struct blkfront_info *, int);
-+
-+
-+/**
-+ * Entry point to this code when a new device is created. Allocate the basic
-+ * structures and the ring buffer for communication with the backend, and
-+ * inform the backend of the appropriate details for those. Switch to
-+ * Initialised state.
-+ */
-+static int blkfront_probe(struct xenbus_device *dev,
-+ const struct xenbus_device_id *id)
-+{
-+ int err, vdevice, i;
-+ struct blkfront_info *info;
-+
-+ /* FIXME: Use dynamic device id if this is not set. */
-+ err = xenbus_scanf(XBT_NULL, dev->nodename,
-+ "virtual-device", "%i", &vdevice);
-+ if (err != 1) {
-+ xenbus_dev_fatal(dev, err, "reading virtual-device");
-+ return err;
-+ }
-+
-+ info = kmalloc(sizeof(*info), GFP_KERNEL);
-+ if (!info) {
-+ xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
-+ return -ENOMEM;
-+ }
-+ info->xbdev = dev;
-+ info->vdevice = vdevice;
-+ info->connected = BLKIF_STATE_DISCONNECTED;
-+ info->mi = NULL;
-+ info->gd = NULL;
-+ INIT_WORK(&info->work, blkif_restart_queue, (void *)info);
-+
-+ info->shadow_free = 0;
-+ memset(info->shadow, 0, sizeof(info->shadow));
-+ for (i = 0; i < BLK_RING_SIZE; i++)
-+ info->shadow[i].req.id = i+1;
-+ info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
-+
-+ info->users = 0;
-+
-+ /* Front end dir is a number, which is used as the id. */
-+ info->handle = simple_strtoul(strrchr(dev->nodename,'/')+1, NULL, 0);
-+ dev->data = info;
-+
-+ err = talk_to_backend(dev, info);
-+ if (err) {
-+ kfree(info);
-+ dev->data = NULL;
-+ return err;
-+ }
-+
-+ return 0;
-+}
-+
-+
-+/**
-+ * We are reconnecting to the backend, due to a suspend/resume, or a backend
-+ * driver restart. We tear down our blkif structure and recreate it, but
-+ * leave the device-layer structures intact so that this is transparent to the
-+ * rest of the kernel.
-+ */
-+static int blkfront_resume(struct xenbus_device *dev)
-+{
-+ struct blkfront_info *info = dev->data;
-+ int err;
-+
-+ DPRINTK("blkfront_resume: %s\n", dev->nodename);
-+
-+ blkif_free(info, 1);
-+
-+ err = talk_to_backend(dev, info);
-+ if (!err)
-+ blkif_recover(info);
-+
-+ return err;
-+}
-+
-+
-+/* Common code used when first setting up, and when resuming. */
-+static int talk_to_backend(struct xenbus_device *dev,
-+ struct blkfront_info *info)
-+{
-+ const char *message = NULL;
-+ xenbus_transaction_t xbt;
-+ int err;
-+
-+ /* Create shared ring, alloc event channel. */
-+ err = setup_blkring(dev, info);
-+ if (err)
-+ goto out;
-+
-+again:
-+ err = xenbus_transaction_start(&xbt);
-+ if (err) {
-+ xenbus_dev_fatal(dev, err, "starting transaction");
-+ goto destroy_blkring;
-+ }
-+
-+ err = xenbus_printf(xbt, dev->nodename,
-+ "ring-ref","%u", info->ring_ref);
-+ if (err) {
-+ message = "writing ring-ref";
-+ goto abort_transaction;
-+ }
-+ err = xenbus_printf(xbt, dev->nodename,
-+ "event-channel", "%u", info->evtchn);
-+ if (err) {
-+ message = "writing event-channel";
-+ goto abort_transaction;
-+ }
-+
-+ err = xenbus_switch_state(dev, xbt, XenbusStateInitialised);
-+ if (err)
-+ goto abort_transaction;
-+
-+ err = xenbus_transaction_end(xbt, 0);
-+ if (err) {
-+ if (err == -EAGAIN)
-+ goto again;
-+ xenbus_dev_fatal(dev, err, "completing transaction");
-+ goto destroy_blkring;
-+ }
-+
-+ return 0;
-+
-+ abort_transaction:
-+ xenbus_transaction_end(xbt, 1);
-+ if (message)
-+ xenbus_dev_fatal(dev, err, "%s", message);
-+ destroy_blkring:
-+ blkif_free(info, 0);
-+ out:
-+ return err;
-+}
-+
-+
-+static int setup_blkring(struct xenbus_device *dev,
-+ struct blkfront_info *info)
-+{
-+ blkif_sring_t *sring;
-+ int err;
-+
-+ info->ring_ref = GRANT_INVALID_REF;
-+
-+ sring = (blkif_sring_t *)__get_free_page(GFP_KERNEL);
-+ if (!sring) {
-+ xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
-+ return -ENOMEM;
-+ }
-+ SHARED_RING_INIT(sring);
-+ FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
-+
-+ err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
-+ if (err < 0) {
-+ free_page((unsigned long)sring);
-+ info->ring.sring = NULL;
-+ goto fail;
-+ }
-+ info->ring_ref = err;
-+
-+ err = xenbus_alloc_evtchn(dev, &info->evtchn);
-+ if (err)
-+ goto fail;
-+
-+ err = bind_evtchn_to_irqhandler(
-+ info->evtchn, blkif_int, SA_SAMPLE_RANDOM, "blkif", info);
-+ if (err <= 0) {
-+ xenbus_dev_fatal(dev, err,
-+ "bind_evtchn_to_irqhandler failed");
-+ goto fail;
-+ }
-+ info->irq = err;
-+
-+ return 0;
-+fail:
-+ blkif_free(info, 0);
-+ return err;
-+}
-+
-+
-+/**
-+ * Callback received when the backend's state changes.
-+ */
-+static void backend_changed(struct xenbus_device *dev,
-+ XenbusState backend_state)
-+{
-+ struct blkfront_info *info = dev->data;
-+ struct block_device *bd;
-+
-+ DPRINTK("blkfront:backend_changed.\n");
-+
-+ switch (backend_state) {
-+ case XenbusStateUnknown:
-+ case XenbusStateInitialising:
-+ case XenbusStateInitWait:
-+ case XenbusStateInitialised:
-+ case XenbusStateClosed:
-+ break;
-+
-+ case XenbusStateConnected:
-+ connect(info);
-+ break;
-+
-+ case XenbusStateClosing:
-+ bd = bdget(info->dev);
-+ if (bd == NULL)
-+ xenbus_dev_fatal(dev, -ENODEV, "bdget failed");
-+
-+ down(&bd->bd_sem);
-+ if (info->users > 0)
-+ xenbus_dev_error(dev, -EBUSY,
-+ "Device in use; refusing to close");
-+ else
-+ blkfront_closing(dev);
-+ up(&bd->bd_sem);
-+ bdput(bd);
-+ break;
-+ }
-+}
-+
-+
-+/* ** Connection ** */
-+
-+
-+/*
-+ * Invoked when the backend is finally 'ready' (and has produced the
-+ * details about the physical device - #sectors, size, etc).
-+ */
-+static void connect(struct blkfront_info *info)
-+{
-+ unsigned long sectors, sector_size;
-+ unsigned int binfo;
-+ int err;
-+
-+ if ((info->connected == BLKIF_STATE_CONNECTED) ||
-+	    (info->connected == BLKIF_STATE_SUSPENDED))
-+ return;
-+
-+ DPRINTK("blkfront.c:connect:%s.\n", info->xbdev->otherend);
-+
-+ err = xenbus_gather(XBT_NULL, info->xbdev->otherend,
-+ "sectors", "%lu", &sectors,
-+ "info", "%u", &binfo,
-+ "sector-size", "%lu", &sector_size,
-+ NULL);
-+ if (err) {
-+ xenbus_dev_fatal(info->xbdev, err,
-+ "reading backend fields at %s",
-+ info->xbdev->otherend);
-+ return;
-+ }
-+
-+ err = xlvbd_add(sectors, info->vdevice, binfo, sector_size, info);
-+ if (err) {
-+ xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
-+ info->xbdev->otherend);
-+ return;
-+ }
-+
-+ (void)xenbus_switch_state(info->xbdev, XBT_NULL, XenbusStateConnected);
-+
-+ /* Kick pending requests. */
-+ spin_lock_irq(&blkif_io_lock);
-+ info->connected = BLKIF_STATE_CONNECTED;
-+ kick_pending_request_queues(info);
-+ spin_unlock_irq(&blkif_io_lock);
-+
-+ add_disk(info->gd);
-+}
-+
-+/**
-+ * Handle the change of state of the backend to Closing. We must delete our
-+ * device-layer structures now, to ensure that writes are flushed through to
-+ * the backend.  Once this is done, we can switch to Closed in
-+ * acknowledgement.
-+ */
-+static void blkfront_closing(struct xenbus_device *dev)
-+{
-+ struct blkfront_info *info = dev->data;
-+
-+ DPRINTK("blkfront_closing: %s removed\n", dev->nodename);
-+
-+ if (info->mi) {
-+ DPRINTK("Calling xlvbd_del\n");
-+ xlvbd_del(info);
-+ info->mi = NULL;
-+ }
-+
-+ xenbus_switch_state(dev, XBT_NULL, XenbusStateClosed);
-+}
-+
-+
-+static int blkfront_remove(struct xenbus_device *dev)
-+{
-+ struct blkfront_info *info = dev->data;
-+
-+ DPRINTK("blkfront_remove: %s removed\n", dev->nodename);
-+
-+ blkif_free(info, 0);
-+
-+ kfree(info);
-+
-+ return 0;
-+}
-+
-+
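-+/* The shadow free list is threaded through the otherwise-unused req.id
-+ * fields: shadow_free indexes the first free slot and each free slot's
-+ * req.id indexes the next, terminated by the 0x0fffffff sentinel set up in
-+ * blkfront_probe(); 0x0fffffee marks a slot as in use (debug aid only).
-+ * Usage sketch:
-+ *	id = GET_ID_FROM_FREELIST(info);   (claim a slot for a request)
-+ *	...                                (response arrives)
-+ *	ADD_ID_TO_FREELIST(info, id);      (return the slot)
-+ */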
-+static inline int GET_ID_FROM_FREELIST(
-+ struct blkfront_info *info)
-+{
-+ unsigned long free = info->shadow_free;
-+ BUG_ON(free > BLK_RING_SIZE);
-+ info->shadow_free = info->shadow[free].req.id;
-+ info->shadow[free].req.id = 0x0fffffee; /* debug */
-+ return free;
-+}
-+
-+static inline void ADD_ID_TO_FREELIST(
-+ struct blkfront_info *info, unsigned long id)
-+{
-+ info->shadow[id].req.id = info->shadow_free;
-+ info->shadow[id].request = 0;
-+ info->shadow_free = id;
-+}
-+
-+static inline void flush_requests(struct blkfront_info *info)
-+{
-+ int notify;
-+
-+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);
-+
-+ if (notify)
-+ notify_remote_via_irq(info->irq);
-+}
-+
-+static void kick_pending_request_queues(struct blkfront_info *info)
-+{
-+ if (!RING_FULL(&info->ring)) {
-+ /* Re-enable calldowns. */
-+ blk_start_queue(info->rq);
-+ /* Kick things off immediately. */
-+ do_blkif_request(info->rq);
-+ }
-+}
-+
-+static void blkif_restart_queue(void *arg)
-+{
-+ struct blkfront_info *info = (struct blkfront_info *)arg;
-+ spin_lock_irq(&blkif_io_lock);
-+ kick_pending_request_queues(info);
-+ spin_unlock_irq(&blkif_io_lock);
-+}
-+
-+static void blkif_restart_queue_callback(void *arg)
-+{
-+ struct blkfront_info *info = (struct blkfront_info *)arg;
-+ schedule_work(&info->work);
-+}
-+
-+int blkif_open(struct inode *inode, struct file *filep)
-+{
-+ struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
-+ info->users++;
-+ return 0;
-+}
-+
-+
-+int blkif_release(struct inode *inode, struct file *filep)
-+{
-+ struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
-+ info->users--;
-+ if (info->users == 0) {
-+ /* Check whether we have been instructed to close. We will
-+ have ignored this request initially, as the device was
-+ still mounted. */
-+ struct xenbus_device * dev = info->xbdev;
-+ XenbusState state = xenbus_read_driver_state(dev->otherend);
-+
-+ if (state == XenbusStateClosing)
-+ blkfront_closing(dev);
-+ }
-+ return 0;
-+}
-+
-+
-+int blkif_ioctl(struct inode *inode, struct file *filep,
-+ unsigned command, unsigned long argument)
-+{
-+ int i;
-+
-+ DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n",
-+ command, (long)argument, inode->i_rdev);
-+
-+ switch (command) {
-+ case HDIO_GETGEO:
-+ /* return ENOSYS to use defaults */
-+ return -ENOSYS;
-+
-+ case CDROMMULTISESSION:
-+ DPRINTK("FIXME: support multisession CDs later\n");
-+ for (i = 0; i < sizeof(struct cdrom_multisession); i++)
-+ if (put_user(0, (char __user *)(argument + i)))
-+ return -EFAULT;
-+ return 0;
-+
-+ default:
-+ /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
-+ command);*/
-+ return -EINVAL; /* same return as native Linux */
-+ }
-+
-+ return 0;
-+}
-+
-+
-+/*
-+ * blkif_queue_request
-+ *
-+ * request block io
-+ *
-+ * id: for guest use only.
-+ * operation: BLKIF_OP_{READ,WRITE,PROBE}
-+ * buffer: buffer to read/write into. this should be a
-+ * virtual address in the guest os.
-+ */
-+static int blkif_queue_request(struct request *req)
-+{
-+ struct blkfront_info *info = req->rq_disk->private_data;
-+ unsigned long buffer_mfn;
-+ blkif_request_t *ring_req;
-+ struct bio *bio;
-+ struct bio_vec *bvec;
-+ int idx;
-+ unsigned long id;
-+ unsigned int fsect, lsect;
-+ int ref;
-+ grant_ref_t gref_head;
-+
-+ if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
-+ return 1;
-+
-+ if (gnttab_alloc_grant_references(
-+ BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
-+ gnttab_request_free_callback(
-+ &info->callback,
-+ blkif_restart_queue_callback,
-+ info,
-+ BLKIF_MAX_SEGMENTS_PER_REQUEST);
-+ return 1;
-+ }
-+
-+ /* Fill out a communications ring structure. */
-+ ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
-+ id = GET_ID_FROM_FREELIST(info);
-+ info->shadow[id].request = (unsigned long)req;
-+
-+ ring_req->id = id;
-+ ring_req->operation = rq_data_dir(req) ?
-+ BLKIF_OP_WRITE : BLKIF_OP_READ;
-+ ring_req->sector_number = (blkif_sector_t)req->sector;
-+ ring_req->handle = info->handle;
-+
-+ ring_req->nr_segments = 0;
-+ rq_for_each_bio (bio, req) {
-+ bio_for_each_segment (bvec, bio, idx) {
-+ BUG_ON(ring_req->nr_segments
-+ == BLKIF_MAX_SEGMENTS_PER_REQUEST);
-+ buffer_mfn = page_to_phys(bvec->bv_page) >> PAGE_SHIFT;
-+ fsect = bvec->bv_offset >> 9;
-+ lsect = fsect + (bvec->bv_len >> 9) - 1;
-+ /* install a grant reference. */
-+ ref = gnttab_claim_grant_reference(&gref_head);
-+ BUG_ON(ref == -ENOSPC);
-+
-+ gnttab_grant_foreign_access_ref(
-+ ref,
-+ info->xbdev->otherend_id,
-+ buffer_mfn,
-+ rq_data_dir(req) );
-+
-+ info->shadow[id].frame[ring_req->nr_segments] =
-+ mfn_to_pfn(buffer_mfn);
-+
-+ ring_req->seg[ring_req->nr_segments] =
-+ (struct blkif_request_segment) {
-+ .gref = ref,
-+ .first_sect = fsect,
-+ .last_sect = lsect };
-+
-+ ring_req->nr_segments++;
-+ }
-+ }
-+
-+ info->ring.req_prod_pvt++;
-+
-+ /* Keep a private copy so we can reissue requests when recovering. */
-+ info->shadow[id].req = *ring_req;
-+
-+ gnttab_free_grant_references(gref_head);
-+
-+ return 0;
-+}
-+
-+/*
-+ * do_blkif_request
-+ * read a block; request is in a request queue
-+ */
-+void do_blkif_request(request_queue_t *rq)
-+{
-+ struct blkfront_info *info = NULL;
-+ struct request *req;
-+ int queued;
-+
-+ DPRINTK("Entered do_blkif_request\n");
-+
-+ queued = 0;
-+
-+ while ((req = elv_next_request(rq)) != NULL) {
-+ info = req->rq_disk->private_data;
-+ if (!blk_fs_request(req)) {
-+ end_request(req, 0);
-+ continue;
-+ }
-+
-+ if (RING_FULL(&info->ring))
-+ goto wait;
-+
-+ DPRINTK("do_blk_req %p: cmd %p, sec %lx, "
-+ "(%u/%li) buffer:%p [%s]\n",
-+ req, req->cmd, req->sector, req->current_nr_sectors,
-+ req->nr_sectors, req->buffer,
-+ rq_data_dir(req) ? "write" : "read");
-+
-+
-+ blkdev_dequeue_request(req);
-+ if (blkif_queue_request(req)) {
-+ blk_requeue_request(rq, req);
-+ wait:
-+ /* Avoid pointless unplugs. */
-+ blk_stop_queue(rq);
-+ break;
-+ }
-+
-+ queued++;
-+ }
-+
-+ if (queued != 0)
-+ flush_requests(info);
-+}
-+
-+
-+static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
-+{
-+ struct request *req;
-+ blkif_response_t *bret;
-+ RING_IDX i, rp;
-+ unsigned long flags;
-+ struct blkfront_info *info = (struct blkfront_info *)dev_id;
-+
-+ spin_lock_irqsave(&blkif_io_lock, flags);
-+
-+ if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
-+ spin_unlock_irqrestore(&blkif_io_lock, flags);
-+ return IRQ_HANDLED;
-+ }
-+
-+ again:
-+ rp = info->ring.sring->rsp_prod;
-+ rmb(); /* Ensure we see queued responses up to 'rp'. */
-+
-+ for (i = info->ring.rsp_cons; i != rp; i++) {
-+ unsigned long id;
-+ int ret;
-+
-+ bret = RING_GET_RESPONSE(&info->ring, i);
-+ id = bret->id;
-+ req = (struct request *)info->shadow[id].request;
-+
-+ blkif_completion(&info->shadow[id]);
-+
-+ ADD_ID_TO_FREELIST(info, id);
-+
-+ switch (bret->operation) {
-+ case BLKIF_OP_READ:
-+ case BLKIF_OP_WRITE:
-+ if (unlikely(bret->status != BLKIF_RSP_OKAY))
-+ DPRINTK("Bad return from blkdev data "
-+ "request: %x\n", bret->status);
-+
-+ ret = end_that_request_first(
-+ req, (bret->status == BLKIF_RSP_OKAY),
-+ req->hard_nr_sectors);
-+ BUG_ON(ret);
-+ end_that_request_last(
-+ req, (bret->status == BLKIF_RSP_OKAY));
-+ break;
-+ default:
-+ BUG();
-+ }
-+ }
-+
-+ info->ring.rsp_cons = i;
-+
-+ if (i != info->ring.req_prod_pvt) {
-+ int more_to_do;
-+ RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
-+ if (more_to_do)
-+ goto again;
-+ } else
-+ info->ring.sring->rsp_event = i + 1;
-+
-+ kick_pending_request_queues(info);
-+
-+ spin_unlock_irqrestore(&blkif_io_lock, flags);
-+
-+ return IRQ_HANDLED;
-+}
-+
-+static void blkif_free(struct blkfront_info *info, int suspend)
-+{
-+ /* Prevent new requests being issued until we fix things up. */
-+ spin_lock_irq(&blkif_io_lock);
-+ info->connected = suspend ?
-+ BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
-+ spin_unlock_irq(&blkif_io_lock);
-+
-+ /* Free resources associated with old device channel. */
-+ if (info->ring_ref != GRANT_INVALID_REF) {
-+ gnttab_end_foreign_access(info->ring_ref, 0,
-+ (unsigned long)info->ring.sring);
-+ info->ring_ref = GRANT_INVALID_REF;
-+ info->ring.sring = NULL;
-+ }
-+ if (info->irq)
-+ unbind_from_irqhandler(info->irq, info);
-+ info->evtchn = info->irq = 0;
-+}
-+
-+static void blkif_completion(struct blk_shadow *s)
-+{
-+ int i;
-+ for (i = 0; i < s->req.nr_segments; i++)
-+ gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL);
-+}
-+
-+static void blkif_recover(struct blkfront_info *info)
-+{
-+ int i;
-+ blkif_request_t *req;
-+ struct blk_shadow *copy;
-+ int j;
-+
-+ /* Stage 1: Make a safe copy of the shadow state. */
-+ copy = kmalloc(sizeof(info->shadow), GFP_KERNEL | __GFP_NOFAIL);
-+ memcpy(copy, info->shadow, sizeof(info->shadow));
-+
-+ /* Stage 2: Set up free list. */
-+ memset(&info->shadow, 0, sizeof(info->shadow));
-+ for (i = 0; i < BLK_RING_SIZE; i++)
-+ info->shadow[i].req.id = i+1;
-+ info->shadow_free = info->ring.req_prod_pvt;
-+ info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
-+
-+ /* Stage 3: Find pending requests and requeue them. */
-+ for (i = 0; i < BLK_RING_SIZE; i++) {
-+ /* Not in use? */
-+ if (copy[i].request == 0)
-+ continue;
-+
-+ /* Grab a request slot and copy shadow state into it. */
-+ req = RING_GET_REQUEST(
-+ &info->ring, info->ring.req_prod_pvt);
-+ *req = copy[i].req;
-+
-+ /* We get a new request id, and must reset the shadow state. */
-+ req->id = GET_ID_FROM_FREELIST(info);
-+ memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));
-+
-+ /* Rewrite any grant references invalidated by susp/resume. */
-+ for (j = 0; j < req->nr_segments; j++)
-+ gnttab_grant_foreign_access_ref(
-+ req->seg[j].gref,
-+ info->xbdev->otherend_id,
-+ pfn_to_mfn(info->shadow[req->id].frame[j]),
-+ rq_data_dir(
-+ (struct request *)
-+ info->shadow[req->id].request));
-+ info->shadow[req->id].req = *req;
-+
-+ info->ring.req_prod_pvt++;
-+ }
-+
-+ kfree(copy);
-+
-+ (void)xenbus_switch_state(info->xbdev, XBT_NULL, XenbusStateConnected);
-+
-+ /* Now safe for us to use the shared ring */
-+ spin_lock_irq(&blkif_io_lock);
-+ info->connected = BLKIF_STATE_CONNECTED;
-+ spin_unlock_irq(&blkif_io_lock);
-+
-+ /* Send off requeued requests */
-+ flush_requests(info);
-+
-+ /* Kick any other new requests queued since we resumed */
-+ spin_lock_irq(&blkif_io_lock);
-+ kick_pending_request_queues(info);
-+ spin_unlock_irq(&blkif_io_lock);
-+}
-+
-+
-+/* ** Driver Registration ** */
-+
-+
-+static struct xenbus_device_id blkfront_ids[] = {
-+ { "vbd" },
-+ { "" }
-+};
-+
-+
-+static struct xenbus_driver blkfront = {
-+ .name = "vbd",
-+ .owner = THIS_MODULE,
-+ .ids = blkfront_ids,
-+ .probe = blkfront_probe,
-+ .remove = blkfront_remove,
-+ .resume = blkfront_resume,
-+ .otherend_changed = backend_changed,
-+};
-+
-+
-+static int __init xlblk_init(void)
-+{
-+ if (xen_init() < 0)
-+ return -ENODEV;
-+
-+ return xenbus_register_frontend(&blkfront);
-+}
-+module_init(xlblk_init);
-+
-+
-+static void xlblk_exit(void)
-+{
-+ return xenbus_unregister_driver(&blkfront);
-+}
-+module_exit(xlblk_exit);
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-+
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/blkfront/block.h b/drivers/xen/blkfront/block.h
-new file mode 100644
-index 0000000..ecede04
---- /dev/null
-+++ b/drivers/xen/blkfront/block.h
-@@ -0,0 +1,162 @@
-+/******************************************************************************
-+ * block.h
-+ *
-+ * Shared definitions between all levels of XenLinux Virtual block devices.
-+ *
-+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
-+ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
-+ * Copyright (c) 2004-2005, Christian Limpach
-+ *
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __XEN_DRIVERS_BLOCK_H__
-+#define __XEN_DRIVERS_BLOCK_H__
-+
-+#include <linux/config.h>
-+#include <linux/version.h>
-+#include <linux/module.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+#include <linux/string.h>
-+#include <linux/errno.h>
-+#include <linux/fs.h>
-+#include <linux/hdreg.h>
-+#include <linux/blkdev.h>
-+#include <linux/major.h>
-+#include <linux/devfs_fs_kernel.h>
-+#include <asm/hypervisor.h>
-+#include <xen/xenbus.h>
-+#include <xen/gnttab.h>
-+#include <xen/interface/xen.h>
-+#include <xen/interface/io/blkif.h>
-+#include <xen/interface/io/ring.h>
-+#include <asm/io.h>
-+#include <asm/atomic.h>
-+#include <asm/uaccess.h>
-+
-+#if 1
-+#define IPRINTK(fmt, args...) \
-+ printk(KERN_INFO "xen_blk: " fmt, ##args)
-+#else
-+#define IPRINTK(fmt, args...) ((void)0)
-+#endif
-+
-+#if 1
-+#define WPRINTK(fmt, args...) \
-+ printk(KERN_WARNING "xen_blk: " fmt, ##args)
-+#else
-+#define WPRINTK(fmt, args...) ((void)0)
-+#endif
-+
-+#define DPRINTK(_f, _a...) pr_debug(_f, ## _a)
-+
-+#if 0
-+#define DPRINTK_IOCTL(_f, _a...) printk(KERN_ALERT _f, ## _a)
-+#else
-+#define DPRINTK_IOCTL(_f, _a...) ((void)0)
-+#endif
-+
-+struct xlbd_type_info
-+{
-+ int partn_shift;
-+ int disks_per_major;
-+ char *devname;
-+ char *diskname;
-+};
-+
-+struct xlbd_major_info
-+{
-+ int major;
-+ int index;
-+ int usage;
-+ struct xlbd_type_info *type;
-+};
-+
-+struct blk_shadow {
-+ blkif_request_t req;
-+ unsigned long request;
-+ unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-+};
-+
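-+/* __RING_SIZE() yields the number of request/response slots that fit in
-+ * one page of shared ring, rounded down to a power of two -- typically 32
-+ * for a 4 KiB page (a customary figure, not stated in this patch). */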
-+#define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE)
-+
-+/*
-+ * We have one of these per vbd, whether ide, scsi or 'other'. They
-+ * hang in private_data off the gendisk structure. We may end up
-+ * putting all kinds of interesting stuff here :-)
-+ */
-+struct blkfront_info
-+{
-+ struct xenbus_device *xbdev;
-+ dev_t dev;
-+ struct gendisk *gd;
-+ int vdevice;
-+ blkif_vdev_t handle;
-+ int connected;
-+ int ring_ref;
-+ blkif_front_ring_t ring;
-+ unsigned int evtchn, irq;
-+ struct xlbd_major_info *mi;
-+ request_queue_t *rq;
-+ struct work_struct work;
-+ struct gnttab_free_callback callback;
-+ struct blk_shadow shadow[BLK_RING_SIZE];
-+ unsigned long shadow_free;
-+
-+ /**
-+ * The number of people holding this device open. We won't allow a
-+ * hot-unplug unless this is 0.
-+ */
-+ int users;
-+};
-+
-+extern spinlock_t blkif_io_lock;
-+
-+extern int blkif_open(struct inode *inode, struct file *filep);
-+extern int blkif_release(struct inode *inode, struct file *filep);
-+extern int blkif_ioctl(struct inode *inode, struct file *filep,
-+ unsigned command, unsigned long argument);
-+extern int blkif_check(dev_t dev);
-+extern int blkif_revalidate(dev_t dev);
-+extern void do_blkif_request (request_queue_t *rq);
-+
-+/* Virtual block-device subsystem. */
-+/* Note that xlvbd_add doesn't call add_disk for you: you're expected
-+ to call add_disk on info->gd once the disk is properly connected
-+ up. */
-+int xlvbd_add(blkif_sector_t capacity, int device,
-+ u16 vdisk_info, u16 sector_size, struct blkfront_info *info);
-+void xlvbd_del(struct blkfront_info *info);
-+
-+#endif /* __XEN_DRIVERS_BLOCK_H__ */
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/blkfront/vbd.c b/drivers/xen/blkfront/vbd.c
-new file mode 100644
-index 0000000..d97f798
---- /dev/null
-+++ b/drivers/xen/blkfront/vbd.c
-@@ -0,0 +1,323 @@
-+/******************************************************************************
-+ * vbd.c
-+ *
-+ * XenLinux virtual block-device driver (xvd).
-+ *
-+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
-+ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
-+ * Copyright (c) 2004-2005, Christian Limpach
-+ *
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include "block.h"
-+#include <linux/blkdev.h>
-+#include <linux/list.h>
-+
-+#define BLKIF_MAJOR(dev) ((dev)>>8)
-+#define BLKIF_MINOR(dev) ((dev) & 0xff)
-+
-+/*
-+ * For convenience we distinguish between ide, scsi and 'other' (i.e.,
-+ * potentially combinations of the two) in the naming scheme and in a few other
-+ * places.
-+ */
-+
-+#define NUM_IDE_MAJORS 10
-+#define NUM_SCSI_MAJORS 9
-+#define NUM_VBD_MAJORS 1
-+
-+static struct xlbd_type_info xlbd_ide_type = {
-+ .partn_shift = 6,
-+ .disks_per_major = 2,
-+ .devname = "ide",
-+ .diskname = "hd",
-+};
-+
-+static struct xlbd_type_info xlbd_scsi_type = {
-+ .partn_shift = 4,
-+ .disks_per_major = 16,
-+ .devname = "sd",
-+ .diskname = "sd",
-+};
-+
-+static struct xlbd_type_info xlbd_vbd_type = {
-+ .partn_shift = 4,
-+ .disks_per_major = 16,
-+ .devname = "xvd",
-+ .diskname = "xvd",
-+};
-+
-+static struct xlbd_major_info *major_info[NUM_IDE_MAJORS + NUM_SCSI_MAJORS +
-+ NUM_VBD_MAJORS];
-+
-+#define XLBD_MAJOR_IDE_START 0
-+#define XLBD_MAJOR_SCSI_START (NUM_IDE_MAJORS)
-+#define XLBD_MAJOR_VBD_START (NUM_IDE_MAJORS + NUM_SCSI_MAJORS)
-+
-+#define XLBD_MAJOR_IDE_RANGE XLBD_MAJOR_IDE_START ... XLBD_MAJOR_SCSI_START - 1
-+#define XLBD_MAJOR_SCSI_RANGE XLBD_MAJOR_SCSI_START ... XLBD_MAJOR_VBD_START - 1
-+#define XLBD_MAJOR_VBD_RANGE XLBD_MAJOR_VBD_START ... XLBD_MAJOR_VBD_START + NUM_VBD_MAJORS - 1
-+
-+/* Information about our VBDs. */
-+#define MAX_VBDS 64
-+static LIST_HEAD(vbds_list);
-+
-+static struct block_device_operations xlvbd_block_fops =
-+{
-+ .owner = THIS_MODULE,
-+ .open = blkif_open,
-+ .release = blkif_release,
-+ .ioctl = blkif_ioctl,
-+};
-+
-+spinlock_t blkif_io_lock = SPIN_LOCK_UNLOCKED;
-+
-+static struct xlbd_major_info *
-+xlbd_alloc_major_info(int major, int minor, int index)
-+{
-+ struct xlbd_major_info *ptr;
-+
-+ ptr = kmalloc(sizeof(struct xlbd_major_info), GFP_KERNEL);
-+ if (ptr == NULL)
-+ return NULL;
-+
-+ memset(ptr, 0, sizeof(struct xlbd_major_info));
-+
-+ ptr->major = major;
-+
-+ switch (index) {
-+ case XLBD_MAJOR_IDE_RANGE:
-+ ptr->type = &xlbd_ide_type;
-+ ptr->index = index - XLBD_MAJOR_IDE_START;
-+ break;
-+ case XLBD_MAJOR_SCSI_RANGE:
-+ ptr->type = &xlbd_scsi_type;
-+ ptr->index = index - XLBD_MAJOR_SCSI_START;
-+ break;
-+ case XLBD_MAJOR_VBD_RANGE:
-+ ptr->type = &xlbd_vbd_type;
-+ ptr->index = index - XLBD_MAJOR_VBD_START;
-+ break;
-+ }
-+
-+	printk(KERN_INFO "Registering block device major %i\n", ptr->major);
-+ if (register_blkdev(ptr->major, ptr->type->devname)) {
-+ WPRINTK("can't get major %d with name %s\n",
-+ ptr->major, ptr->type->devname);
-+ kfree(ptr);
-+ return NULL;
-+ }
-+
-+ devfs_mk_dir(ptr->type->devname);
-+ major_info[index] = ptr;
-+ return ptr;
-+}
-+
-+static struct xlbd_major_info *
-+xlbd_get_major_info(int vdevice)
-+{
-+ struct xlbd_major_info *mi;
-+ int major, minor, index;
-+
-+ major = BLKIF_MAJOR(vdevice);
-+ minor = BLKIF_MINOR(vdevice);
-+
-+ switch (major) {
-+ case IDE0_MAJOR: index = 0; break;
-+ case IDE1_MAJOR: index = 1; break;
-+ case IDE2_MAJOR: index = 2; break;
-+ case IDE3_MAJOR: index = 3; break;
-+ case IDE4_MAJOR: index = 4; break;
-+ case IDE5_MAJOR: index = 5; break;
-+ case IDE6_MAJOR: index = 6; break;
-+ case IDE7_MAJOR: index = 7; break;
-+ case IDE8_MAJOR: index = 8; break;
-+ case IDE9_MAJOR: index = 9; break;
-+ case SCSI_DISK0_MAJOR: index = 10; break;
-+ case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR:
-+ index = 11 + major - SCSI_DISK1_MAJOR;
-+ break;
-+ case SCSI_CDROM_MAJOR: index = 18; break;
-+ default: index = 19; break;
-+ }
-+
-+ mi = ((major_info[index] != NULL) ? major_info[index] :
-+ xlbd_alloc_major_info(major, minor, index));
-+ if (mi)
-+ mi->usage++;
-+ return mi;
-+}
-+
-+static void
-+xlbd_put_major_info(struct xlbd_major_info *mi)
-+{
-+ mi->usage--;
-+ /* XXX: release major if 0 */
-+}
-+
-+static int
-+xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
-+{
-+ request_queue_t *rq;
-+
-+ rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
-+ if (rq == NULL)
-+ return -1;
-+
-+ elevator_init(rq, "noop");
-+
-+ /* Hard sector size and max sectors impersonate the equiv. hardware. */
-+ blk_queue_hardsect_size(rq, sector_size);
-+ blk_queue_max_sectors(rq, 512);
-+
-+ /* Each segment in a request is up to an aligned page in size. */
-+ blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
-+ blk_queue_max_segment_size(rq, PAGE_SIZE);
-+
-+ /* Ensure a merged request will fit in a single I/O ring slot. */
-+ blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
-+ blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
-+
-+ /* Make sure buffer addresses are sector-aligned. */
-+ blk_queue_dma_alignment(rq, 511);
-+
-+ gd->queue = rq;
-+
-+ return 0;
-+}
-+
-+static int
-+xlvbd_alloc_gendisk(int minor, blkif_sector_t capacity, int vdevice,
-+ u16 vdisk_info, u16 sector_size,
-+ struct blkfront_info *info)
-+{
-+ struct gendisk *gd;
-+ struct xlbd_major_info *mi;
-+ int nr_minors = 1;
-+ int err = -ENODEV;
-+
-+ mi = xlbd_get_major_info(vdevice);
-+ if (mi == NULL)
-+ goto out;
-+ info->mi = mi;
-+
-+ if ((minor & ((1 << mi->type->partn_shift) - 1)) == 0)
-+ nr_minors = 1 << mi->type->partn_shift;
-+
-+ gd = alloc_disk(nr_minors);
-+ if (gd == NULL)
-+ goto out;
-+
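-+	/* Naming example (illustrative): for IDE, partn_shift is 6 and
-+	 * disks_per_major is 2, so index 0 with minor 64 yields the whole
-+	 * disk "hdb", while minor 65 yields partition "hdb1". */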
-+ if (nr_minors > 1)
-+ sprintf(gd->disk_name, "%s%c", mi->type->diskname,
-+ 'a' + mi->index * mi->type->disks_per_major +
-+ (minor >> mi->type->partn_shift));
-+ else
-+ sprintf(gd->disk_name, "%s%c%d", mi->type->diskname,
-+ 'a' + mi->index * mi->type->disks_per_major +
-+ (minor >> mi->type->partn_shift),
-+ minor & ((1 << mi->type->partn_shift) - 1));
-+
-+ gd->major = mi->major;
-+ gd->first_minor = minor;
-+ gd->fops = &xlvbd_block_fops;
-+ gd->private_data = info;
-+ gd->driverfs_dev = &(info->xbdev->dev);
-+ set_capacity(gd, capacity);
-+
-+ if (xlvbd_init_blk_queue(gd, sector_size)) {
-+ del_gendisk(gd);
-+ goto out;
-+ }
-+
-+ info->rq = gd->queue;
-+
-+ if (vdisk_info & VDISK_READONLY)
-+ set_disk_ro(gd, 1);
-+
-+ if (vdisk_info & VDISK_REMOVABLE)
-+ gd->flags |= GENHD_FL_REMOVABLE;
-+
-+ if (vdisk_info & VDISK_CDROM)
-+ gd->flags |= GENHD_FL_CD;
-+
-+ info->gd = gd;
-+
-+ return 0;
-+
-+ out:
-+ if (mi)
-+ xlbd_put_major_info(mi);
-+ return err;
-+}
-+
-+int
-+xlvbd_add(blkif_sector_t capacity, int vdevice, u16 vdisk_info,
-+ u16 sector_size, struct blkfront_info *info)
-+{
-+ struct block_device *bd;
-+ int err = 0;
-+
-+ info->dev = MKDEV(BLKIF_MAJOR(vdevice), BLKIF_MINOR(vdevice));
-+
-+ bd = bdget(info->dev);
-+ if (bd == NULL)
-+ return -ENODEV;
-+
-+ err = xlvbd_alloc_gendisk(BLKIF_MINOR(vdevice), capacity, vdevice,
-+ vdisk_info, sector_size, info);
-+
-+ bdput(bd);
-+ return err;
-+}
-+
-+void
-+xlvbd_del(struct blkfront_info *info)
-+{
-+ struct block_device *bd;
-+
-+ bd = bdget(info->dev);
-+ if (bd == NULL)
-+ return;
-+
-+	if (info->gd == NULL) {
-+		bdput(bd);
-+		return;
-+	}
-+
-+ del_gendisk(info->gd);
-+ put_disk(info->gd);
-+ xlbd_put_major_info(info->mi);
-+ info->mi = NULL;
-+ blk_cleanup_queue(info->rq);
-+
-+ bdput(bd);
-+}
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/blktap/Makefile b/drivers/xen/blktap/Makefile
-new file mode 100644
-index 0000000..822b35f
---- /dev/null
-+++ b/drivers/xen/blktap/Makefile
-@@ -0,0 +1,3 @@
-+
-+obj-y := xenbus.o interface.o blktap.o
-+
-diff --git a/drivers/xen/blktap/blktap.c b/drivers/xen/blktap/blktap.c
-new file mode 100644
-index 0000000..2442cea
---- /dev/null
-+++ b/drivers/xen/blktap/blktap.c
-@@ -0,0 +1,910 @@
-+/******************************************************************************
-+ * arch/xen/drivers/blkif/blktap/blktap.c
-+ *
-+ * This is a modified version of the block backend driver that remaps requests
-+ * to a user-space memory region. It is intended to be used to write
-+ * application-level servers that provide block interfaces to client VMs.
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/spinlock.h>
-+#include <xen/balloon.h>
-+#include <linux/kernel.h>
-+#include <linux/fs.h>
-+#include <linux/mm.h>
-+#include <linux/miscdevice.h>
-+#include <linux/errno.h>
-+#include <linux/major.h>
-+#include <linux/gfp.h>
-+#include <linux/poll.h>
-+#include <asm/tlbflush.h>
-+#include "common.h"
-+
-+/* Only one process may open /dev/xen/blktap at any time. */
-+static unsigned long blktap_dev_inuse;
-+unsigned long blktap_ring_ok; /* make this ring->state */
-+
-+/* Rings up to user space. */
-+static blkif_front_ring_t blktap_ufe_ring;
-+
-+/* for poll: */
-+static wait_queue_head_t blktap_wait;
-+
-+/* current switching mode */
-+static unsigned long blktap_mode;
-+
-+/* local prototypes */
-+static int blktap_read_ufe_ring(void);
-+
-+
-+/* /dev/xen/blktap resides at device number major=10, minor=202 */
-+#define BLKTAP_MINOR 202
-+
-+/* blktap IOCTLs: */
-+#define BLKTAP_IOCTL_KICK_FE 1
-+#define BLKTAP_IOCTL_KICK_BE 2 /* currently unused */
-+#define BLKTAP_IOCTL_SETMODE 3
-+#define BLKTAP_IOCTL_PRINT_IDXS 100
-+
-+/* blktap switching modes: (Set with BLKTAP_IOCTL_SETMODE) */
-+#define BLKTAP_MODE_PASSTHROUGH 0x00000000 /* default */
-+#define BLKTAP_MODE_INTERCEPT_FE 0x00000001
-+#define BLKTAP_MODE_INTERCEPT_BE 0x00000002 /* unimp. */
-+#define BLKTAP_MODE_COPY_FE 0x00000004 /* unimp. */
-+#define BLKTAP_MODE_COPY_BE 0x00000008 /* unimp. */
-+#define BLKTAP_MODE_COPY_FE_PAGES 0x00000010 /* unimp. */
-+#define BLKTAP_MODE_COPY_BE_PAGES 0x00000020 /* unimp. */
-+
-+#define BLKTAP_MODE_INTERPOSE \
-+ (BLKTAP_MODE_INTERCEPT_FE | BLKTAP_MODE_INTERCEPT_BE)
-+
-+#define BLKTAP_MODE_COPY_BOTH \
-+ (BLKTAP_MODE_COPY_FE | BLKTAP_MODE_COPY_BE)
-+
-+#define BLKTAP_MODE_COPY_BOTH_PAGES \
-+ (BLKTAP_MODE_COPY_FE_PAGES | BLKTAP_MODE_COPY_BE_PAGES)
-+
-+static inline int BLKTAP_MODE_VALID(unsigned long arg)
-+{
-+ return ((arg == BLKTAP_MODE_PASSTHROUGH ) ||
-+ (arg == BLKTAP_MODE_INTERCEPT_FE) ||
-+ (arg == BLKTAP_MODE_INTERPOSE ));
-+/*
-+ return (
-+ ( arg == BLKTAP_MODE_PASSTHROUGH ) ||
-+ ( arg == BLKTAP_MODE_INTERCEPT_FE ) ||
-+ ( arg == BLKTAP_MODE_INTERCEPT_BE ) ||
-+ ( arg == BLKTAP_MODE_INTERPOSE ) ||
-+ ( (arg & ~BLKTAP_MODE_COPY_FE_PAGES) == BLKTAP_MODE_COPY_FE ) ||
-+ ( (arg & ~BLKTAP_MODE_COPY_BE_PAGES) == BLKTAP_MODE_COPY_BE ) ||
-+ ( (arg & ~BLKTAP_MODE_COPY_BOTH_PAGES) == BLKTAP_MODE_COPY_BOTH )
-+ );
-+*/
-+}
-+
-+
-+/******************************************************************
-+ * MMAP REGION
-+ */
-+
-+/*
-+ * We use a big chunk of address space to map in-flight requests into,
-+ * and export this region up to user-space. See the comments in blkback
-+ * about this -- the two must be kept in sync if the tap is used as a
-+ * passthrough.
-+ */
-+
-+#define MAX_PENDING_REQS 64
-+#define BATCH_PER_DOMAIN 16
-+
-+/* immediately before the mmap area, we have a bunch of pages reserved
-+ * for shared memory rings.
-+ */
-+#define RING_PAGES 1 /* Front */
-+
-+/* Where things are inside the device mapping. */
-+struct vm_area_struct *blktap_vma = NULL;
-+unsigned long mmap_vstart; /* Kernel pages for mapping in data. */
-+unsigned long rings_vstart; /* start of mmaped vma */
-+unsigned long user_vstart; /* start of user mappings */
-+
-+#define MMAP_PAGES \
-+ (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
-+#define MMAP_VADDR(_start, _req,_seg) \
-+ (_start + \
-+ ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) + \
-+ ((_seg) * PAGE_SIZE))
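-+/* Example (illustrative, assuming BLKIF_MAX_SEGMENTS_PER_REQUEST == 11):
-+ * segment 3 of in-flight request 2 maps at
-+ * MMAP_VADDR(user_vstart, 2, 3) == user_vstart + (2*11 + 3) * PAGE_SIZE. */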
-+
-+/*
-+ * Each outstanding request that we've passed to the lower device layers has a
-+ * 'pending_req' allocated to it. Each buffer_head that completes decrements
-+ * the pendcnt towards zero. When it hits zero, the specified domain has a
-+ * response queued for it, with the saved 'id' passed back.
-+ */
-+typedef struct {
-+ blkif_t *blkif;
-+ unsigned long id;
-+ int nr_pages;
-+ atomic_t pendcnt;
-+ unsigned short operation;
-+ int status;
-+} pending_req_t;
-+
-+/*
-+ * We can't allocate pending_req's in order, since they may complete out of
-+ * order. We therefore maintain an allocation ring. This ring also indicates
-+ * when enough work has been passed down -- at that point the allocation ring
-+ * will be empty.
-+ */
-+static pending_req_t pending_reqs[MAX_PENDING_REQS];
-+static unsigned char pending_ring[MAX_PENDING_REQS];
-+static spinlock_t pend_prod_lock = SPIN_LOCK_UNLOCKED;
-+/* NB. We use a different index type to differentiate from shared blk rings. */
-+typedef unsigned int PEND_RING_IDX;
-+#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
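-+/* MASK_PEND_IDX relies on MAX_PENDING_REQS (64) being a power of two, so
-+ * the free-running PEND_RING_IDX counters can be wrapped with a cheap mask
-+ * rather than a modulo. */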
-+static PEND_RING_IDX pending_prod, pending_cons;
-+#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
-+
-+/* Requests passing through the tap to the backend hijack the id field
-+ * in the request message. In it we put the AR index _AND_ the fe domid.
-+ * the domid is used by the backend to map the pages properly.
-+ */
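-+/* Worked example (illustrative): a request from frontend domain 5 using
-+ * allocation-ring slot 3 travels with id MAKE_ID(5, 3) == 0x00050003;
-+ * ID_TO_DOM() and ID_TO_IDX() below recover the two halves. */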
-+
-+static inline unsigned long MAKE_ID(domid_t fe_dom, PEND_RING_IDX idx)
-+{
-+ return ((fe_dom << 16) | MASK_PEND_IDX(idx));
-+}
-+
-+static inline PEND_RING_IDX ID_TO_IDX(unsigned long id)
-+{
-+ return (PEND_RING_IDX)(id & 0x0000ffff);
-+}
-+
-+static inline domid_t ID_TO_DOM(unsigned long id)
-+{
-+ return (domid_t)(id >> 16);
-+}
-+
-+
-+
-+/******************************************************************
-+ * GRANT HANDLES
-+ */
-+
-+/* When using grant tables to map a frame for device access then the
-+ * handle returned must be used to unmap the frame. This is needed to
-+ * drop the ref count on the frame.
-+ */
-+struct grant_handle_pair
-+{
-+ grant_handle_t kernel;
-+ grant_handle_t user;
-+};
-+static struct grant_handle_pair pending_grant_handles[MMAP_PAGES];
-+#define pending_handle(_idx, _i) \
-+ (pending_grant_handles[((_idx) * BLKIF_MAX_SEGMENTS_PER_REQUEST) + (_i)])
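-+/* The handle pairs are stored flattened: segment _i of pending request
-+ * _idx sits at slot _idx * BLKIF_MAX_SEGMENTS_PER_REQUEST + _i, e.g.
-+ * (2, 3) -> slot 25 when the segment limit is the customary 11. */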
-+#define BLKTAP_INVALID_HANDLE(_g) \
-+ (((_g->kernel) == 0xFFFF) && ((_g->user) == 0xFFFF))
-+#define BLKTAP_INVALIDATE_HANDLE(_g) do { \
-+ (_g)->kernel = 0xFFFF; (_g)->user = 0xFFFF; \
-+ } while(0)
-+
-+
-+/******************************************************************
-+ * BLKTAP VM OPS
-+ */
-+
-+static struct page *blktap_nopage(struct vm_area_struct *vma,
-+ unsigned long address,
-+ int *type)
-+{
-+ /*
-+ * if the page has not been mapped in by the driver then generate
-+ * a SIGBUS to the domain.
-+ */
-+ force_sig(SIGBUS, current);
-+
-+ return 0;
-+}
-+
-+struct vm_operations_struct blktap_vm_ops = {
-+ .nopage = blktap_nopage,
-+};
-+
-+/******************************************************************
-+ * BLKTAP FILE OPS
-+ */
-+
-+static int blktap_open(struct inode *inode, struct file *filp)
-+{
-+ blkif_sring_t *sring;
-+
-+ if (test_and_set_bit(0, &blktap_dev_inuse))
-+ return -EBUSY;
-+
-+ /* Allocate the fe ring. */
-+ sring = (blkif_sring_t *)get_zeroed_page(GFP_KERNEL);
-+ if (sring == NULL)
-+ return -ENOMEM;
-+
-+ SetPageReserved(virt_to_page(sring));
-+
-+ SHARED_RING_INIT(sring);
-+ FRONT_RING_INIT(&blktap_ufe_ring, sring, PAGE_SIZE);
-+
-+ return 0;
-+}
-+
-+static int blktap_release(struct inode *inode, struct file *filp)
-+{
-+ blktap_dev_inuse = 0;
-+ blktap_ring_ok = 0;
-+
-+ /* Free the ring page. */
-+ ClearPageReserved(virt_to_page(blktap_ufe_ring.sring));
-+ free_page((unsigned long) blktap_ufe_ring.sring);
-+
-+ /* Clear any active mappings and free foreign map table */
-+ if (blktap_vma != NULL) {
-+ zap_page_range(
-+ blktap_vma, blktap_vma->vm_start,
-+ blktap_vma->vm_end - blktap_vma->vm_start, NULL);
-+ blktap_vma = NULL;
-+ }
-+
-+ return 0;
-+}
-+
-+
-+/* Note on mmap:
-+ * We need to map pages to user space in a way that will allow the block
-+ * subsystem to set up direct IO to them.  This couldn't be done before,
-+ * because there isn't really a sane way to translate a user virtual address
-+ * there isn't really a sane way to translate a user virtual address down to a
-+ * physical address when the page belongs to another domain.
-+ *
-+ * My first approach was to map the page in to kernel memory, add an entry
-+ * for it in the physical frame list (using alloc_lomem_region as in blkback)
-+ * and then attempt to map that page up to user space. This is disallowed
-+ * by xen though, which realizes that we don't really own the machine frame
-+ * underlying the physical page.
-+ *
-+ * The new approach is to provide explicit support for this in xen linux.
-+ * The VMA now has a flag, VM_FOREIGN, to indicate that it contains pages
-+ * mapped from other vms. vma->vm_private_data is set up as a mapping
-+ * from pages to actual page structs. There is a new clause in get_user_pages
-+ * that does the right thing for this sort of mapping.
-+ */
-+static int blktap_mmap(struct file *filp, struct vm_area_struct *vma)
-+{
-+ int size;
-+ struct page **map;
-+ int i;
-+
-+ DPRINTK(KERN_ALERT "blktap mmap (%lx, %lx)\n",
-+ vma->vm_start, vma->vm_end);
-+
-+ vma->vm_flags |= VM_RESERVED;
-+ vma->vm_ops = &blktap_vm_ops;
-+
-+ size = vma->vm_end - vma->vm_start;
-+ if (size != ((MMAP_PAGES + RING_PAGES) << PAGE_SHIFT)) {
-+ printk(KERN_INFO
-+ "blktap: you _must_ map exactly %d pages!\n",
-+ MMAP_PAGES + RING_PAGES);
-+ return -EAGAIN;
-+ }
-+
-+ size >>= PAGE_SHIFT;
-+ DPRINTK(KERN_INFO "blktap: 2 rings + %d pages.\n", size-1);
-+
-+ rings_vstart = vma->vm_start;
-+ user_vstart = rings_vstart + (RING_PAGES << PAGE_SHIFT);
-+
-+ /* Map the ring pages to the start of the region and reserve it. */
-+
-+ /* not sure if I really need to do this... */
-+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-+
-+ if (remap_pfn_range(vma, vma->vm_start,
-+ __pa(blktap_ufe_ring.sring) >> PAGE_SHIFT,
-+ PAGE_SIZE, vma->vm_page_prot)) {
-+ WPRINTK("Mapping user ring failed!\n");
-+ goto fail;
-+ }
-+
-+ /* Mark this VM as containing foreign pages, and set up mappings. */
-+	map = kmalloc(((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)
-+		      * sizeof(struct page *),
-+		      GFP_KERNEL);
-+	if (map == NULL) {
-+		WPRINTK("Couldn't alloc VM_FOREIGN map.\n");
-+ goto fail;
-+ }
-+
-+ for (i = 0; i < ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); i++)
-+ map[i] = NULL;
-+
-+ vma->vm_private_data = map;
-+ vma->vm_flags |= VM_FOREIGN;
-+
-+ blktap_vma = vma;
-+ blktap_ring_ok = 1;
-+
-+ return 0;
-+ fail:
-+ /* Clear any active mappings. */
-+ zap_page_range(vma, vma->vm_start,
-+ vma->vm_end - vma->vm_start, NULL);
-+
-+ return -ENOMEM;
-+}
-+
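To make the "Note on mmap" above concrete: once vm_private_data holds the page array, the user-vaddr-to-struct-page lookup that a VM_FOREIGN-aware get_user_pages() performs is plain index arithmetic. A hedged userspace model with stand-in types (the real hook lives in the patched get_user_pages(), per the note):

    #include <stdio.h>

    struct page { int dummy; };            /* opaque stand-in */
    #define PAGE_SHIFT 12                  /* assumed 4 KiB pages */

    static struct page *map[4];            /* models vma->vm_private_data */
    static struct page pg;

    /* The lookup VM_FOREIGN enables: address -> slot -> struct page (or NULL). */
    static struct page *foreign_lookup(unsigned long vm_start, unsigned long addr)
    {
            return map[(addr - vm_start) >> PAGE_SHIFT];
    }

    int main(void)
    {
            unsigned long vm_start = 0x10000000UL;

            map[2] = &pg;                        /* only page 2 is mapped */
            printf("hit: %d\n",  foreign_lookup(vm_start, vm_start + 2 * 4096) == &pg);
            printf("miss: %d\n", foreign_lookup(vm_start, vm_start) == NULL);
            return 0;
    }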
-+static int blktap_ioctl(struct inode *inode, struct file *filp,
-+ unsigned int cmd, unsigned long arg)
-+{
-+ switch(cmd) {
-+ case BLKTAP_IOCTL_KICK_FE: /* There are fe messages to process. */
-+ return blktap_read_ufe_ring();
-+
-+ case BLKTAP_IOCTL_SETMODE:
-+ if (BLKTAP_MODE_VALID(arg)) {
-+ blktap_mode = arg;
-+ /* XXX: may need to flush rings here. */
-+ printk(KERN_INFO "blktap: set mode to %lx\n", arg);
-+ return 0;
-+		}
-+		break;
-+ case BLKTAP_IOCTL_PRINT_IDXS:
-+ {
-+ //print_fe_ring_idxs();
-+		WPRINTK("User Rings:\n-----------\n");
-+		WPRINTK("UF: rsp_cons: %2d, req_prod_pvt: %2d "
-+ "| req_prod: %2d, rsp_prod: %2d\n",
-+ blktap_ufe_ring.rsp_cons,
-+ blktap_ufe_ring.req_prod_pvt,
-+ blktap_ufe_ring.sring->req_prod,
-+ blktap_ufe_ring.sring->rsp_prod);
-+
-+ }
-+ }
-+ return -ENOIOCTLCMD;
-+}
-+
-+static unsigned int blktap_poll(struct file *file, poll_table *wait)
-+{
-+ poll_wait(file, &blktap_wait, wait);
-+ if (blktap_ufe_ring.req_prod_pvt != blktap_ufe_ring.sring->req_prod) {
-+ flush_tlb_all();
-+ RING_PUSH_REQUESTS(&blktap_ufe_ring);
-+ return POLLIN | POLLRDNORM;
-+ }
-+
-+ return 0;
-+}
-+
-+void blktap_kick_user(void)
-+{
-+ /* blktap_ring->req_prod = blktap_req_prod; */
-+ wake_up_interruptible(&blktap_wait);
-+}
-+
-+static struct file_operations blktap_fops = {
-+ .owner = THIS_MODULE,
-+ .poll = blktap_poll,
-+ .ioctl = blktap_ioctl,
-+ .open = blktap_open,
-+ .release = blktap_release,
-+ .mmap = blktap_mmap,
-+};
-+
-+
-+
-+static int do_block_io_op(blkif_t *blkif, int max_to_do);
-+static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req);
-+static void make_response(blkif_t *blkif, unsigned long id,
-+ unsigned short op, int st);
-+
-+
-+static void fast_flush_area(int idx, int nr_pages)
-+{
-+ struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
-+ unsigned int i, op = 0;
-+ struct grant_handle_pair *handle;
-+ uint64_t ptep;
-+ int ret;
-+
-+ for ( i = 0; i < nr_pages; i++)
-+ {
-+ handle = &pending_handle(idx, i);
-+ if (BLKTAP_INVALID_HANDLE(handle))
-+ continue;
-+
-+ unmap[op].host_addr = MMAP_VADDR(mmap_vstart, idx, i);
-+ unmap[op].dev_bus_addr = 0;
-+ unmap[op].handle = handle->kernel;
-+ op++;
-+
-+ if (create_lookup_pte_addr(
-+ blktap_vma->vm_mm,
-+ MMAP_VADDR(user_vstart, idx, i),
-+ &ptep) !=0) {
-+ DPRINTK("Couldn't get a pte addr!\n");
-+ return;
-+ }
-+ unmap[op].host_addr = ptep;
-+ unmap[op].dev_bus_addr = 0;
-+ unmap[op].handle = handle->user;
-+ op++;
-+
-+ BLKTAP_INVALIDATE_HANDLE(handle);
-+ }
-+
-+ ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, op);
-+ BUG_ON(ret);
-+
-+ if (blktap_vma != NULL)
-+ zap_page_range(blktap_vma,
-+ MMAP_VADDR(user_vstart, idx, 0),
-+ nr_pages << PAGE_SHIFT, NULL);
-+}
-+
-+/******************************************************************
-+ * BLOCK-DEVICE SCHEDULER LIST MAINTENANCE
-+ */
-+
-+static struct list_head blkio_schedule_list;
-+static spinlock_t blkio_schedule_list_lock;
-+
-+static int __on_blkdev_list(blkif_t *blkif)
-+{
-+ return blkif->blkdev_list.next != NULL;
-+}
-+
-+static void remove_from_blkdev_list(blkif_t *blkif)
-+{
-+ unsigned long flags;
-+
-+ if (!__on_blkdev_list(blkif))
-+ return;
-+
-+ spin_lock_irqsave(&blkio_schedule_list_lock, flags);
-+ if (__on_blkdev_list(blkif)) {
-+ list_del(&blkif->blkdev_list);
-+ blkif->blkdev_list.next = NULL;
-+ blkif_put(blkif);
-+ }
-+ spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
-+}
-+
-+static void add_to_blkdev_list_tail(blkif_t *blkif)
-+{
-+ unsigned long flags;
-+
-+ if (__on_blkdev_list(blkif))
-+ return;
-+
-+ spin_lock_irqsave(&blkio_schedule_list_lock, flags);
-+ if (!__on_blkdev_list(blkif) && (blkif->status == CONNECTED)) {
-+ list_add_tail(&blkif->blkdev_list, &blkio_schedule_list);
-+ blkif_get(blkif);
-+ }
-+ spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
-+}
-+
-+
-+/******************************************************************
-+ * SCHEDULER FUNCTIONS
-+ */
-+
-+static DECLARE_WAIT_QUEUE_HEAD(blkio_schedule_wait);
-+
-+static int blkio_schedule(void *arg)
-+{
-+ DECLARE_WAITQUEUE(wq, current);
-+
-+ blkif_t *blkif;
-+ struct list_head *ent;
-+
-+ daemonize("xenblkd");
-+
-+ for (;;) {
-+ /* Wait for work to do. */
-+ add_wait_queue(&blkio_schedule_wait, &wq);
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ if ((NR_PENDING_REQS == MAX_PENDING_REQS) ||
-+ list_empty(&blkio_schedule_list))
-+ schedule();
-+ __set_current_state(TASK_RUNNING);
-+ remove_wait_queue(&blkio_schedule_wait, &wq);
-+
-+ /* Queue up a batch of requests. */
-+ while ((NR_PENDING_REQS < MAX_PENDING_REQS) &&
-+ !list_empty(&blkio_schedule_list)) {
-+ ent = blkio_schedule_list.next;
-+ blkif = list_entry(ent, blkif_t, blkdev_list);
-+ blkif_get(blkif);
-+ remove_from_blkdev_list(blkif);
-+ if (do_block_io_op(blkif, BATCH_PER_DOMAIN))
-+ add_to_blkdev_list_tail(blkif);
-+ blkif_put(blkif);
-+ }
-+ }
-+}
-+
-+static void maybe_trigger_blkio_schedule(void)
-+{
-+ /*
-+ * Needed so that two processes, who together make the following
-+ * predicate true, don't both read stale values and evaluate the
-+ * predicate incorrectly. Incredibly unlikely to stall the scheduler
-+ * on the x86, but...
-+ */
-+ smp_mb();
-+
-+ if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
-+ !list_empty(&blkio_schedule_list))
-+ wake_up(&blkio_schedule_wait);
-+}
-+
-+
-+
-+/******************************************************************
-+ * COMPLETION CALLBACK -- Called as bh->b_end_io()
-+ */
-+
-+
-+static int blktap_read_ufe_ring(void)
-+{
-+ /* This is called to read responses from the UFE ring. */
-+
-+ RING_IDX i, j, rp;
-+ blkif_response_t *resp;
-+ blkif_t *blkif;
-+ int pending_idx;
-+ pending_req_t *pending_req;
-+ unsigned long flags;
-+
-+	/* if we are forwarding from the UFE ring to the FE ring */
-+ if (blktap_mode & BLKTAP_MODE_INTERCEPT_FE) {
-+
-+ /* for each outstanding message on the UFEring */
-+ rp = blktap_ufe_ring.sring->rsp_prod;
-+ rmb();
-+
-+ for (i = blktap_ufe_ring.rsp_cons; i != rp; i++) {
-+ resp = RING_GET_RESPONSE(&blktap_ufe_ring, i);
-+ pending_idx = MASK_PEND_IDX(ID_TO_IDX(resp->id));
-+ pending_req = &pending_reqs[pending_idx];
-+
-+ blkif = pending_req->blkif;
-+ for (j = 0; j < pending_req->nr_pages; j++) {
-+ unsigned long vaddr;
-+ struct page **map = blktap_vma->vm_private_data;
-+ int offset;
-+
-+ vaddr = MMAP_VADDR(user_vstart, pending_idx, j);
-+ offset = (vaddr - blktap_vma->vm_start) >> PAGE_SHIFT;
-+
-+				ClearPageReserved(map[offset]);
-+ map[offset] = NULL;
-+ }
-+
-+ fast_flush_area(pending_idx, pending_req->nr_pages);
-+ make_response(blkif, pending_req->id, resp->operation,
-+ resp->status);
-+ blkif_put(pending_req->blkif);
-+ spin_lock_irqsave(&pend_prod_lock, flags);
-+ pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
-+ spin_unlock_irqrestore(&pend_prod_lock, flags);
-+ }
-+ blktap_ufe_ring.rsp_cons = i;
-+ maybe_trigger_blkio_schedule();
-+ }
-+ return 0;
-+}
-+
-+
-+/******************************************************************************
-+ * NOTIFICATION FROM GUEST OS.
-+ */
-+
-+irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
-+{
-+ blkif_t *blkif = dev_id;
-+ add_to_blkdev_list_tail(blkif);
-+ maybe_trigger_blkio_schedule();
-+ return IRQ_HANDLED;
-+}
-+
-+
-+
-+/******************************************************************
-+ * DOWNWARD CALLS -- These interface with the block-device layer proper.
-+ */
-+
-+static int do_block_io_op(blkif_t *blkif, int max_to_do)
-+{
-+ blkif_back_ring_t *blk_ring = &blkif->blk_ring;
-+ blkif_request_t *req;
-+ RING_IDX i, rp;
-+ int more_to_do = 0;
-+
-+ rp = blk_ring->sring->req_prod;
-+ rmb(); /* Ensure we see queued requests up to 'rp'. */
-+
-+ for (i = blk_ring->req_cons;
-+ (i != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, i);
-+ i++ ) {
-+ if ((max_to_do-- == 0) ||
-+ (NR_PENDING_REQS == MAX_PENDING_REQS)) {
-+ more_to_do = 1;
-+ break;
-+ }
-+
-+ req = RING_GET_REQUEST(blk_ring, i);
-+ switch (req->operation) {
-+ case BLKIF_OP_READ:
-+ case BLKIF_OP_WRITE:
-+ dispatch_rw_block_io(blkif, req);
-+ break;
-+
-+ default:
-+ DPRINTK("error: unknown block io operation [%d]\n",
-+ req->operation);
-+ make_response(blkif, req->id, req->operation,
-+ BLKIF_RSP_ERROR);
-+ break;
-+ }
-+ }
-+
-+ blk_ring->req_cons = i;
-+ blktap_kick_user();
-+
-+ return more_to_do;
-+}
-+
-+static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req)
-+{
-+ blkif_request_t *target;
-+ int i, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
-+ pending_req_t *pending_req;
-+ struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
-+ int op, ret;
-+ unsigned int nseg;
-+ int retval;
-+
-+ /* Check that number of segments is sane. */
-+ nseg = req->nr_segments;
-+ if (unlikely(nseg == 0) ||
-+ unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
-+ DPRINTK("Bad number of segments in request (%d)\n", nseg);
-+ goto bad_descriptor;
-+ }
-+
-+ /* Make sure userspace is ready. */
-+ if (!blktap_ring_ok) {
-+ DPRINTK("blktap: ring not ready for requests!\n");
-+ goto bad_descriptor;
-+ }
-+
-+
-+ if (RING_FULL(&blktap_ufe_ring)) {
-+ WPRINTK("blktap: fe_ring is full, can't add "
-+ "(very broken!).\n");
-+ goto bad_descriptor;
-+ }
-+
-+ flush_cache_all(); /* a noop on intel... */
-+
-+	/* Map the foreign pages directly into the application. */
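-+	/* Two grant-map entries are built per segment, interleaved: map[2k]
-+	 * maps the frame into the kernel at kvaddr, map[2k+1] installs it in
-+	 * the user PTE located via create_lookup_pte_addr(). The status
-+	 * checks after the hypercall walk the array in the same pairs. */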
-+ op = 0;
-+ for (i = 0; i < req->nr_segments; i++) {
-+
-+ unsigned long uvaddr;
-+ unsigned long kvaddr;
-+ uint64_t ptep;
-+
-+ uvaddr = MMAP_VADDR(user_vstart, pending_idx, i);
-+ kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i);
-+
-+ /* Map the remote page to kernel. */
-+ map[op].host_addr = kvaddr;
-+ map[op].dom = blkif->domid;
-+ map[op].ref = req->seg[i].gref;
-+ map[op].flags = GNTMAP_host_map;
-+ /* This needs a bit more thought in terms of interposition:
-+ * If we want to be able to modify pages during write using
-+ * grant table mappings, the guest will either need to allow
-+ * it, or we'll need to incur a copy. Bit of an fbufs moment. ;) */
-+ if (req->operation == BLKIF_OP_WRITE)
-+ map[op].flags |= GNTMAP_readonly;
-+ op++;
-+
-+ /* Now map it to user. */
-+ ret = create_lookup_pte_addr(blktap_vma->vm_mm, uvaddr, &ptep);
-+ if (ret) {
-+ DPRINTK("Couldn't get a pte addr!\n");
-+ fast_flush_area(pending_idx, req->nr_segments);
-+ goto bad_descriptor;
-+ }
-+
-+ map[op].host_addr = ptep;
-+ map[op].dom = blkif->domid;
-+ map[op].ref = req->seg[i].gref;
-+ map[op].flags = GNTMAP_host_map | GNTMAP_application_map
-+ | GNTMAP_contains_pte;
-+ /* Above interposition comment applies here as well. */
-+ if (req->operation == BLKIF_OP_WRITE)
-+ map[op].flags |= GNTMAP_readonly;
-+ op++;
-+ }
-+
-+ retval = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, op);
-+ BUG_ON(retval);
-+
-+ op = 0;
-+ for (i = 0; i < (req->nr_segments*2); i += 2) {
-+ unsigned long uvaddr;
-+ unsigned long kvaddr;
-+ unsigned long offset;
-+ int cancel = 0;
-+
-+ uvaddr = MMAP_VADDR(user_vstart, pending_idx, i/2);
-+ kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i/2);
-+
-+ if (unlikely(map[i].status)) {
-+ DPRINTK("Error on kernel grant mapping (%d)\n",
-+ map[i].status);
-+ ret = map[i].status;
-+ cancel = 1;
-+ }
-+
-+ if (unlikely(map[i+1].status)) {
-+ DPRINTK("Error on user grant mapping (%d)\n",
-+ map[i+1].status);
-+ ret = map[i+1].status;
-+ cancel = 1;
-+ }
-+
-+ if (cancel) {
-+ fast_flush_area(pending_idx, req->nr_segments);
-+ goto bad_descriptor;
-+ }
-+
-+ /* Set the necessary mappings in p2m and in the VM_FOREIGN
-+ * vm_area_struct to allow user vaddr -> struct page lookups
-+ * to work. This is needed for direct IO to foreign pages. */
-+ set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT,
-+ FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
-+
-+ offset = (uvaddr - blktap_vma->vm_start) >> PAGE_SHIFT;
-+ ((struct page **)blktap_vma->vm_private_data)[offset] =
-+ pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
-+
-+ /* Save handles for unmapping later. */
-+ pending_handle(pending_idx, i/2).kernel = map[i].handle;
-+ pending_handle(pending_idx, i/2).user = map[i+1].handle;
-+ }
-+
-+ /* Mark mapped pages as reserved: */
-+ for (i = 0; i < req->nr_segments; i++) {
-+ unsigned long kvaddr;
-+ kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i);
-+ SetPageReserved(pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT));
-+ }
-+
-+ pending_req = &pending_reqs[pending_idx];
-+ pending_req->blkif = blkif;
-+ pending_req->id = req->id;
-+ pending_req->operation = req->operation;
-+ pending_req->status = BLKIF_RSP_OKAY;
-+ pending_req->nr_pages = nseg;
-+ req->id = MAKE_ID(blkif->domid, pending_idx);
-+ //atomic_set(&pending_req->pendcnt, nbio);
-+ pending_cons++;
-+ blkif_get(blkif);
-+
-+ /* Finally, write the request message to the user ring. */
-+ target = RING_GET_REQUEST(&blktap_ufe_ring,
-+ blktap_ufe_ring.req_prod_pvt);
-+ memcpy(target, req, sizeof(*req));
-+ blktap_ufe_ring.req_prod_pvt++;
-+ return;
-+
-+ bad_descriptor:
-+ make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
-+}
-+
-+
-+
-+/******************************************************************
-+ * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
-+ */
-+
-+
-+static void make_response(blkif_t *blkif, unsigned long id,
-+ unsigned short op, int st)
-+{
-+ blkif_response_t *resp;
-+ unsigned long flags;
-+ blkif_back_ring_t *blk_ring = &blkif->blk_ring;
-+
-+ /* Place on the response ring for the relevant domain. */
-+ spin_lock_irqsave(&blkif->blk_ring_lock, flags);
-+ resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
-+ resp->id = id;
-+ resp->operation = op;
-+ resp->status = st;
-+ wmb(); /* Ensure other side can see the response fields. */
-+ blk_ring->rsp_prod_pvt++;
-+ RING_PUSH_RESPONSES(blk_ring);
-+ spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
-+
-+ /* Kick the relevant domain. */
-+ notify_remote_via_irq(blkif->irq);
-+}
-+
-+static struct miscdevice blktap_miscdev = {
-+ .minor = BLKTAP_MINOR,
-+ .name = "blktap",
-+ .fops = &blktap_fops,
-+ .devfs_name = "misc/blktap",
-+};
-+
-+void blkif_deschedule(blkif_t *blkif)
-+{
-+ remove_from_blkdev_list(blkif);
-+}
-+
-+static int __init blkif_init(void)
-+{
-+ int i, j, err;
-+ struct page *page;
-+
-+ blkif_interface_init();
-+
-+ page = balloon_alloc_empty_page_range(MMAP_PAGES);
-+ BUG_ON(page == NULL);
-+ mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
-+
-+ pending_cons = 0;
-+ pending_prod = MAX_PENDING_REQS;
-+ memset(pending_reqs, 0, sizeof(pending_reqs));
-+ for ( i = 0; i < MAX_PENDING_REQS; i++ )
-+ pending_ring[i] = i;
-+
-+	spin_lock_init(&blkio_schedule_list_lock);
-+	INIT_LIST_HEAD(&blkio_schedule_list);
-+	/* The scheduler thread may kick this queue, so set it up first. */
-+	init_waitqueue_head(&blktap_wait);
-+
-+ i = kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES);
-+ BUG_ON(i<0);
-+
-+ blkif_xenbus_init();
-+
-+ for (i = 0; i < MAX_PENDING_REQS ; i++)
-+ for (j = 0; j < BLKIF_MAX_SEGMENTS_PER_REQUEST; j++)
-+ BLKTAP_INVALIDATE_HANDLE(&pending_handle(i, j));
-+
-+ err = misc_register(&blktap_miscdev);
-+ if (err != 0) {
-+ printk(KERN_ALERT "Couldn't register /dev/misc/blktap (%d)\n",
-+ err);
-+ return err;
-+ }
-+
-+ return 0;
-+}
-+
-+__initcall(blkif_init);
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/blktap/common.h b/drivers/xen/blktap/common.h
-new file mode 100644
-index 0000000..c261822
---- /dev/null
-+++ b/drivers/xen/blktap/common.h
-@@ -0,0 +1,110 @@
-+
-+#ifndef __BLKIF__BACKEND__COMMON_H__
-+#define __BLKIF__BACKEND__COMMON_H__
-+
-+#include <linux/config.h>
-+#include <linux/version.h>
-+#include <linux/module.h>
-+#include <linux/interrupt.h>
-+#include <linux/slab.h>
-+#include <linux/blkdev.h>
-+#include <linux/vmalloc.h>
-+#include <asm/io.h>
-+#include <asm/setup.h>
-+#include <asm/pgalloc.h>
-+#include <xen/evtchn.h>
-+#include <asm/hypervisor.h>
-+#include <xen/interface/io/blkif.h>
-+#include <xen/interface/io/ring.h>
-+#include <xen/gnttab.h>
-+#include <xen/driver_util.h>
-+
-+#define DPRINTK(_f, _a...) pr_debug("(file=%s, line=%d) " _f, \
-+ __FILE__ , __LINE__ , ## _a )
-+
-+#define WPRINTK(fmt, args...) printk(KERN_WARNING "blk_tap: " fmt, ##args)
-+
-+struct vbd {
-+ blkif_vdev_t handle; /* what the domain refers to this vbd as */
-+ unsigned char readonly; /* Non-zero -> read-only */
-+ unsigned char type; /* VDISK_xxx */
-+ u32 pdevice; /* phys device that this vbd maps to */
-+ struct block_device *bdev;
-+};
-+
-+typedef struct blkif_st {
-+ /* Unique identifier for this interface. */
-+ domid_t domid;
-+ unsigned int handle;
-+ /* Physical parameters of the comms window. */
-+ unsigned int evtchn;
-+ unsigned int irq;
-+ /* Comms information. */
-+ blkif_back_ring_t blk_ring;
-+ struct vm_struct *blk_ring_area;
-+ /* VBDs attached to this interface. */
-+ struct vbd vbd;
-+ /* Private fields. */
-+ enum { DISCONNECTED, CONNECTED } status;
-+#ifdef CONFIG_XEN_BLKDEV_TAP_BE
-+ /* Is this a blktap frontend */
-+ unsigned int is_blktap;
-+#endif
-+ struct list_head blkdev_list;
-+ spinlock_t blk_ring_lock;
-+ atomic_t refcnt;
-+
-+ struct work_struct free_work;
-+
-+ grant_handle_t shmem_handle;
-+ grant_ref_t shmem_ref;
-+} blkif_t;
-+
-+blkif_t *alloc_blkif(domid_t domid);
-+void free_blkif_callback(blkif_t *blkif);
-+int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn);
-+
-+#define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
-+#define blkif_put(_b) \
-+ do { \
-+ if ( atomic_dec_and_test(&(_b)->refcnt) ) \
-+ free_blkif_callback(_b); \
-+ } while (0)
-+
-+/* Create a vbd. */
-+int vbd_create(blkif_t *blkif, blkif_vdev_t vdevice, u32 pdevice,
-+ int readonly);
-+void vbd_free(struct vbd *vbd);
-+
-+unsigned long vbd_size(struct vbd *vbd);
-+unsigned int vbd_info(struct vbd *vbd);
-+unsigned long vbd_secsize(struct vbd *vbd);
-+
-+struct phys_req {
-+ unsigned short dev;
-+ unsigned short nr_sects;
-+ struct block_device *bdev;
-+ blkif_sector_t sector_number;
-+};
-+
-+int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation);
-+
-+void blkif_interface_init(void);
-+
-+void blkif_deschedule(blkif_t *blkif);
-+
-+void blkif_xenbus_init(void);
-+
-+irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
-+
-+#endif /* __BLKIF__BACKEND__COMMON_H__ */
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/blktap/interface.c b/drivers/xen/blktap/interface.c
-new file mode 100644
-index 0000000..99cb7bf
---- /dev/null
-+++ b/drivers/xen/blktap/interface.c
-@@ -0,0 +1,146 @@
-+/******************************************************************************
-+ * arch/xen/drivers/blkif/backend/interface.c
-+ *
-+ * Block-device interface management.
-+ *
-+ * Copyright (c) 2004, Keir Fraser
-+ */
-+
-+#include "common.h"
-+#include <xen/evtchn.h>
-+
-+static kmem_cache_t *blkif_cachep;
-+
-+blkif_t *alloc_blkif(domid_t domid)
-+{
-+ blkif_t *blkif;
-+
-+ blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL);
-+ if (!blkif)
-+ return ERR_PTR(-ENOMEM);
-+
-+ memset(blkif, 0, sizeof(*blkif));
-+ blkif->domid = domid;
-+ blkif->status = DISCONNECTED;
-+ spin_lock_init(&blkif->blk_ring_lock);
-+ atomic_set(&blkif->refcnt, 1);
-+
-+ return blkif;
-+}
-+
-+static int map_frontend_page(blkif_t *blkif, unsigned long shared_page)
-+{
-+ struct gnttab_map_grant_ref op;
-+ int ret;
-+
-+ op.host_addr = (unsigned long)blkif->blk_ring_area->addr;
-+ op.flags = GNTMAP_host_map;
-+ op.ref = shared_page;
-+ op.dom = blkif->domid;
-+
-+ lock_vm_area(blkif->blk_ring_area);
-+ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
-+ unlock_vm_area(blkif->blk_ring_area);
-+ BUG_ON(ret);
-+
-+ if (op.status) {
-+		DPRINTK("Grant table operation failure!\n");
-+ return op.status;
-+ }
-+
-+ blkif->shmem_ref = shared_page;
-+ blkif->shmem_handle = op.handle;
-+
-+ return 0;
-+}
-+
-+static void unmap_frontend_page(blkif_t *blkif)
-+{
-+ struct gnttab_unmap_grant_ref op;
-+ int ret;
-+
-+ op.host_addr = (unsigned long)blkif->blk_ring_area->addr;
-+ op.handle = blkif->shmem_handle;
-+ op.dev_bus_addr = 0;
-+
-+ lock_vm_area(blkif->blk_ring_area);
-+ ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
-+ unlock_vm_area(blkif->blk_ring_area);
-+ BUG_ON(ret);
-+}
-+
-+int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
-+{
-+ blkif_sring_t *sring;
-+ int err;
-+ evtchn_op_t op = {
-+ .cmd = EVTCHNOP_bind_interdomain,
-+ .u.bind_interdomain.remote_dom = blkif->domid,
-+ .u.bind_interdomain.remote_port = evtchn };
-+
-+ if ((blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL)
-+ return -ENOMEM;
-+
-+ err = map_frontend_page(blkif, shared_page);
-+ if (err) {
-+ free_vm_area(blkif->blk_ring_area);
-+ return err;
-+ }
-+
-+ err = HYPERVISOR_event_channel_op(&op);
-+ if (err) {
-+ unmap_frontend_page(blkif);
-+ free_vm_area(blkif->blk_ring_area);
-+ return err;
-+ }
-+
-+ blkif->evtchn = op.u.bind_interdomain.local_port;
-+
-+ sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
-+ BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
-+
-+ blkif->irq = bind_evtchn_to_irqhandler(
-+ blkif->evtchn, blkif_be_int, 0, "blkif-backend", blkif);
-+
-+ blkif->status = CONNECTED;
-+
-+ return 0;
-+}
-+
-+static void free_blkif(void *arg)
-+{
-+ blkif_t *blkif = (blkif_t *)arg;
-+
-+ if (blkif->irq)
-+ unbind_from_irqhandler(blkif->irq, blkif);
-+
-+ if (blkif->blk_ring.sring) {
-+ unmap_frontend_page(blkif);
-+ free_vm_area(blkif->blk_ring_area);
-+ blkif->blk_ring.sring = NULL;
-+ }
-+
-+ kmem_cache_free(blkif_cachep, blkif);
-+}
-+
-+void free_blkif_callback(blkif_t *blkif)
-+{
-+ INIT_WORK(&blkif->free_work, free_blkif, (void *)blkif);
-+ schedule_work(&blkif->free_work);
-+}
-+
-+void __init blkif_interface_init(void)
-+{
-+ blkif_cachep = kmem_cache_create(
-+ "blkif_cache", sizeof(blkif_t), 0, 0, NULL, NULL);
-+}
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/blktap/xenbus.c b/drivers/xen/blktap/xenbus.c
-new file mode 100644
-index 0000000..3dc9167
---- /dev/null
-+++ b/drivers/xen/blktap/xenbus.c
-@@ -0,0 +1,234 @@
-+/* Xenbus code for blkif tap
-+
-+ A Warfield.
-+
-+   Hastily modified from the original backend code:
-+
-+ Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
-+
-+ This program is free software; you can redistribute it and/or modify
-+ it under the terms of the GNU General Public License as published by
-+ the Free Software Foundation; either version 2 of the License, or
-+ (at your option) any later version.
-+
-+ This program is distributed in the hope that it will be useful,
-+ but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ GNU General Public License for more details.
-+
-+ You should have received a copy of the GNU General Public License
-+ along with this program; if not, write to the Free Software
-+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+*/
-+
-+#include <stdarg.h>
-+#include <linux/module.h>
-+#include <xen/xenbus.h>
-+#include "common.h"
-+
-+struct backend_info
-+{
-+ struct xenbus_device *dev;
-+
-+ /* our communications channel */
-+ blkif_t *blkif;
-+
-+ long int frontend_id;
-+
-+ /* watch back end for changes */
-+ struct xenbus_watch backend_watch;
-+
-+ /* watch front end for changes */
-+ struct xenbus_watch watch;
-+ char *frontpath;
-+};
-+
-+static int blkback_remove(struct xenbus_device *dev)
-+{
-+ struct backend_info *be = dev->data;
-+
-+ if (be->watch.node)
-+ unregister_xenbus_watch(&be->watch);
-+ unregister_xenbus_watch(&be->backend_watch);
-+ if (be->blkif)
-+ blkif_put(be->blkif);
-+ kfree(be->frontpath);
-+ kfree(be);
-+ return 0;
-+}
-+
-+/* Front end tells us frame. */
-+static void frontend_changed(struct xenbus_watch *watch,
-+ const char **vec, unsigned int len)
-+{
-+ unsigned long ring_ref;
-+ unsigned int evtchn;
-+ int err;
-+ struct backend_info *be
-+ = container_of(watch, struct backend_info, watch);
-+
-+ /* If other end is gone, delete ourself. */
-+ if (vec && !xenbus_exists(be->frontpath, "")) {
-+ xenbus_rm(be->dev->nodename, "");
-+ device_unregister(&be->dev->dev);
-+ return;
-+ }
-+ if (be->blkif == NULL || be->blkif->status == CONNECTED)
-+ return;
-+
-+ err = xenbus_gather(be->frontpath, "ring-ref", "%lu", &ring_ref,
-+ "event-channel", "%u", &evtchn, NULL);
-+ if (err) {
-+ xenbus_dev_error(be->dev, err,
-+ "reading %s/ring-ref and event-channel",
-+ be->frontpath);
-+ return;
-+ }
-+
-+ /* Map the shared frame, irq etc. */
-+ err = blkif_map(be->blkif, ring_ref, evtchn);
-+ if (err) {
-+ xenbus_dev_error(be->dev, err, "mapping ring-ref %lu port %u",
-+ ring_ref, evtchn);
-+ goto abort;
-+ }
-+
-+ xenbus_dev_ok(be->dev);
-+
-+ return;
-+
-+abort:
-+ xenbus_transaction_end(1);
-+}
-+
-+/*
-+ Setup supplies physical device.
-+ We provide event channel and device details to front end.
-+ Frontend supplies shared frame and event channel.
-+ */
-+static void backend_changed(struct xenbus_watch *watch,
-+ const char **vec, unsigned int len)
-+{
-+ int err;
-+ char *p;
-+ long int handle;
-+ struct backend_info *be
-+ = container_of(watch, struct backend_info, backend_watch);
-+ struct xenbus_device *dev = be->dev;
-+
-+ if (be->blkif == NULL) {
-+ /* Front end dir is a number, which is used as the handle. */
-+ p = strrchr(be->frontpath, '/') + 1;
-+ handle = simple_strtoul(p, NULL, 0);
-+
-+ be->blkif = alloc_blkif(be->frontend_id);
-+ if (IS_ERR(be->blkif)) {
-+ err = PTR_ERR(be->blkif);
-+ be->blkif = NULL;
-+ xenbus_dev_error(dev, err, "creating block interface");
-+ return;
-+ }
-+
-+ /* Pass in NULL node to skip exist test. */
-+ frontend_changed(&be->watch, NULL, 0);
-+ }
-+}
-+
-+static int blkback_probe(struct xenbus_device *dev,
-+ const struct xenbus_device_id *id)
-+{
-+ struct backend_info *be;
-+ char *frontend;
-+ int err;
-+
-+ be = kmalloc(sizeof(*be), GFP_KERNEL);
-+ if (!be) {
-+ xenbus_dev_error(dev, -ENOMEM, "allocating backend structure");
-+ return -ENOMEM;
-+ }
-+ memset(be, 0, sizeof(*be));
-+
-+ frontend = NULL;
-+ err = xenbus_gather(dev->nodename,
-+ "frontend-id", "%li", &be->frontend_id,
-+ "frontend", NULL, &frontend,
-+ NULL);
-+ if (XENBUS_EXIST_ERR(err))
-+ goto free_be;
-+ if (err < 0) {
-+ xenbus_dev_error(dev, err,
-+ "reading %s/frontend or frontend-id",
-+ dev->nodename);
-+ goto free_be;
-+ }
-+ if (strlen(frontend) == 0 || !xenbus_exists(frontend, "")) {
-+ /* If we can't get a frontend path and a frontend-id,
-+ * then our bus-id is no longer valid and we need to
-+ * destroy the backend device.
-+ */
-+ err = -ENOENT;
-+ goto free_be;
-+ }
-+
-+ be->dev = dev;
-+ be->backend_watch.node = dev->nodename;
-+ be->backend_watch.callback = backend_changed;
-+ /* Registration implicitly fires backend_changed once */
-+ err = register_xenbus_watch(&be->backend_watch);
-+ if (err) {
-+ be->backend_watch.node = NULL;
-+ xenbus_dev_error(dev, err, "adding backend watch on %s",
-+ dev->nodename);
-+ goto free_be;
-+ }
-+
-+ be->frontpath = frontend;
-+ be->watch.node = be->frontpath;
-+ be->watch.callback = frontend_changed;
-+ err = register_xenbus_watch(&be->watch);
-+ if (err) {
-+ be->watch.node = NULL;
-+ xenbus_dev_error(dev, err,
-+ "adding frontend watch on %s",
-+ be->frontpath);
-+ goto free_be;
-+ }
-+
-+ dev->data = be;
-+ return 0;
-+
-+ free_be:
-+ if (be->backend_watch.node)
-+ unregister_xenbus_watch(&be->backend_watch);
-+ kfree(frontend);
-+ kfree(be);
-+ return err;
-+}
-+
-+static struct xenbus_device_id blkback_ids[] = {
-+ { "vbd" },
-+ { "" }
-+};
-+
-+static struct xenbus_driver blkback = {
-+ .name = "vbd",
-+ .owner = THIS_MODULE,
-+ .ids = blkback_ids,
-+ .probe = blkback_probe,
-+ .remove = blkback_remove,
-+};
-+
-+void blkif_xenbus_init(void)
-+{
-+ xenbus_register_backend(&blkback);
-+}
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/char/Makefile b/drivers/xen/char/Makefile
-new file mode 100644
-index 0000000..c73925e
---- /dev/null
-+++ b/drivers/xen/char/Makefile
-@@ -0,0 +1,2 @@
-+
-+obj-y := mem.o
-diff --git a/drivers/xen/char/mem.c b/drivers/xen/char/mem.c
-new file mode 100644
-index 0000000..61d7991
---- /dev/null
-+++ b/drivers/xen/char/mem.c
-@@ -0,0 +1,192 @@
-+/*
-+ * Originally from linux/drivers/char/mem.c
-+ *
-+ * Copyright (C) 1991, 1992 Linus Torvalds
-+ *
-+ * Added devfs support.
-+ * Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
-+ * Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/mm.h>
-+#include <linux/miscdevice.h>
-+#include <linux/slab.h>
-+#include <linux/vmalloc.h>
-+#include <linux/mman.h>
-+#include <linux/random.h>
-+#include <linux/init.h>
-+#include <linux/raw.h>
-+#include <linux/tty.h>
-+#include <linux/capability.h>
-+#include <linux/smp_lock.h>
-+#include <linux/devfs_fs_kernel.h>
-+#include <linux/ptrace.h>
-+#include <linux/device.h>
-+#include <asm/pgalloc.h>
-+#include <asm/uaccess.h>
-+#include <asm/io.h>
-+#include <asm/hypervisor.h>
-+
-+static inline int uncached_access(struct file *file)
-+{
-+ if (file->f_flags & O_SYNC)
-+ return 1;
-+ /* Xen sets correct MTRR type on non-RAM for us. */
-+ return 0;
-+}
-+
-+/*
-+ * This function reads the *physical* memory. The f_pos points directly to the
-+ * memory location.
-+ */
-+static ssize_t read_mem(struct file * file, char __user * buf,
-+ size_t count, loff_t *ppos)
-+{
-+ unsigned long p = *ppos, ignored;
-+ ssize_t read = 0, sz;
-+ void __iomem *v;
-+
-+ while (count > 0) {
-+ /*
-+ * Handle first page in case it's not aligned
-+ */
-+ if (-p & (PAGE_SIZE - 1))
-+ sz = -p & (PAGE_SIZE - 1);
-+ else
-+ sz = PAGE_SIZE;
-+
-+ sz = min_t(unsigned long, sz, count);
-+
-+ if ((v = ioremap(p, sz)) == NULL) {
-+ /*
-+ * Some programs (e.g., dmidecode) groove off into weird RAM
-+ * areas where no tables can possibly exist (because Xen will
-+ * have stomped on them!). These programs get rather upset if
-+ * we let them know that Xen failed their access, so we fake
-+ * out a read of all zeroes. :-)
-+ */
-+ if (clear_user(buf, count))
-+ return -EFAULT;
-+ read += count;
-+ break;
-+ }
-+
-+ ignored = copy_to_user(buf, v, sz);
-+ iounmap(v);
-+ if (ignored)
-+ return -EFAULT;
-+ buf += sz;
-+ p += sz;
-+ count -= sz;
-+ read += sz;
-+ }
-+
-+ *ppos += read;
-+ return read;
-+}
-+
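The first-page handling in read_mem() and write_mem() leans on the identity that -p & (PAGE_SIZE - 1) is the distance from p to the next page boundary, and 0 when p is already aligned (hence the fallback to a full PAGE_SIZE). A quick check of the arithmetic, assuming 4 KiB pages:

    #include <assert.h>

    #define PAGE_SIZE 4096UL   /* assumed */

    int main(void)
    {
            unsigned long p = 0x1234;

            /* 0x2000 - 0x1234 = 0xdcc bytes left in the current page. */
            assert((-p & (PAGE_SIZE - 1)) == 0xdcc);
            /* Aligned offsets yield 0, so the caller uses PAGE_SIZE instead. */
            assert((-(0x2000UL) & (PAGE_SIZE - 1)) == 0);
            return 0;
    }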
-+static ssize_t write_mem(struct file * file, const char __user * buf,
-+ size_t count, loff_t *ppos)
-+{
-+ unsigned long p = *ppos, ignored;
-+ ssize_t written = 0, sz;
-+ void __iomem *v;
-+
-+ while (count > 0) {
-+ /*
-+ * Handle first page in case it's not aligned
-+ */
-+ if (-p & (PAGE_SIZE - 1))
-+ sz = -p & (PAGE_SIZE - 1);
-+ else
-+ sz = PAGE_SIZE;
-+
-+ sz = min_t(unsigned long, sz, count);
-+
-+ if ((v = ioremap(p, sz)) == NULL)
-+ break;
-+
-+ ignored = copy_from_user(v, buf, sz);
-+ iounmap(v);
-+ if (ignored) {
-+ written += sz - ignored;
-+ if (written)
-+ break;
-+ return -EFAULT;
-+ }
-+ buf += sz;
-+ p += sz;
-+ count -= sz;
-+ written += sz;
-+ }
-+
-+ *ppos += written;
-+ return written;
-+}
-+
-+static int mmap_mem(struct file * file, struct vm_area_struct * vma)
-+{
-+ size_t size = vma->vm_end - vma->vm_start;
-+
-+ if (uncached_access(file))
-+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-+
-+ /* We want to return the real error code, not EAGAIN. */
-+ return direct_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-+ size, vma->vm_page_prot, DOMID_IO);
-+}
-+
-+/*
-+ * The memory devices use the full 32/64 bits of the offset, and so we cannot
-+ * check against negative addresses: they are ok. The return value is weird,
-+ * though, in that case (0).
-+ *
-+ * also note that seeking relative to the "end of file" isn't supported:
-+ * it has no meaning, so it returns -EINVAL.
-+ */
-+static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
-+{
-+ loff_t ret;
-+
-+ mutex_lock(&file->f_dentry->d_inode->i_mutex);
-+ switch (orig) {
-+ case 0:
-+ file->f_pos = offset;
-+ ret = file->f_pos;
-+ force_successful_syscall_return();
-+ break;
-+ case 1:
-+ file->f_pos += offset;
-+ ret = file->f_pos;
-+ force_successful_syscall_return();
-+ break;
-+ default:
-+ ret = -EINVAL;
-+ }
-+ mutex_unlock(&file->f_dentry->d_inode->i_mutex);
-+ return ret;
-+}
-+
-+static int open_mem(struct inode * inode, struct file * filp)
-+{
-+ return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
-+}
-+
-+struct file_operations mem_fops = {
-+ .llseek = memory_lseek,
-+ .read = read_mem,
-+ .write = write_mem,
-+ .mmap = mmap_mem,
-+ .open = open_mem,
-+};
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/console/Makefile b/drivers/xen/console/Makefile
-new file mode 100644
-index 0000000..35de3e9
---- /dev/null
-+++ b/drivers/xen/console/Makefile
-@@ -0,0 +1,2 @@
-+
-+obj-y := console.o xencons_ring.o
-diff --git a/drivers/xen/console/console.c b/drivers/xen/console/console.c
-new file mode 100644
-index 0000000..ae8255c
---- /dev/null
-+++ b/drivers/xen/console/console.c
-@@ -0,0 +1,643 @@
-+/******************************************************************************
-+ * console.c
-+ *
-+ * Virtual console driver.
-+ *
-+ * Copyright (c) 2002-2004, K A Fraser.
-+ *
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/version.h>
-+#include <linux/module.h>
-+#include <linux/errno.h>
-+#include <linux/signal.h>
-+#include <linux/sched.h>
-+#include <linux/interrupt.h>
-+#include <linux/tty.h>
-+#include <linux/tty_flip.h>
-+#include <linux/serial.h>
-+#include <linux/major.h>
-+#include <linux/ptrace.h>
-+#include <linux/ioport.h>
-+#include <linux/mm.h>
-+#include <linux/slab.h>
-+#include <linux/init.h>
-+#include <linux/console.h>
-+#include <linux/bootmem.h>
-+#include <linux/sysrq.h>
-+#include <asm/io.h>
-+#include <asm/irq.h>
-+#include <asm/uaccess.h>
-+#include <xen/interface/xen.h>
-+#include <xen/interface/event_channel.h>
-+#include <asm/hypervisor.h>
-+#include <xen/evtchn.h>
-+#include <xen/xencons.h>
-+
-+/*
-+ * Modes:
-+ * 'xencons=off' [XC_OFF]: Console is disabled.
-+ * 'xencons=tty' [XC_TTY]: Console attached to '/dev/tty[0-9]+'.
-+ * 'xencons=ttyS' [XC_SERIAL]: Console attached to '/dev/ttyS[0-9]+'.
-+ * [XC_DEFAULT]: DOM0 -> XC_SERIAL ; all others -> XC_TTY.
-+ *
-+ * NB. In mode XC_TTY, we create dummy consoles for tty2-63. This suppresses
-+ * warnings from standard distro startup scripts.
-+ */
-+static enum { XC_OFF, XC_DEFAULT, XC_TTY, XC_SERIAL } xc_mode = XC_DEFAULT;
-+static int xc_num = -1;
-+
-+#ifdef CONFIG_MAGIC_SYSRQ
-+static unsigned long sysrq_requested;
-+extern int sysrq_enabled;
-+#endif
-+
-+static int __init xencons_setup(char *str)
-+{
-+ char *q;
-+ int n;
-+
-+ if (!strncmp(str, "ttyS", 4))
-+ xc_mode = XC_SERIAL;
-+ else if (!strncmp(str, "tty", 3))
-+ xc_mode = XC_TTY;
-+ else if (!strncmp(str, "off", 3))
-+ xc_mode = XC_OFF;
-+
-+ switch (xc_mode) {
-+ case XC_SERIAL:
-+ n = simple_strtol(str+4, &q, 10);
-+ if (q > (str + 4))
-+ xc_num = n;
-+ break;
-+ case XC_TTY:
-+ n = simple_strtol(str+3, &q, 10);
-+ if (q > (str + 3))
-+ xc_num = n;
-+ break;
-+ default:
-+ break;
-+ }
-+
-+ return 1;
-+}
-+__setup("xencons=", xencons_setup);
-+
-+/* The kernel and user-land drivers share a common transmit buffer. */
-+static unsigned int wbuf_size = 4096;
-+#define WBUF_MASK(_i) ((_i)&(wbuf_size-1))
-+static char *wbuf;
-+static unsigned int wc, wp; /* write_cons, write_prod */
-+
-+static int __init xencons_bufsz_setup(char *str)
-+{
-+ unsigned int goal;
-+ goal = simple_strtoul(str, NULL, 0);
-+ while (wbuf_size < goal)
-+ wbuf_size <<= 1;
-+ return 1;
-+}
-+__setup("xencons_bufsz=", xencons_bufsz_setup);
-+
-+/* This lock protects accesses to the common transmit buffer. */
-+static spinlock_t xencons_lock = SPIN_LOCK_UNLOCKED;
-+
-+/* Common transmit-kick routine. */
-+static void __xencons_tx_flush(void);
-+
-+static struct tty_driver *xencons_driver;
-+
-+/******************** Kernel console driver ********************************/
-+
-+static void kcons_write(
-+ struct console *c, const char *s, unsigned int count)
-+{
-+ int i = 0;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&xencons_lock, flags);
-+
-+ while (i < count) {
-+ for (; i < count; i++) {
-+ if ((wp - wc) >= (wbuf_size - 1))
-+ break;
-+ if ((wbuf[WBUF_MASK(wp++)] = s[i]) == '\n')
-+ wbuf[WBUF_MASK(wp++)] = '\r';
-+ }
-+
-+ __xencons_tx_flush();
-+ }
-+
-+ spin_unlock_irqrestore(&xencons_lock, flags);
-+}
-+
-+static void kcons_write_dom0(
-+ struct console *c, const char *s, unsigned int count)
-+{
-+ int rc;
-+
-+ while ((count > 0) &&
-+ ((rc = HYPERVISOR_console_io(
-+ CONSOLEIO_write, count, (char *)s)) > 0)) {
-+ count -= rc;
-+ s += rc;
-+ }
-+}
-+
-+static struct tty_driver *kcons_device(struct console *c, int *index)
-+{
-+ *index = 0;
-+ return xencons_driver;
-+}
-+
-+static struct console kcons_info = {
-+ .device = kcons_device,
-+ .flags = CON_PRINTBUFFER,
-+ .index = -1,
-+};
-+
-+#define __RETCODE 0
-+static int __init xen_console_init(void)
-+{
-+ if (xen_init() < 0)
-+ return __RETCODE;
-+
-+ if (xen_start_info->flags & SIF_INITDOMAIN) {
-+ if (xc_mode == XC_DEFAULT)
-+ xc_mode = XC_SERIAL;
-+ kcons_info.write = kcons_write_dom0;
-+ if (xc_mode == XC_SERIAL)
-+ kcons_info.flags |= CON_ENABLED;
-+ } else {
-+ if (xc_mode == XC_DEFAULT)
-+ xc_mode = XC_TTY;
-+ kcons_info.write = kcons_write;
-+ }
-+
-+ switch (xc_mode) {
-+ case XC_SERIAL:
-+ strcpy(kcons_info.name, "ttyS");
-+ if (xc_num == -1)
-+ xc_num = 0;
-+ break;
-+
-+ case XC_TTY:
-+ strcpy(kcons_info.name, "tty");
-+ if (xc_num == -1)
-+ xc_num = 1;
-+ break;
-+
-+ default:
-+ return __RETCODE;
-+ }
-+
-+ wbuf = alloc_bootmem(wbuf_size);
-+
-+ register_console(&kcons_info);
-+
-+ return __RETCODE;
-+}
-+console_initcall(xen_console_init);
-+
-+/*** Useful function for console debugging -- goes straight to Xen. ***/
-+asmlinkage int xprintk(const char *fmt, ...)
-+{
-+ va_list args;
-+ int printk_len;
-+ static char printk_buf[1024];
-+
-+ /* Emit the output into the temporary buffer */
-+ va_start(args, fmt);
-+ printk_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args);
-+ va_end(args);
-+
-+ /* Send the processed output directly to Xen. */
-+ kcons_write_dom0(NULL, printk_buf, printk_len);
-+
-+ return 0;
-+}
-+
-+/*** Forcibly flush console data before dying. ***/
-+void xencons_force_flush(void)
-+{
-+ int sz;
-+
-+ /* Emergency console is synchronous, so there's nothing to flush. */
-+ if (xen_start_info->flags & SIF_INITDOMAIN)
-+ return;
-+
-+ /* Spin until console data is flushed through to the daemon. */
-+ while (wc != wp) {
-+ int sent = 0;
-+ if ((sz = wp - wc) == 0)
-+ continue;
-+ sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
-+ if (sent > 0)
-+ wc += sent;
-+ }
-+}
-+
-+
-+/******************** User-space console driver (/dev/console) ************/
-+
-+#define DRV(_d) (_d)
-+#define TTY_INDEX(_tty) ((_tty)->index)
-+
-+static struct termios *xencons_termios[MAX_NR_CONSOLES];
-+static struct termios *xencons_termios_locked[MAX_NR_CONSOLES];
-+static struct tty_struct *xencons_tty;
-+static int xencons_priv_irq;
-+static char x_char;
-+
-+void xencons_rx(char *buf, unsigned len, struct pt_regs *regs)
-+{
-+ int i;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&xencons_lock, flags);
-+ if (xencons_tty == NULL)
-+ goto out;
-+
-+ for (i = 0; i < len; i++) {
-+#ifdef CONFIG_MAGIC_SYSRQ
-+ if (sysrq_enabled) {
-+ if (buf[i] == '\x0f') { /* ^O */
-+ sysrq_requested = jiffies;
-+ continue; /* don't print the sysrq key */
-+ } else if (sysrq_requested) {
-+ unsigned long sysrq_timeout =
-+ sysrq_requested + HZ*2;
-+ sysrq_requested = 0;
-+ if (time_before(jiffies, sysrq_timeout)) {
-+ spin_unlock_irqrestore(
-+ &xencons_lock, flags);
-+ handle_sysrq(
-+ buf[i], regs, xencons_tty);
-+ spin_lock_irqsave(
-+ &xencons_lock, flags);
-+ continue;
-+ }
-+ }
-+ }
-+#endif
-+ tty_insert_flip_char(xencons_tty, buf[i], 0);
-+ }
-+ tty_flip_buffer_push(xencons_tty);
-+
-+ out:
-+ spin_unlock_irqrestore(&xencons_lock, flags);
-+}
-+
-+static void __xencons_tx_flush(void)
-+{
-+ int sent, sz, work_done = 0;
-+
-+ if (x_char) {
-+ if (xen_start_info->flags & SIF_INITDOMAIN)
-+ kcons_write_dom0(NULL, &x_char, 1);
-+ else
-+ while (x_char)
-+ if (xencons_ring_send(&x_char, 1) == 1)
-+ break;
-+ x_char = 0;
-+ work_done = 1;
-+ }
-+
-+ while (wc != wp) {
-+ sz = wp - wc;
-+ if (sz > (wbuf_size - WBUF_MASK(wc)))
-+ sz = wbuf_size - WBUF_MASK(wc);
-+ if (xen_start_info->flags & SIF_INITDOMAIN) {
-+ kcons_write_dom0(NULL, &wbuf[WBUF_MASK(wc)], sz);
-+ wc += sz;
-+ } else {
-+ sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
-+ if (sent == 0)
-+ break;
-+ wc += sent;
-+ }
-+ work_done = 1;
-+ }
-+
-+ if (work_done && (xencons_tty != NULL)) {
-+ wake_up_interruptible(&xencons_tty->write_wait);
-+ if ((xencons_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
-+ (xencons_tty->ldisc.write_wakeup != NULL))
-+ (xencons_tty->ldisc.write_wakeup)(xencons_tty);
-+ }
-+}
-+
-+void xencons_tx(void)
-+{
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&xencons_lock, flags);
-+ __xencons_tx_flush();
-+ spin_unlock_irqrestore(&xencons_lock, flags);
-+}
-+
-+/* Privileged receive callback and transmit kicker. */
-+static irqreturn_t xencons_priv_interrupt(int irq, void *dev_id,
-+ struct pt_regs *regs)
-+{
-+ static char rbuf[16];
-+ int l;
-+
-+ while ((l = HYPERVISOR_console_io(CONSOLEIO_read, 16, rbuf)) > 0)
-+ xencons_rx(rbuf, l, regs);
-+
-+ xencons_tx();
-+
-+ return IRQ_HANDLED;
-+}
-+
-+static int xencons_write_room(struct tty_struct *tty)
-+{
-+ return wbuf_size - (wp - wc);
-+}
-+
-+static int xencons_chars_in_buffer(struct tty_struct *tty)
-+{
-+ return wp - wc;
-+}
-+
-+static void xencons_send_xchar(struct tty_struct *tty, char ch)
-+{
-+ unsigned long flags;
-+
-+ if (TTY_INDEX(tty) != 0)
-+ return;
-+
-+ spin_lock_irqsave(&xencons_lock, flags);
-+ x_char = ch;
-+ __xencons_tx_flush();
-+ spin_unlock_irqrestore(&xencons_lock, flags);
-+}
-+
-+static void xencons_throttle(struct tty_struct *tty)
-+{
-+ if (TTY_INDEX(tty) != 0)
-+ return;
-+
-+ if (I_IXOFF(tty))
-+ xencons_send_xchar(tty, STOP_CHAR(tty));
-+}
-+
-+static void xencons_unthrottle(struct tty_struct *tty)
-+{
-+ if (TTY_INDEX(tty) != 0)
-+ return;
-+
-+ if (I_IXOFF(tty)) {
-+ if (x_char != 0)
-+ x_char = 0;
-+ else
-+ xencons_send_xchar(tty, START_CHAR(tty));
-+ }
-+}
-+
-+static void xencons_flush_buffer(struct tty_struct *tty)
-+{
-+ unsigned long flags;
-+
-+ if (TTY_INDEX(tty) != 0)
-+ return;
-+
-+ spin_lock_irqsave(&xencons_lock, flags);
-+ wc = wp = 0;
-+ spin_unlock_irqrestore(&xencons_lock, flags);
-+}
-+
-+static inline int __xencons_put_char(int ch)
-+{
-+ char _ch = (char)ch;
-+ if ((wp - wc) == wbuf_size)
-+ return 0;
-+ wbuf[WBUF_MASK(wp++)] = _ch;
-+ return 1;
-+}
-+
-+static int xencons_write(
-+ struct tty_struct *tty,
-+ const unsigned char *buf,
-+ int count)
-+{
-+ int i;
-+ unsigned long flags;
-+
-+ if (TTY_INDEX(tty) != 0)
-+ return count;
-+
-+ spin_lock_irqsave(&xencons_lock, flags);
-+
-+ for (i = 0; i < count; i++)
-+ if (!__xencons_put_char(buf[i]))
-+ break;
-+
-+ if (i != 0)
-+ __xencons_tx_flush();
-+
-+ spin_unlock_irqrestore(&xencons_lock, flags);
-+
-+ return i;
-+}
-+
-+static void xencons_put_char(struct tty_struct *tty, u_char ch)
-+{
-+ unsigned long flags;
-+
-+ if (TTY_INDEX(tty) != 0)
-+ return;
-+
-+ spin_lock_irqsave(&xencons_lock, flags);
-+ (void)__xencons_put_char(ch);
-+ spin_unlock_irqrestore(&xencons_lock, flags);
-+}
-+
-+static void xencons_flush_chars(struct tty_struct *tty)
-+{
-+ unsigned long flags;
-+
-+ if (TTY_INDEX(tty) != 0)
-+ return;
-+
-+ spin_lock_irqsave(&xencons_lock, flags);
-+ __xencons_tx_flush();
-+ spin_unlock_irqrestore(&xencons_lock, flags);
-+}
-+
-+static void xencons_wait_until_sent(struct tty_struct *tty, int timeout)
-+{
-+ unsigned long orig_jiffies = jiffies;
-+
-+ if (TTY_INDEX(tty) != 0)
-+ return;
-+
-+ while (DRV(tty->driver)->chars_in_buffer(tty)) {
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ schedule_timeout(1);
-+ if (signal_pending(current))
-+ break;
-+ if (timeout && time_after(jiffies, orig_jiffies + timeout))
-+ break;
-+ }
-+
-+ set_current_state(TASK_RUNNING);
-+}
-+
-+static int xencons_open(struct tty_struct *tty, struct file *filp)
-+{
-+ unsigned long flags;
-+
-+ if (TTY_INDEX(tty) != 0)
-+ return 0;
-+
-+ spin_lock_irqsave(&xencons_lock, flags);
-+ tty->driver_data = NULL;
-+ if (xencons_tty == NULL)
-+ xencons_tty = tty;
-+ __xencons_tx_flush();
-+ spin_unlock_irqrestore(&xencons_lock, flags);
-+
-+ return 0;
-+}
-+
-+static void xencons_close(struct tty_struct *tty, struct file *filp)
-+{
-+ unsigned long flags;
-+
-+ if (TTY_INDEX(tty) != 0)
-+ return;
-+
-+ if (tty->count == 1) {
-+ tty->closing = 1;
-+ tty_wait_until_sent(tty, 0);
-+ if (DRV(tty->driver)->flush_buffer != NULL)
-+ DRV(tty->driver)->flush_buffer(tty);
-+ if (tty->ldisc.flush_buffer != NULL)
-+ tty->ldisc.flush_buffer(tty);
-+ tty->closing = 0;
-+ spin_lock_irqsave(&xencons_lock, flags);
-+ xencons_tty = NULL;
-+ spin_unlock_irqrestore(&xencons_lock, flags);
-+ }
-+}
-+
-+static struct tty_operations xencons_ops = {
-+ .open = xencons_open,
-+ .close = xencons_close,
-+ .write = xencons_write,
-+ .write_room = xencons_write_room,
-+ .put_char = xencons_put_char,
-+ .flush_chars = xencons_flush_chars,
-+ .chars_in_buffer = xencons_chars_in_buffer,
-+ .send_xchar = xencons_send_xchar,
-+ .flush_buffer = xencons_flush_buffer,
-+ .throttle = xencons_throttle,
-+ .unthrottle = xencons_unthrottle,
-+ .wait_until_sent = xencons_wait_until_sent,
-+};
-+
-+static int __init xencons_init(void)
-+{
-+ int rc;
-+
-+ if (xen_init() < 0)
-+ return -ENODEV;
-+
-+ if (xc_mode == XC_OFF)
-+ return 0;
-+
-+ xencons_ring_init();
-+
-+ xencons_driver = alloc_tty_driver((xc_mode == XC_SERIAL) ?
-+ 1 : MAX_NR_CONSOLES);
-+ if (xencons_driver == NULL)
-+ return -ENOMEM;
-+
-+ DRV(xencons_driver)->name = "xencons";
-+ DRV(xencons_driver)->major = TTY_MAJOR;
-+ DRV(xencons_driver)->type = TTY_DRIVER_TYPE_SERIAL;
-+ DRV(xencons_driver)->subtype = SERIAL_TYPE_NORMAL;
-+ DRV(xencons_driver)->init_termios = tty_std_termios;
-+ DRV(xencons_driver)->flags =
-+ TTY_DRIVER_REAL_RAW |
-+ TTY_DRIVER_RESET_TERMIOS |
-+ TTY_DRIVER_NO_DEVFS;
-+ DRV(xencons_driver)->termios = xencons_termios;
-+ DRV(xencons_driver)->termios_locked = xencons_termios_locked;
-+
-+ if (xc_mode == XC_SERIAL) {
-+ DRV(xencons_driver)->name = "ttyS";
-+ DRV(xencons_driver)->minor_start = 64 + xc_num;
-+ DRV(xencons_driver)->name_base = 0 + xc_num;
-+ } else {
-+ DRV(xencons_driver)->name = "tty";
-+ DRV(xencons_driver)->minor_start = xc_num;
-+ DRV(xencons_driver)->name_base = xc_num;
-+ }
-+
-+ tty_set_operations(xencons_driver, &xencons_ops);
-+
-+ if ((rc = tty_register_driver(DRV(xencons_driver))) != 0) {
-+ printk("WARNING: Failed to register Xen virtual "
-+ "console driver as '%s%d'\n",
-+ DRV(xencons_driver)->name,
-+ DRV(xencons_driver)->name_base);
-+ put_tty_driver(xencons_driver);
-+ xencons_driver = NULL;
-+ return rc;
-+ }
-+
-+ tty_register_device(xencons_driver, 0, NULL);
-+
-+ if (xen_start_info->flags & SIF_INITDOMAIN) {
-+ xencons_priv_irq = bind_virq_to_irqhandler(
-+ VIRQ_CONSOLE,
-+ 0,
-+ xencons_priv_interrupt,
-+ 0,
-+ "console",
-+ NULL);
-+ BUG_ON(xencons_priv_irq < 0);
-+ }
-+
-+ printk("Xen virtual console successfully installed as %s%d\n",
-+ DRV(xencons_driver)->name,
-+ DRV(xencons_driver)->name_base );
-+
-+ return 0;
-+}
-+
-+module_init(xencons_init);
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/console/xencons_ring.c b/drivers/xen/console/xencons_ring.c
-new file mode 100644
-index 0000000..8abb0a4
---- /dev/null
-+++ b/drivers/xen/console/xencons_ring.c
-@@ -0,0 +1,125 @@
-+#include <linux/version.h>
-+#include <linux/module.h>
-+#include <linux/errno.h>
-+#include <linux/signal.h>
-+#include <linux/sched.h>
-+#include <linux/interrupt.h>
-+#include <linux/tty.h>
-+#include <linux/tty_flip.h>
-+#include <linux/serial.h>
-+#include <linux/major.h>
-+#include <linux/ptrace.h>
-+#include <linux/ioport.h>
-+#include <linux/mm.h>
-+#include <linux/slab.h>
-+
-+#include <asm/hypervisor.h>
-+#include <xen/evtchn.h>
-+#include <xen/xencons.h>
-+#include <linux/wait.h>
-+#include <linux/interrupt.h>
-+#include <linux/sched.h>
-+#include <linux/err.h>
-+#include <xen/interface/io/console.h>
-+
-+static int xencons_irq;
-+
-+static inline struct xencons_interface *xencons_interface(void)
-+{
-+ return mfn_to_virt(xen_start_info->console_mfn);
-+}
-+
-+static inline void notify_daemon(void)
-+{
-+ /* Use evtchn: this is called early, before irq is set up. */
-+ notify_remote_via_evtchn(xen_start_info->console_evtchn);
-+}
-+
-+int xencons_ring_send(const char *data, unsigned len)
-+{
-+ int sent = 0;
-+ struct xencons_interface *intf = xencons_interface();
-+ XENCONS_RING_IDX cons, prod;
-+
-+ cons = intf->out_cons;
-+ prod = intf->out_prod;
-+ mb();
-+ BUG_ON((prod - cons) > sizeof(intf->out));
-+
-+ while ((sent < len) && ((prod - cons) < sizeof(intf->out)))
-+ intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++];
-+
-+ wmb();
-+ intf->out_prod = prod;
-+
-+ notify_daemon();
-+
-+ return sent;
-+}
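A note on the ring arithmetic above: out_cons and out_prod are free-running unsigned counters, so (prod - cons) is the occupancy even across wraparound, which is exactly what the BUG_ON and the loop condition test. A minimal model, assuming 32-bit indices and a 16-byte ring:

    #include <assert.h>
    #include <stdint.h>

    #define RING_SIZE 16   /* stands in for sizeof(intf->out) */

    int main(void)
    {
            /* Counters parked just below the 32-bit wrap point. */
            uint32_t cons = 0xfffffff8u;
            uint32_t prod = 0xfffffff8u;

            prod += 10;                             /* producer wraps past zero */
            assert(prod - cons == 10);              /* occupancy still correct  */
            assert(RING_SIZE - (prod - cons) == 6); /* free slots remaining     */
            return 0;
    }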
-+
-+static irqreturn_t handle_input(int irq, void *unused, struct pt_regs *regs)
-+{
-+ struct xencons_interface *intf = xencons_interface();
-+ XENCONS_RING_IDX cons, prod;
-+
-+ cons = intf->in_cons;
-+ prod = intf->in_prod;
-+ mb();
-+ BUG_ON((prod - cons) > sizeof(intf->in));
-+
-+ while (cons != prod) {
-+ xencons_rx(intf->in+MASK_XENCONS_IDX(cons,intf->in), 1, regs);
-+ cons++;
-+ }
-+
-+ mb();
-+ intf->in_cons = cons;
-+
-+ notify_daemon();
-+
-+ xencons_tx();
-+
-+ return IRQ_HANDLED;
-+}
-+
-+int xencons_ring_init(void)
-+{
-+ int err;
-+
-+ if (xencons_irq)
-+ unbind_from_irqhandler(xencons_irq, NULL);
-+ xencons_irq = 0;
-+
-+ if (!xen_start_info->console_evtchn)
-+ return 0;
-+
-+ err = bind_evtchn_to_irqhandler(
-+ xen_start_info->console_evtchn,
-+ handle_input, 0, "xencons", NULL);
-+ if (err <= 0) {
-+ printk(KERN_ERR "XEN console request irq failed %i\n", err);
-+ return err;
-+ }
-+
-+ xencons_irq = err;
-+
-+ /* In case we have in-flight data after save/restore... */
-+ notify_daemon();
-+
-+ return 0;
-+}
-+
-+void xencons_resume(void)
-+{
-+ (void)xencons_ring_init();
-+}
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/core/Makefile b/drivers/xen/core/Makefile
-new file mode 100644
-index 0000000..1d57a5b
---- /dev/null
-+++ b/drivers/xen/core/Makefile
-@@ -0,0 +1,9 @@
-+#
-+# Makefile for the linux kernel.
-+#
-+
-+obj-y := evtchn.o reboot.o gnttab.o features.o
-+
-+obj-$(CONFIG_PROC_FS) += xen_proc.o
-+obj-$(CONFIG_NET) += skbuff.o
-+obj-$(CONFIG_SMP) += smpboot.o
-diff --git a/drivers/xen/core/evtchn.c b/drivers/xen/core/evtchn.c
-new file mode 100644
-index 0000000..58f08c6
---- /dev/null
-+++ b/drivers/xen/core/evtchn.c
-@@ -0,0 +1,822 @@
-+/******************************************************************************
-+ * evtchn.c
-+ *
-+ * Communication via Xen event channels.
-+ *
-+ * Copyright (c) 2002-2005, K A Fraser
-+ *
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/module.h>
-+#include <linux/irq.h>
-+#include <linux/interrupt.h>
-+#include <linux/sched.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/version.h>
-+#include <asm/atomic.h>
-+#include <asm/system.h>
-+#include <asm/ptrace.h>
-+#include <asm/synch_bitops.h>
-+#include <xen/interface/event_channel.h>
-+#include <xen/interface/physdev.h>
-+#include <asm/hypervisor.h>
-+#include <xen/evtchn.h>
-+#include <linux/mc146818rtc.h> /* RTC_IRQ */
-+
-+/*
-+ * This lock protects updates to the following mapping and reference-count
-+ * arrays. The lock does not need to be acquired to read the mapping tables.
-+ */
-+static spinlock_t irq_mapping_update_lock;
-+
-+/* IRQ <-> event-channel mappings. */
-+static int evtchn_to_irq[NR_EVENT_CHANNELS];
-+
-+/* Packed IRQ information: binding type, sub-type index, and event channel. */
-+static u32 irq_info[NR_IRQS];
-+/* Binding types. */
-+enum { IRQT_UNBOUND, IRQT_PIRQ, IRQT_VIRQ, IRQT_IPI, IRQT_EVTCHN };
-+/* Constructor for packed IRQ information. */
-+#define mk_irq_info(type, index, evtchn) \
-+ (((u32)(type) << 24) | ((u32)(index) << 16) | (u32)(evtchn))
-+/* Convenient shorthand for packed representation of an unbound IRQ. */
-+#define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0)
-+/* Accessor macros for packed IRQ information. */
-+#define evtchn_from_irq(irq) ((u16)(irq_info[irq]))
-+#define index_from_irq(irq) ((u8)(irq_info[irq] >> 16))
-+#define type_from_irq(irq) ((u8)(irq_info[irq] >> 24))
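-+/*
-+ * Example: mk_irq_info(IRQT_VIRQ, VIRQ_TIMER, 3) places the binding type
-+ * in bits 31-24, the VIRQ index in bits 23-16 and event-channel port 3 in
-+ * bits 15-0; the accessors above each recover one field with a shift/mask.
-+ */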
-+
-+/* IRQ <-> VIRQ mapping. */
-+DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]);
-+
-+/* IRQ <-> IPI mapping. */
-+#ifndef NR_IPIS
-+#define NR_IPIS 1
-+#endif
-+DEFINE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
-+
-+/* Reference counts for bindings to IRQs. */
-+static int irq_bindcount[NR_IRQS];
-+
-+/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
-+static unsigned long pirq_needs_unmask_notify[NR_PIRQS/sizeof(unsigned long)];
-+
-+#ifdef CONFIG_SMP
-+
-+static u8 cpu_evtchn[NR_EVENT_CHANNELS];
-+static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];
-+
-+#define active_evtchns(cpu,sh,idx) \
-+ ((sh)->evtchn_pending[idx] & \
-+ cpu_evtchn_mask[cpu][idx] & \
-+ ~(sh)->evtchn_mask[idx])
-+
-+static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
-+{
-+ clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
-+ set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
-+ cpu_evtchn[chn] = cpu;
-+}
-+
-+static void init_evtchn_cpu_bindings(void)
-+{
-+ /* By default all event channels notify CPU#0. */
-+ memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
-+ memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
-+}
-+
-+#define cpu_from_evtchn(evtchn) (cpu_evtchn[evtchn])
-+
-+#else
-+
-+#define active_evtchns(cpu,sh,idx) \
-+ ((sh)->evtchn_pending[idx] & \
-+ ~(sh)->evtchn_mask[idx])
-+#define bind_evtchn_to_cpu(chn,cpu) ((void)0)
-+#define init_evtchn_cpu_bindings() ((void)0)
-+#define cpu_from_evtchn(evtchn) (0)
-+
-+#endif
-+
-+/* Upcall to generic IRQ layer. */
-+#ifdef CONFIG_X86
-+extern fastcall unsigned int do_IRQ(struct pt_regs *regs);
-+#if defined (__i386__)
-+static inline void exit_idle(void) {}
-+#define IRQ_REG orig_eax
-+#elif defined (__x86_64__)
-+#include <asm/idle.h>
-+#define IRQ_REG orig_rax
-+#endif
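-+/*
-+ * Stash the decoded irq number in the saved-register slot (orig_eax or
-+ * orig_rax) that the Xen-modified do_IRQ() reads it back from, then call
-+ * the real handler; on native x86 this slot carries the hardware vector.
-+ */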
-+#define do_IRQ(irq, regs) do { \
-+ (regs)->IRQ_REG = (irq); \
-+ do_IRQ((regs)); \
-+} while (0)
-+#endif
-+
-+/* Xen will never allocate port zero for any purpose. */
-+#define VALID_EVTCHN(chn) ((chn) != 0)
-+
-+/*
-+ * Force a proper event-channel callback from Xen after clearing the
-+ * callback mask. We do this in a very simple manner, by making a call
-+ * down into Xen. The pending flag will be checked by Xen on return.
-+ */
-+void force_evtchn_callback(void)
-+{
-+ (void)HYPERVISOR_xen_version(0, NULL);
-+}
-+EXPORT_SYMBOL(force_evtchn_callback);
-+
-+/* NB. Interrupts are disabled on entry. */
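-+/*
-+ * The pending bitmap is scanned in two levels: each bit of
-+ * evtchn_pending_sel selects one word of evtchn_pending[]. E.g. on a
-+ * 32-bit guest, l1i == 1 and l2i == 5 decode to port 1*32 + 5 = 37.
-+ */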
-+asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
-+{
-+ unsigned long l1, l2;
-+ unsigned int l1i, l2i, port;
-+ int irq, cpu = smp_processor_id();
-+ shared_info_t *s = HYPERVISOR_shared_info;
-+ vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
-+
-+ vcpu_info->evtchn_upcall_pending = 0;
-+
-+ /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
-+ l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
-+ while (l1 != 0) {
-+ l1i = __ffs(l1);
-+ l1 &= ~(1UL << l1i);
-+
-+ while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
-+ l2i = __ffs(l2);
-+
-+ port = (l1i * BITS_PER_LONG) + l2i;
-+ if ((irq = evtchn_to_irq[port]) != -1)
-+ do_IRQ(irq, regs);
-+ else {
-+ exit_idle();
-+ evtchn_device_upcall(port);
-+ }
-+ }
-+ }
-+}
-+
-+static int find_unbound_irq(void)
-+{
-+ int irq;
-+
-+ for (irq = 0; irq < NR_IRQS; irq++)
-+ if (irq_bindcount[irq] == 0)
-+ break;
-+
-+ if (irq == NR_IRQS)
-+ panic("No available IRQ to bind to: increase NR_IRQS!\n");
-+
-+ return irq;
-+}
-+
-+static int bind_evtchn_to_irq(unsigned int evtchn)
-+{
-+ int irq;
-+
-+ spin_lock(&irq_mapping_update_lock);
-+
-+ if ((irq = evtchn_to_irq[evtchn]) == -1) {
-+ irq = find_unbound_irq();
-+ evtchn_to_irq[evtchn] = irq;
-+ irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
-+ }
-+
-+ irq_bindcount[irq]++;
-+
-+ spin_unlock(&irq_mapping_update_lock);
-+
-+ return irq;
-+}
-+
-+static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
-+{
-+ evtchn_op_t op = { .cmd = EVTCHNOP_bind_virq };
-+ int evtchn, irq;
-+
-+ spin_lock(&irq_mapping_update_lock);
-+
-+ if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
-+ op.u.bind_virq.virq = virq;
-+ op.u.bind_virq.vcpu = cpu;
-+ BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
-+ evtchn = op.u.bind_virq.port;
-+
-+ irq = find_unbound_irq();
-+ evtchn_to_irq[evtchn] = irq;
-+ irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
-+
-+ per_cpu(virq_to_irq, cpu)[virq] = irq;
-+
-+ bind_evtchn_to_cpu(evtchn, cpu);
-+ }
-+
-+ irq_bindcount[irq]++;
-+
-+ spin_unlock(&irq_mapping_update_lock);
-+
-+ return irq;
-+}
-+
-+static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
-+{
-+ evtchn_op_t op = { .cmd = EVTCHNOP_bind_ipi };
-+ int evtchn, irq;
-+
-+ spin_lock(&irq_mapping_update_lock);
-+
-+ if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) {
-+ op.u.bind_ipi.vcpu = cpu;
-+ BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
-+ evtchn = op.u.bind_ipi.port;
-+
-+ irq = find_unbound_irq();
-+ evtchn_to_irq[evtchn] = irq;
-+ irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
-+
-+ per_cpu(ipi_to_irq, cpu)[ipi] = irq;
-+
-+ bind_evtchn_to_cpu(evtchn, cpu);
-+ }
-+
-+ irq_bindcount[irq]++;
-+
-+ spin_unlock(&irq_mapping_update_lock);
-+
-+ return irq;
-+}
-+
-+static void unbind_from_irq(unsigned int irq)
-+{
-+ evtchn_op_t op = { .cmd = EVTCHNOP_close };
-+ int evtchn = evtchn_from_irq(irq);
-+
-+ spin_lock(&irq_mapping_update_lock);
-+
-+ if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
-+ op.u.close.port = evtchn;
-+ BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
-+
-+ switch (type_from_irq(irq)) {
-+ case IRQT_VIRQ:
-+ per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
-+ [index_from_irq(irq)] = -1;
-+ break;
-+ case IRQT_IPI:
-+ per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
-+ [index_from_irq(irq)] = -1;
-+ break;
-+ default:
-+ break;
-+ }
-+
-+ /* Closed ports are implicitly re-bound to VCPU0. */
-+ bind_evtchn_to_cpu(evtchn, 0);
-+
-+ evtchn_to_irq[evtchn] = -1;
-+ irq_info[irq] = IRQ_UNBOUND;
-+ }
-+
-+ spin_unlock(&irq_mapping_update_lock);
-+}
-+
-+int bind_evtchn_to_irqhandler(
-+ unsigned int evtchn,
-+ irqreturn_t (*handler)(int, void *, struct pt_regs *),
-+ unsigned long irqflags,
-+ const char *devname,
-+ void *dev_id)
-+{
-+ unsigned int irq;
-+ int retval;
-+
-+ irq = bind_evtchn_to_irq(evtchn);
-+ retval = request_irq(irq, handler, irqflags, devname, dev_id);
-+ if (retval != 0) {
-+ unbind_from_irq(irq);
-+ return retval;
-+ }
-+
-+ return irq;
-+}
-+EXPORT_SYMBOL(bind_evtchn_to_irqhandler);
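-+/*
-+ * Typical caller pattern (illustrative; my_handler and dev are
-+ * caller-supplied):
-+ *
-+ *	irq = bind_evtchn_to_irqhandler(evtchn, my_handler, 0, "mydev", dev);
-+ *	if (irq < 0)
-+ *		return irq;
-+ *	...
-+ *	unbind_from_irqhandler(irq, dev);
-+ */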
-+
-+int bind_virq_to_irqhandler(
-+ unsigned int virq,
-+ unsigned int cpu,
-+ irqreturn_t (*handler)(int, void *, struct pt_regs *),
-+ unsigned long irqflags,
-+ const char *devname,
-+ void *dev_id)
-+{
-+ unsigned int irq;
-+ int retval;
-+
-+ irq = bind_virq_to_irq(virq, cpu);
-+ retval = request_irq(irq, handler, irqflags, devname, dev_id);
-+ if (retval != 0) {
-+ unbind_from_irq(irq);
-+ return retval;
-+ }
-+
-+ return irq;
-+}
-+EXPORT_SYMBOL(bind_virq_to_irqhandler);
-+
-+int bind_ipi_to_irqhandler(
-+ unsigned int ipi,
-+ unsigned int cpu,
-+ irqreturn_t (*handler)(int, void *, struct pt_regs *),
-+ unsigned long irqflags,
-+ const char *devname,
-+ void *dev_id)
-+{
-+ unsigned int irq;
-+ int retval;
-+
-+ irq = bind_ipi_to_irq(ipi, cpu);
-+ retval = request_irq(irq, handler, irqflags, devname, dev_id);
-+ if (retval != 0) {
-+ unbind_from_irq(irq);
-+ return retval;
-+ }
-+
-+ return irq;
-+}
-+EXPORT_SYMBOL(bind_ipi_to_irqhandler);
-+
-+void unbind_from_irqhandler(unsigned int irq, void *dev_id)
-+{
-+ free_irq(irq, dev_id);
-+ unbind_from_irq(irq);
-+}
-+EXPORT_SYMBOL(unbind_from_irqhandler);
-+
-+#ifdef CONFIG_SMP
-+static void do_nothing_function(void *ign)
-+{
-+}
-+#endif
-+
-+/* Rebind an evtchn so that it gets delivered to a specific cpu */
-+static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
-+{
-+ evtchn_op_t op = { .cmd = EVTCHNOP_bind_vcpu };
-+ int evtchn;
-+
-+ spin_lock(&irq_mapping_update_lock);
-+
-+ evtchn = evtchn_from_irq(irq);
-+ if (!VALID_EVTCHN(evtchn)) {
-+ spin_unlock(&irq_mapping_update_lock);
-+ return;
-+ }
-+
-+ /* Send future instances of this interrupt to other vcpu. */
-+ op.u.bind_vcpu.port = evtchn;
-+ op.u.bind_vcpu.vcpu = tcpu;
-+
-+	/*
-+	 * If this fails, it usually just indicates that we're dealing with a
-+	 * virq or IPI channel, which doesn't actually need to be rebound in
-+	 * the hypervisor. Ignore the error, but skip the xenlinux-level
-+	 * rebind in that case as well.
-+	 */
-+ if (HYPERVISOR_event_channel_op(&op) >= 0)
-+ bind_evtchn_to_cpu(evtchn, tcpu);
-+
-+ spin_unlock(&irq_mapping_update_lock);
-+
-+ /*
-+ * Now send the new target processor a NOP IPI. When this returns, it
-+ * will check for any pending interrupts, and so service any that got
-+ * delivered to the wrong processor by mistake.
-+ *
-+ * XXX: The only time this is called with interrupts disabled is from
-+ * the hotplug/hotunplug path. In that case, all cpus are stopped with
-+ * interrupts disabled, and the missed interrupts will be picked up
-+ * when they start again. This is kind of a hack.
-+ */
-+ if (!irqs_disabled())
-+ smp_call_function(do_nothing_function, NULL, 0, 0);
-+}
-+
-+static void set_affinity_irq(unsigned irq, cpumask_t dest)
-+{
-+ unsigned tcpu = first_cpu(dest);
-+ rebind_irq_to_cpu(irq, tcpu);
-+}
-+
-+/*
-+ * Interface to generic handling in irq.c
-+ */
-+
-+static unsigned int startup_dynirq(unsigned int irq)
-+{
-+ int evtchn = evtchn_from_irq(irq);
-+
-+ if (VALID_EVTCHN(evtchn))
-+ unmask_evtchn(evtchn);
-+ return 0;
-+}
-+
-+static void shutdown_dynirq(unsigned int irq)
-+{
-+ int evtchn = evtchn_from_irq(irq);
-+
-+ if (VALID_EVTCHN(evtchn))
-+ mask_evtchn(evtchn);
-+}
-+
-+static void enable_dynirq(unsigned int irq)
-+{
-+ int evtchn = evtchn_from_irq(irq);
-+
-+ if (VALID_EVTCHN(evtchn))
-+ unmask_evtchn(evtchn);
-+}
-+
-+static void disable_dynirq(unsigned int irq)
-+{
-+ int evtchn = evtchn_from_irq(irq);
-+
-+ if (VALID_EVTCHN(evtchn))
-+ mask_evtchn(evtchn);
-+}
-+
-+static void ack_dynirq(unsigned int irq)
-+{
-+ int evtchn = evtchn_from_irq(irq);
-+
-+ if (VALID_EVTCHN(evtchn)) {
-+ mask_evtchn(evtchn);
-+ clear_evtchn(evtchn);
-+ }
-+}
-+
-+static void end_dynirq(unsigned int irq)
-+{
-+ int evtchn = evtchn_from_irq(irq);
-+
-+ if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED))
-+ unmask_evtchn(evtchn);
-+}
-+
-+static struct hw_interrupt_type dynirq_type = {
-+ "Dynamic-irq",
-+ startup_dynirq,
-+ shutdown_dynirq,
-+ enable_dynirq,
-+ disable_dynirq,
-+ ack_dynirq,
-+ end_dynirq,
-+ set_affinity_irq
-+};
-+
-+static inline void pirq_unmask_notify(int pirq)
-+{
-+ physdev_op_t op;
-+ if (unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0]))) {
-+ op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;
-+ (void)HYPERVISOR_physdev_op(&op);
-+ }
-+}
-+
-+static inline void pirq_query_unmask(int pirq)
-+{
-+ physdev_op_t op;
-+ op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
-+ op.u.irq_status_query.irq = pirq;
-+ (void)HYPERVISOR_physdev_op(&op);
-+ clear_bit(pirq, &pirq_needs_unmask_notify[0]);
-+ if (op.u.irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY)
-+ set_bit(pirq, &pirq_needs_unmask_notify[0]);
-+}
-+
-+/*
-+ * On startup, if there is no action associated with the IRQ then we are
-+ * probing. In this case we should not share with others as it will confuse us.
-+ */
-+#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)
-+
-+static unsigned int startup_pirq(unsigned int irq)
-+{
-+ evtchn_op_t op = { .cmd = EVTCHNOP_bind_pirq };
-+ int evtchn = evtchn_from_irq(irq);
-+
-+ if (VALID_EVTCHN(evtchn))
-+ goto out;
-+
-+ op.u.bind_pirq.pirq = irq;
-+ /* NB. We are happy to share unless we are probing. */
-+ op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
-+ if (HYPERVISOR_event_channel_op(&op) != 0) {
-+ if (!probing_irq(irq))
-+ printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
-+ irq);
-+ return 0;
-+ }
-+ evtchn = op.u.bind_pirq.port;
-+
-+ pirq_query_unmask(irq_to_pirq(irq));
-+
-+ bind_evtchn_to_cpu(evtchn, 0);
-+ evtchn_to_irq[evtchn] = irq;
-+ irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, evtchn);
-+
-+ out:
-+ unmask_evtchn(evtchn);
-+ pirq_unmask_notify(irq_to_pirq(irq));
-+
-+ return 0;
-+}
-+
-+static void shutdown_pirq(unsigned int irq)
-+{
-+ evtchn_op_t op = { .cmd = EVTCHNOP_close };
-+ int evtchn = evtchn_from_irq(irq);
-+
-+ if (!VALID_EVTCHN(evtchn))
-+ return;
-+
-+ mask_evtchn(evtchn);
-+
-+ op.u.close.port = evtchn;
-+ BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
-+
-+ bind_evtchn_to_cpu(evtchn, 0);
-+ evtchn_to_irq[evtchn] = -1;
-+ irq_info[irq] = IRQ_UNBOUND;
-+}
-+
-+static void enable_pirq(unsigned int irq)
-+{
-+ int evtchn = evtchn_from_irq(irq);
-+
-+ if (VALID_EVTCHN(evtchn)) {
-+ unmask_evtchn(evtchn);
-+ pirq_unmask_notify(irq_to_pirq(irq));
-+ }
-+}
-+
-+static void disable_pirq(unsigned int irq)
-+{
-+ int evtchn = evtchn_from_irq(irq);
-+
-+ if (VALID_EVTCHN(evtchn))
-+ mask_evtchn(evtchn);
-+}
-+
-+static void ack_pirq(unsigned int irq)
-+{
-+ int evtchn = evtchn_from_irq(irq);
-+
-+ if (VALID_EVTCHN(evtchn)) {
-+ mask_evtchn(evtchn);
-+ clear_evtchn(evtchn);
-+ }
-+}
-+
-+static void end_pirq(unsigned int irq)
-+{
-+ int evtchn = evtchn_from_irq(irq);
-+
-+ if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED)) {
-+ unmask_evtchn(evtchn);
-+ pirq_unmask_notify(irq_to_pirq(irq));
-+ }
-+}
-+
-+static struct hw_interrupt_type pirq_type = {
-+ "Phys-irq",
-+ startup_pirq,
-+ shutdown_pirq,
-+ enable_pirq,
-+ disable_pirq,
-+ ack_pirq,
-+ end_pirq,
-+ set_affinity_irq
-+};
-+
-+void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
-+{
-+ int evtchn = evtchn_from_irq(i);
-+ shared_info_t *s = HYPERVISOR_shared_info;
-+ if (!VALID_EVTCHN(evtchn))
-+ return;
-+ BUG_ON(!synch_test_bit(evtchn, &s->evtchn_mask[0]));
-+ synch_set_bit(evtchn, &s->evtchn_pending[0]);
-+}
-+
-+void notify_remote_via_irq(int irq)
-+{
-+ int evtchn = evtchn_from_irq(irq);
-+
-+ if (VALID_EVTCHN(evtchn))
-+ notify_remote_via_evtchn(evtchn);
-+}
-+EXPORT_SYMBOL(notify_remote_via_irq);
-+
-+void mask_evtchn(int port)
-+{
-+ shared_info_t *s = HYPERVISOR_shared_info;
-+ synch_set_bit(port, &s->evtchn_mask[0]);
-+}
-+EXPORT_SYMBOL(mask_evtchn);
-+
-+void unmask_evtchn(int port)
-+{
-+ shared_info_t *s = HYPERVISOR_shared_info;
-+ unsigned int cpu = smp_processor_id();
-+ vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
-+
-+ /* Slow path (hypercall) if this is a non-local port. */
-+ if (unlikely(cpu != cpu_from_evtchn(port))) {
-+ evtchn_op_t op = { .cmd = EVTCHNOP_unmask,
-+ .u.unmask.port = port };
-+ (void)HYPERVISOR_event_channel_op(&op);
-+ return;
-+ }
-+
-+ synch_clear_bit(port, &s->evtchn_mask[0]);
-+
-+ /*
-+ * The following is basically the equivalent of 'hw_resend_irq'. Just
-+ * like a real IO-APIC we 'lose the interrupt edge' if the channel is
-+ * masked.
-+ */
-+ if (synch_test_bit(port, &s->evtchn_pending[0]) &&
-+ !synch_test_and_set_bit(port / BITS_PER_LONG,
-+ &vcpu_info->evtchn_pending_sel)) {
-+ vcpu_info->evtchn_upcall_pending = 1;
-+ if (!vcpu_info->evtchn_upcall_mask)
-+ force_evtchn_callback();
-+ }
-+}
-+EXPORT_SYMBOL(unmask_evtchn);
-+
-+void irq_resume(void)
-+{
-+ evtchn_op_t op;
-+ int cpu, pirq, virq, ipi, irq, evtchn;
-+
-+ init_evtchn_cpu_bindings();
-+
-+ /* New event-channel space is not 'live' yet. */
-+ for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
-+ mask_evtchn(evtchn);
-+
-+ /* Check that no PIRQs are still bound. */
-+ for (pirq = 0; pirq < NR_PIRQS; pirq++)
-+ BUG_ON(irq_info[pirq_to_irq(pirq)] != IRQ_UNBOUND);
-+
-+ /* Secondary CPUs must have no VIRQ or IPI bindings. */
-+ for (cpu = 1; cpu < NR_CPUS; cpu++) {
-+ for (virq = 0; virq < NR_VIRQS; virq++)
-+ BUG_ON(per_cpu(virq_to_irq, cpu)[virq] != -1);
-+ for (ipi = 0; ipi < NR_IPIS; ipi++)
-+ BUG_ON(per_cpu(ipi_to_irq, cpu)[ipi] != -1);
-+ }
-+
-+ /* No IRQ <-> event-channel mappings. */
-+ for (irq = 0; irq < NR_IRQS; irq++)
-+ irq_info[irq] &= ~0xFFFF; /* zap event-channel binding */
-+ for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
-+ evtchn_to_irq[evtchn] = -1;
-+
-+ /* Primary CPU: rebind VIRQs automatically. */
-+ for (virq = 0; virq < NR_VIRQS; virq++) {
-+ if ((irq = per_cpu(virq_to_irq, 0)[virq]) == -1)
-+ continue;
-+
-+ BUG_ON(irq_info[irq] != mk_irq_info(IRQT_VIRQ, virq, 0));
-+
-+ /* Get a new binding from Xen. */
-+ memset(&op, 0, sizeof(op));
-+ op.cmd = EVTCHNOP_bind_virq;
-+ op.u.bind_virq.virq = virq;
-+ op.u.bind_virq.vcpu = 0;
-+ BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
-+ evtchn = op.u.bind_virq.port;
-+
-+ /* Record the new mapping. */
-+ evtchn_to_irq[evtchn] = irq;
-+ irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
-+
-+ /* Ready for use. */
-+ unmask_evtchn(evtchn);
-+ }
-+
-+ /* Primary CPU: rebind IPIs automatically. */
-+ for (ipi = 0; ipi < NR_IPIS; ipi++) {
-+ if ((irq = per_cpu(ipi_to_irq, 0)[ipi]) == -1)
-+ continue;
-+
-+ BUG_ON(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));
-+
-+ /* Get a new binding from Xen. */
-+ memset(&op, 0, sizeof(op));
-+ op.cmd = EVTCHNOP_bind_ipi;
-+ op.u.bind_ipi.vcpu = 0;
-+ BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
-+ evtchn = op.u.bind_ipi.port;
-+
-+ /* Record the new mapping. */
-+ evtchn_to_irq[evtchn] = irq;
-+ irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
-+
-+ /* Ready for use. */
-+ unmask_evtchn(evtchn);
-+ }
-+}
-+
-+void __init init_IRQ(void)
-+{
-+ int i;
-+ int cpu;
-+
-+ irq_ctx_init(0);
-+
-+ spin_lock_init(&irq_mapping_update_lock);
-+
-+ init_evtchn_cpu_bindings();
-+
-+ /* No VIRQ or IPI bindings. */
-+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
-+ for (i = 0; i < NR_VIRQS; i++)
-+ per_cpu(virq_to_irq, cpu)[i] = -1;
-+ for (i = 0; i < NR_IPIS; i++)
-+ per_cpu(ipi_to_irq, cpu)[i] = -1;
-+ }
-+
-+ /* No event-channel -> IRQ mappings. */
-+ for (i = 0; i < NR_EVENT_CHANNELS; i++) {
-+ evtchn_to_irq[i] = -1;
-+ mask_evtchn(i); /* No event channels are 'live' right now. */
-+ }
-+
-+ /* No IRQ -> event-channel mappings. */
-+ for (i = 0; i < NR_IRQS; i++)
-+ irq_info[i] = IRQ_UNBOUND;
-+
-+ /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
-+ for (i = 0; i < NR_DYNIRQS; i++) {
-+ irq_bindcount[dynirq_to_irq(i)] = 0;
-+
-+ irq_desc[dynirq_to_irq(i)].status = IRQ_DISABLED;
-+ irq_desc[dynirq_to_irq(i)].action = NULL;
-+ irq_desc[dynirq_to_irq(i)].depth = 1;
-+ irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
-+ }
-+
-+ /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
-+ for (i = 0; i < NR_PIRQS; i++) {
-+ irq_bindcount[pirq_to_irq(i)] = 1;
-+
-+#ifdef RTC_IRQ
-+ /* If not domain 0, force our RTC driver to fail its probe. */
-+ if ((i == RTC_IRQ) &&
-+ !(xen_start_info->flags & SIF_INITDOMAIN))
-+ continue;
-+#endif
-+
-+ irq_desc[pirq_to_irq(i)].status = IRQ_DISABLED;
-+ irq_desc[pirq_to_irq(i)].action = NULL;
-+ irq_desc[pirq_to_irq(i)].depth = 1;
-+ irq_desc[pirq_to_irq(i)].handler = &pirq_type;
-+ }
-+}
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/core/features.c b/drivers/xen/core/features.c
-new file mode 100644
-index 0000000..297d13f
---- /dev/null
-+++ b/drivers/xen/core/features.c
-@@ -0,0 +1,29 @@
-+/******************************************************************************
-+ * features.c
-+ *
-+ * Xen feature flags.
-+ *
-+ * Copyright (c) 2006, Ian Campbell, XenSource Inc.
-+ */
-+#include <linux/types.h>
-+#include <linux/cache.h>
-+#include <linux/module.h>
-+#include <asm/hypervisor.h>
-+#include <xen/features.h>
-+
-+u8 xen_features[XENFEAT_NR_SUBMAPS * 32] __read_mostly;
-+EXPORT_SYMBOL(xen_features);
-+
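-+/*
-+ * Consumers test individual flags through the xen_feature() accessor,
-+ * e.g. xen_feature(XENFEAT_auto_translated_physmap) in the suspend path.
-+ */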
-+void setup_xen_features(void)
-+{
-+ xen_feature_info_t fi;
-+ int i, j;
-+
-+ for (i = 0; i < XENFEAT_NR_SUBMAPS; i++) {
-+ fi.submap_idx = i;
-+ if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0)
-+ break;
-+		for (j = 0; j < 32; j++)
-+			xen_features[i*32+j] = !!(fi.submap & (1 << j));
-+ }
-+}
-diff --git a/drivers/xen/core/gnttab.c b/drivers/xen/core/gnttab.c
-new file mode 100644
-index 0000000..0f1a295
---- /dev/null
-+++ b/drivers/xen/core/gnttab.c
-@@ -0,0 +1,426 @@
-+/******************************************************************************
-+ * gnttab.c
-+ *
-+ * Granting foreign access to our memory reservation.
-+ *
-+ * Copyright (c) 2005, Christopher Clark
-+ * Copyright (c) 2004-2005, K A Fraser
-+ *
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/module.h>
-+#include <linux/sched.h>
-+#include <asm/pgtable.h>
-+#include <xen/interface/xen.h>
-+#include <asm/fixmap.h>
-+#include <asm/uaccess.h>
-+#include <xen/gnttab.h>
-+#include <asm/synch_bitops.h>
-+
-+#if 1
-+#define ASSERT(_p) do {						\
-+	if (!(_p)) { printk(KERN_ALERT "Assertion '%s': line %d, file %s\n", \
-+		     #_p, __LINE__, __FILE__); BUG(); }		\
-+} while (0)
-+#else
-+#define ASSERT(_p) ((void)0)
-+#endif
-+
-+#define WPRINTK(fmt, args...) \
-+ printk(KERN_WARNING "xen_grant: " fmt, ##args)
-+
-+EXPORT_SYMBOL(gnttab_grant_foreign_access);
-+EXPORT_SYMBOL(gnttab_end_foreign_access_ref);
-+EXPORT_SYMBOL(gnttab_end_foreign_access);
-+EXPORT_SYMBOL(gnttab_query_foreign_access);
-+EXPORT_SYMBOL(gnttab_grant_foreign_transfer);
-+EXPORT_SYMBOL(gnttab_end_foreign_transfer_ref);
-+EXPORT_SYMBOL(gnttab_end_foreign_transfer);
-+EXPORT_SYMBOL(gnttab_alloc_grant_references);
-+EXPORT_SYMBOL(gnttab_free_grant_references);
-+EXPORT_SYMBOL(gnttab_free_grant_reference);
-+EXPORT_SYMBOL(gnttab_claim_grant_reference);
-+EXPORT_SYMBOL(gnttab_release_grant_reference);
-+EXPORT_SYMBOL(gnttab_request_free_callback);
-+EXPORT_SYMBOL(gnttab_grant_foreign_access_ref);
-+EXPORT_SYMBOL(gnttab_grant_foreign_transfer_ref);
-+
-+/* External tools reserve first few grant table entries. */
-+#define NR_RESERVED_ENTRIES 8
-+
-+#define NR_GRANT_ENTRIES (NR_GRANT_FRAMES * PAGE_SIZE / sizeof(grant_entry_t))
-+#define GNTTAB_LIST_END (NR_GRANT_ENTRIES + 1)
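-+/*
-+ * Free entries are chained through gnttab_list[]: gnttab_list[ref] holds
-+ * the next free reference and GNTTAB_LIST_END (an out-of-range value)
-+ * terminates the chain. get_free_entries() pops from gnttab_free_head;
-+ * put_free_entry() pushes back onto it.
-+ */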
-+
-+static grant_ref_t gnttab_list[NR_GRANT_ENTRIES];
-+static int gnttab_free_count;
-+static grant_ref_t gnttab_free_head;
-+static spinlock_t gnttab_list_lock = SPIN_LOCK_UNLOCKED;
-+
-+static grant_entry_t *shared;
-+
-+static struct gnttab_free_callback *gnttab_free_callback_list = NULL;
-+
-+static int
-+get_free_entries(int count)
-+{
-+ unsigned long flags;
-+ int ref;
-+ grant_ref_t head;
-+ spin_lock_irqsave(&gnttab_list_lock, flags);
-+ if (gnttab_free_count < count) {
-+ spin_unlock_irqrestore(&gnttab_list_lock, flags);
-+ return -1;
-+ }
-+ ref = head = gnttab_free_head;
-+ gnttab_free_count -= count;
-+ while (count-- > 1)
-+ head = gnttab_list[head];
-+ gnttab_free_head = gnttab_list[head];
-+ gnttab_list[head] = GNTTAB_LIST_END;
-+ spin_unlock_irqrestore(&gnttab_list_lock, flags);
-+ return ref;
-+}
-+
-+#define get_free_entry() get_free_entries(1)
-+
-+static void
-+do_free_callbacks(void)
-+{
-+ struct gnttab_free_callback *callback, *next;
-+
-+ callback = gnttab_free_callback_list;
-+ gnttab_free_callback_list = NULL;
-+
-+ while (callback != NULL) {
-+ next = callback->next;
-+ if (gnttab_free_count >= callback->count) {
-+ callback->next = NULL;
-+ callback->fn(callback->arg);
-+ } else {
-+ callback->next = gnttab_free_callback_list;
-+ gnttab_free_callback_list = callback;
-+ }
-+ callback = next;
-+ }
-+}
-+
-+static inline void
-+check_free_callbacks(void)
-+{
-+ if (unlikely(gnttab_free_callback_list))
-+ do_free_callbacks();
-+}
-+
-+static void
-+put_free_entry(grant_ref_t ref)
-+{
-+ unsigned long flags;
-+ spin_lock_irqsave(&gnttab_list_lock, flags);
-+ gnttab_list[ref] = gnttab_free_head;
-+ gnttab_free_head = ref;
-+ gnttab_free_count++;
-+ check_free_callbacks();
-+ spin_unlock_irqrestore(&gnttab_list_lock, flags);
-+}
-+
-+/*
-+ * Public grant-issuing interface functions
-+ */
-+
-+int
-+gnttab_grant_foreign_access(domid_t domid, unsigned long frame, int readonly)
-+{
-+ int ref;
-+
-+ if (unlikely((ref = get_free_entry()) == -1))
-+ return -ENOSPC;
-+
-+ shared[ref].frame = frame;
-+ shared[ref].domid = domid;
-+ wmb();
-+ shared[ref].flags = GTF_permit_access | (readonly ? GTF_readonly : 0);
-+
-+ return ref;
-+}
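-+/*
-+ * Illustrative frontend usage, sharing one page read-only with a backend
-+ * (backend_id and page_addr are caller-supplied):
-+ *
-+ *	ref = gnttab_grant_foreign_access(backend_id,
-+ *					  virt_to_mfn(page_addr), 1);
-+ *	if (ref < 0)
-+ *		return ref;
-+ *	... backend maps the frame using ref ...
-+ *	gnttab_end_foreign_access(ref, 1, 0);
-+ */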
-+
-+void
-+gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
-+ unsigned long frame, int readonly)
-+{
-+ shared[ref].frame = frame;
-+ shared[ref].domid = domid;
-+ wmb();
-+ shared[ref].flags = GTF_permit_access | (readonly ? GTF_readonly : 0);
-+}
-+
-+
-+int
-+gnttab_query_foreign_access(grant_ref_t ref)
-+{
-+ u16 nflags;
-+
-+ nflags = shared[ref].flags;
-+
-+ return (nflags & (GTF_reading|GTF_writing));
-+}
-+
-+int
-+gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
-+{
-+ u16 flags, nflags;
-+
-+ nflags = shared[ref].flags;
-+ do {
-+ if ((flags = nflags) & (GTF_reading|GTF_writing)) {
-+ printk(KERN_ALERT "WARNING: g.e. still in use!\n");
-+ return 0;
-+ }
-+ } while ((nflags = synch_cmpxchg(&shared[ref].flags, flags, 0)) !=
-+ flags);
-+
-+ return 1;
-+}
-+
-+void
-+gnttab_end_foreign_access(grant_ref_t ref, int readonly, unsigned long page)
-+{
-+ if (gnttab_end_foreign_access_ref(ref, readonly)) {
-+ put_free_entry(ref);
-+ if (page != 0) {
-+ free_page(page);
-+ }
-+ } else {
-+ /* XXX This needs to be fixed so that the ref and page are
-+ placed on a list to be freed up later. */
-+ printk(KERN_WARNING
-+ "WARNING: leaking g.e. and page still in use!\n");
-+ }
-+}
-+
-+int
-+gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
-+{
-+ int ref;
-+
-+ if (unlikely((ref = get_free_entry()) == -1))
-+ return -ENOSPC;
-+ gnttab_grant_foreign_transfer_ref(ref, domid, pfn);
-+
-+ return ref;
-+}
-+
-+void
-+gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
-+ unsigned long pfn)
-+{
-+ shared[ref].frame = pfn;
-+ shared[ref].domid = domid;
-+ wmb();
-+ shared[ref].flags = GTF_accept_transfer;
-+}
-+
-+unsigned long
-+gnttab_end_foreign_transfer_ref(grant_ref_t ref)
-+{
-+ unsigned long frame;
-+ u16 flags;
-+
-+ /*
-+ * If a transfer is not even yet started, try to reclaim the grant
-+ * reference and return failure (== 0).
-+ */
-+ while (!((flags = shared[ref].flags) & GTF_transfer_committed)) {
-+ if (synch_cmpxchg(&shared[ref].flags, flags, 0) == flags)
-+ return 0;
-+ cpu_relax();
-+ }
-+
-+ /* If a transfer is in progress then wait until it is completed. */
-+ while (!(flags & GTF_transfer_completed)) {
-+ flags = shared[ref].flags;
-+ cpu_relax();
-+ }
-+
-+ /* Read the frame number /after/ reading completion status. */
-+ rmb();
-+ frame = shared[ref].frame;
-+ BUG_ON(frame == 0);
-+
-+ return frame;
-+}
-+
-+unsigned long
-+gnttab_end_foreign_transfer(grant_ref_t ref)
-+{
-+ unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
-+ put_free_entry(ref);
-+ return frame;
-+}
-+
-+void
-+gnttab_free_grant_reference(grant_ref_t ref)
-+{
-+ put_free_entry(ref);
-+}
-+
-+void
-+gnttab_free_grant_references(grant_ref_t head)
-+{
-+ grant_ref_t ref;
-+ unsigned long flags;
-+ int count = 1;
-+ if (head == GNTTAB_LIST_END)
-+ return;
-+ spin_lock_irqsave(&gnttab_list_lock, flags);
-+ ref = head;
-+ while (gnttab_list[ref] != GNTTAB_LIST_END) {
-+ ref = gnttab_list[ref];
-+ count++;
-+ }
-+ gnttab_list[ref] = gnttab_free_head;
-+ gnttab_free_head = head;
-+ gnttab_free_count += count;
-+ check_free_callbacks();
-+ spin_unlock_irqrestore(&gnttab_list_lock, flags);
-+}
-+
-+int
-+gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
-+{
-+ int h = get_free_entries(count);
-+
-+ if (h == -1)
-+ return -ENOSPC;
-+
-+ *head = h;
-+
-+ return 0;
-+}
-+
-+int
-+gnttab_claim_grant_reference(grant_ref_t *private_head)
-+{
-+ grant_ref_t g = *private_head;
-+ if (unlikely(g == GNTTAB_LIST_END))
-+ return -ENOSPC;
-+ *private_head = gnttab_list[g];
-+ return g;
-+}
-+
-+void
-+gnttab_release_grant_reference(grant_ref_t *private_head, grant_ref_t release)
-+{
-+ gnttab_list[release] = *private_head;
-+ *private_head = release;
-+}
-+
-+void
-+gnttab_request_free_callback(struct gnttab_free_callback *callback,
-+ void (*fn)(void *), void *arg, u16 count)
-+{
-+ unsigned long flags;
-+ spin_lock_irqsave(&gnttab_list_lock, flags);
-+ if (callback->next)
-+ goto out;
-+ callback->fn = fn;
-+ callback->arg = arg;
-+ callback->count = count;
-+ callback->next = gnttab_free_callback_list;
-+ gnttab_free_callback_list = callback;
-+ check_free_callbacks();
-+ out:
-+ spin_unlock_irqrestore(&gnttab_list_lock, flags);
-+}
-+
-+int
-+gnttab_resume(void)
-+{
-+ gnttab_setup_table_t setup;
-+ unsigned long frames[NR_GRANT_FRAMES];
-+ int i;
-+
-+ setup.dom = DOMID_SELF;
-+ setup.nr_frames = NR_GRANT_FRAMES;
-+ setup.frame_list = frames;
-+
-+ BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1));
-+ BUG_ON(setup.status != 0);
-+
-+#ifdef __ia64__
-+ shared = __va(frames[0] << PAGE_SHIFT);
-+	printk(KERN_INFO "grant table at %p\n", shared);
-+#else
-+ for (i = 0; i < NR_GRANT_FRAMES; i++)
-+ set_fixmap(FIX_GNTTAB_END - i, frames[i] << PAGE_SHIFT);
-+#endif
-+
-+ return 0;
-+}
-+
-+int
-+gnttab_suspend(void)
-+{
-+ int i;
-+
-+ for (i = 0; i < NR_GRANT_FRAMES; i++)
-+ clear_fixmap(FIX_GNTTAB_END - i);
-+
-+ return 0;
-+}
-+
-+static int __init
-+gnttab_init(void)
-+{
-+ int i;
-+
-+ if (xen_init() < 0)
-+ return -ENODEV;
-+
-+ BUG_ON(gnttab_resume());
-+
-+#ifndef __ia64__
-+ shared = (grant_entry_t *)fix_to_virt(FIX_GNTTAB_END);
-+#endif
-+
-+ for (i = NR_RESERVED_ENTRIES; i < NR_GRANT_ENTRIES; i++)
-+ gnttab_list[i] = i + 1;
-+ gnttab_free_count = NR_GRANT_ENTRIES - NR_RESERVED_ENTRIES;
-+ gnttab_free_head = NR_RESERVED_ENTRIES;
-+
-+	printk(KERN_INFO "Grant table initialized\n");
-+ return 0;
-+}
-+
-+core_initcall(gnttab_init);
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/core/reboot.c b/drivers/xen/core/reboot.c
-new file mode 100644
-index 0000000..1fa1f56
---- /dev/null
-+++ b/drivers/xen/core/reboot.c
-@@ -0,0 +1,441 @@
-+#define __KERNEL_SYSCALLS__
-+#include <linux/version.h>
-+#include <linux/kernel.h>
-+#include <linux/mm.h>
-+#include <linux/unistd.h>
-+#include <linux/module.h>
-+#include <linux/reboot.h>
-+#include <linux/sysrq.h>
-+#include <linux/stringify.h>
-+#include <asm/irq.h>
-+#include <asm/mmu_context.h>
-+#include <xen/evtchn.h>
-+#include <asm/hypervisor.h>
-+#include <xen/interface/dom0_ops.h>
-+#include <xen/xenbus.h>
-+#include <linux/cpu.h>
-+#include <linux/kthread.h>
-+#include <xen/xencons.h>
-+
-+#if defined(__i386__) || defined(__x86_64__)
-+/*
-+ * Power off function, if any
-+ */
-+void (*pm_power_off)(void);
-+EXPORT_SYMBOL(pm_power_off);
-+#endif
-+
-+#define SHUTDOWN_INVALID -1
-+#define SHUTDOWN_POWEROFF 0
-+#define SHUTDOWN_REBOOT 1
-+#define SHUTDOWN_SUSPEND 2
-+/* Code 3 is SHUTDOWN_CRASH, which we don't use because the domain can only
-+ * report a crash, not be instructed to crash!
-+ * HALT is the same as POWEROFF, as far as we're concerned. The tools use
-+ * the distinction when we return the reason code to them.
-+ */
-+#define SHUTDOWN_HALT 4
-+
-+void machine_emergency_restart(void)
-+{
-+ /* We really want to get pending console data out before we die. */
-+ xencons_force_flush();
-+ HYPERVISOR_sched_op(SCHEDOP_shutdown, SHUTDOWN_reboot);
-+}
-+
-+void machine_restart(char * __unused)
-+{
-+ machine_emergency_restart();
-+}
-+
-+void machine_halt(void)
-+{
-+ machine_power_off();
-+}
-+
-+void machine_power_off(void)
-+{
-+ /* We really want to get pending console data out before we die. */
-+ xencons_force_flush();
-+ HYPERVISOR_sched_op(SCHEDOP_shutdown, SHUTDOWN_poweroff);
-+}
-+
-+int reboot_thru_bios = 0; /* for dmi_scan.c */
-+EXPORT_SYMBOL(machine_restart);
-+EXPORT_SYMBOL(machine_halt);
-+EXPORT_SYMBOL(machine_power_off);
-+
-+
-+/******************************************************************************
-+ * Stop/pickle callback handling.
-+ */
-+
-+/* Ignore multiple shutdown requests. */
-+static int shutting_down = SHUTDOWN_INVALID;
-+static void __shutdown_handler(void *unused);
-+static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL);
-+
-+#ifndef CONFIG_HOTPLUG_CPU
-+#define cpu_down(x) (-EOPNOTSUPP)
-+#define cpu_up(x) (-EOPNOTSUPP)
-+#endif
-+
-+
-+static int __do_suspend(void *ignore)
-+{
-+ int i, j, k, fpp;
-+
-+ extern unsigned long max_pfn;
-+ extern unsigned long *pfn_to_mfn_frame_list_list;
-+ extern unsigned long *pfn_to_mfn_frame_list[];
-+
-+ extern int gnttab_suspend(void);
-+ extern int gnttab_resume(void);
-+ extern void time_resume(void);
-+
-+#ifdef CONFIG_SMP
-+ cpumask_t prev_online_cpus;
-+ int vcpu_prepare(int vcpu);
-+#endif
-+
-+ int err = 0;
-+
-+ BUG_ON(smp_processor_id() != 0);
-+ BUG_ON(in_interrupt());
-+
-+ if (xen_feature(XENFEAT_auto_translated_physmap)) {
-+ printk(KERN_WARNING "Cannot suspend in "
-+ "auto_translated_physmap mode.\n");
-+ return -EOPNOTSUPP;
-+ }
-+
-+#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
-+ if (num_online_cpus() > 1) {
-+ printk(KERN_WARNING "Can't suspend SMP guests "
-+ "without CONFIG_HOTPLUG_CPU\n");
-+ return -EOPNOTSUPP;
-+ }
-+#endif
-+
-+ xenbus_suspend();
-+
-+ lock_cpu_hotplug();
-+#ifdef CONFIG_SMP
-+ /*
-+ * Take all other CPUs offline. We hold the hotplug semaphore to
-+ * avoid other processes bringing up CPUs under our feet.
-+ */
-+ cpus_clear(prev_online_cpus);
-+ while (num_online_cpus() > 1) {
-+ for_each_online_cpu(i) {
-+ if (i == 0)
-+ continue;
-+ unlock_cpu_hotplug();
-+ err = cpu_down(i);
-+ lock_cpu_hotplug();
-+ if (err != 0) {
-+ printk(KERN_CRIT "Failed to take all CPUs "
-+ "down: %d.\n", err);
-+ goto out_reenable_cpus;
-+ }
-+ cpu_set(i, prev_online_cpus);
-+ }
-+ }
-+#endif
-+
-+ preempt_disable();
-+
-+#ifdef __i386__
-+ kmem_cache_shrink(pgd_cache);
-+ mm_pin_all();
-+#endif
-+
-+ __cli();
-+ preempt_enable();
-+ unlock_cpu_hotplug();
-+
-+ gnttab_suspend();
-+
-+ HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
-+ clear_fixmap(FIX_SHARED_INFO);
-+
-+ xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
-+ xen_start_info->console_mfn = mfn_to_pfn(xen_start_info->console_mfn);
-+
-+ /*
-+ * We'll stop somewhere inside this hypercall. When it returns,
-+ * we'll start resuming after the restore.
-+ */
-+ HYPERVISOR_suspend(virt_to_mfn(xen_start_info));
-+
-+ shutting_down = SHUTDOWN_INVALID;
-+
-+ set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
-+
-+ HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
-+
-+ memset(empty_zero_page, 0, PAGE_SIZE);
-+
-+ HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
-+ virt_to_mfn(pfn_to_mfn_frame_list_list);
-+
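-+	/*
-+	 * Rebuild the three-level phys-to-machine frame list: each page of
-+	 * pfn_to_mfn_frame_list[] holds fpp frame numbers, so with 4K pages
-+	 * and 4-byte longs (fpp = 1024) one list page covers 1024 * 1024
-+	 * pfns, i.e. 4GB of guest memory.
-+	 */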
-+ fpp = PAGE_SIZE/sizeof(unsigned long);
-+ for (i = 0, j = 0, k = -1; i < max_pfn; i += fpp, j++) {
-+ if ((j % fpp) == 0) {
-+ k++;
-+ pfn_to_mfn_frame_list_list[k] =
-+ virt_to_mfn(pfn_to_mfn_frame_list[k]);
-+ j = 0;
-+ }
-+ pfn_to_mfn_frame_list[k][j] =
-+ virt_to_mfn(&phys_to_machine_mapping[i]);
-+ }
-+ HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
-+
-+ gnttab_resume();
-+
-+ irq_resume();
-+
-+ time_resume();
-+
-+ __sti();
-+
-+ xencons_resume();
-+
-+#ifdef CONFIG_SMP
-+ for_each_cpu(i)
-+ vcpu_prepare(i);
-+
-+#endif
-+
-+ /*
-+ * Only resume xenbus /after/ we've prepared our VCPUs; otherwise
-+	 * the VCPU hotplug callback can race with our vcpu_prepare.
-+ */
-+ xenbus_resume();
-+
-+#ifdef CONFIG_SMP
-+ out_reenable_cpus:
-+ for_each_cpu_mask(i, prev_online_cpus) {
-+ j = cpu_up(i);
-+ if ((j != 0) && !cpu_online(i)) {
-+ printk(KERN_CRIT "Failed to bring cpu "
-+ "%d back up (%d).\n",
-+ i, j);
-+ err = j;
-+ }
-+ }
-+#endif
-+
-+ return err;
-+}
-+
-+static int shutdown_process(void *__unused)
-+{
-+ static char *envp[] = { "HOME=/", "TERM=linux",
-+ "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
-+ static char *restart_argv[] = { "/sbin/reboot", NULL };
-+ static char *poweroff_argv[] = { "/sbin/poweroff", NULL };
-+
-+ extern asmlinkage long sys_reboot(int magic1, int magic2,
-+ unsigned int cmd, void *arg);
-+
-+ daemonize("shutdown");
-+
-+ switch (shutting_down) {
-+ case SHUTDOWN_POWEROFF:
-+ case SHUTDOWN_HALT:
-+ if (execve("/sbin/poweroff", poweroff_argv, envp) < 0) {
-+ sys_reboot(LINUX_REBOOT_MAGIC1,
-+ LINUX_REBOOT_MAGIC2,
-+ LINUX_REBOOT_CMD_POWER_OFF,
-+ NULL);
-+ }
-+ break;
-+
-+ case SHUTDOWN_REBOOT:
-+ if (execve("/sbin/reboot", restart_argv, envp) < 0) {
-+ sys_reboot(LINUX_REBOOT_MAGIC1,
-+ LINUX_REBOOT_MAGIC2,
-+ LINUX_REBOOT_CMD_RESTART,
-+ NULL);
-+ }
-+ break;
-+ }
-+
-+ shutting_down = SHUTDOWN_INVALID; /* could try again */
-+
-+ return 0;
-+}
-+
-+static int kthread_create_on_cpu(int (*f)(void *arg),
-+ void *arg,
-+ const char *name,
-+ int cpu)
-+{
-+ struct task_struct *p;
-+ p = kthread_create(f, arg, name);
-+ if (IS_ERR(p))
-+ return PTR_ERR(p);
-+ kthread_bind(p, cpu);
-+ wake_up_process(p);
-+ return 0;
-+}
-+
-+static void __shutdown_handler(void *unused)
-+{
-+ int err;
-+
-+ if (shutting_down != SHUTDOWN_SUSPEND)
-+ err = kernel_thread(shutdown_process, NULL,
-+ CLONE_FS | CLONE_FILES);
-+ else
-+ err = kthread_create_on_cpu(__do_suspend, NULL, "suspend", 0);
-+
-+ if (err < 0) {
-+ printk(KERN_WARNING "Error creating shutdown process (%d): "
-+ "retrying...\n", -err);
-+ schedule_delayed_work(&shutdown_work, HZ/2);
-+ }
-+}
-+
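-+/*
-+ * Read and clear control/shutdown inside a single xenbus transaction; if
-+ * the transaction ends with -EAGAIN, another writer raced with us and the
-+ * whole read/clear sequence is retried from the top.
-+ */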
-+static void shutdown_handler(struct xenbus_watch *watch,
-+ const char **vec, unsigned int len)
-+{
-+ char *str;
-+ xenbus_transaction_t xbt;
-+ int err;
-+
-+ if (shutting_down != SHUTDOWN_INVALID)
-+ return;
-+
-+ again:
-+ err = xenbus_transaction_start(&xbt);
-+ if (err)
-+ return;
-+ str = (char *)xenbus_read(xbt, "control", "shutdown", NULL);
-+ /* Ignore read errors and empty reads. */
-+ if (XENBUS_IS_ERR_READ(str)) {
-+ xenbus_transaction_end(xbt, 1);
-+ return;
-+ }
-+
-+ xenbus_write(xbt, "control", "shutdown", "");
-+
-+ err = xenbus_transaction_end(xbt, 0);
-+ if (err == -EAGAIN) {
-+ kfree(str);
-+ goto again;
-+ }
-+
-+ if (strcmp(str, "poweroff") == 0)
-+ shutting_down = SHUTDOWN_POWEROFF;
-+ else if (strcmp(str, "reboot") == 0)
-+ shutting_down = SHUTDOWN_REBOOT;
-+ else if (strcmp(str, "suspend") == 0)
-+ shutting_down = SHUTDOWN_SUSPEND;
-+ else if (strcmp(str, "halt") == 0)
-+ shutting_down = SHUTDOWN_HALT;
-+ else {
-+		printk(KERN_WARNING "Ignoring shutdown request: %s\n", str);
-+ shutting_down = SHUTDOWN_INVALID;
-+ }
-+
-+ if (shutting_down != SHUTDOWN_INVALID)
-+ schedule_work(&shutdown_work);
-+
-+ kfree(str);
-+}
-+
-+#ifdef CONFIG_MAGIC_SYSRQ
-+static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
-+ unsigned int len)
-+{
-+ char sysrq_key = '\0';
-+ xenbus_transaction_t xbt;
-+ int err;
-+
-+ again:
-+ err = xenbus_transaction_start(&xbt);
-+ if (err)
-+ return;
-+	if (xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key) != 1) {
-+ printk(KERN_ERR "Unable to read sysrq code in "
-+ "control/sysrq\n");
-+ xenbus_transaction_end(xbt, 1);
-+ return;
-+ }
-+
-+ if (sysrq_key != '\0')
-+ xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
-+
-+ err = xenbus_transaction_end(xbt, 0);
-+ if (err == -EAGAIN)
-+ goto again;
-+
-+	if (sysrq_key != '\0')
-+		handle_sysrq(sysrq_key, NULL, NULL);
-+}
-+#endif
-+
-+static struct xenbus_watch shutdown_watch = {
-+ .node = "control/shutdown",
-+ .callback = shutdown_handler
-+};
-+
-+#ifdef CONFIG_MAGIC_SYSRQ
-+static struct xenbus_watch sysrq_watch = {
-+	.node = "control/sysrq",
-+ .callback = sysrq_handler
-+};
-+#endif
-+
-+static struct notifier_block xenstore_notifier;
-+
-+static int setup_shutdown_watcher(struct notifier_block *notifier,
-+ unsigned long event,
-+ void *data)
-+{
-+ int err1 = 0;
-+#ifdef CONFIG_MAGIC_SYSRQ
-+ int err2 = 0;
-+#endif
-+
-+ err1 = register_xenbus_watch(&shutdown_watch);
-+#ifdef CONFIG_MAGIC_SYSRQ
-+ err2 = register_xenbus_watch(&sysrq_watch);
-+#endif
-+
-+ if (err1)
-+ printk(KERN_ERR "Failed to set shutdown watcher\n");
-+
-+#ifdef CONFIG_MAGIC_SYSRQ
-+ if (err2)
-+ printk(KERN_ERR "Failed to set sysrq watcher\n");
-+#endif
-+
-+ return NOTIFY_DONE;
-+}
-+
-+static int __init setup_shutdown_event(void)
-+{
-+ xenstore_notifier.notifier_call = setup_shutdown_watcher;
-+
-+ register_xenstore_notifier(&xenstore_notifier);
-+
-+ return 0;
-+}
-+
-+subsys_initcall(setup_shutdown_event);
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/core/skbuff.c b/drivers/xen/core/skbuff.c
-new file mode 100644
-index 0000000..c12362d
---- /dev/null
-+++ b/drivers/xen/core/skbuff.c
-@@ -0,0 +1,144 @@
-+
-+#include <linux/config.h>
-+#include <linux/module.h>
-+#include <linux/version.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+#include <linux/netdevice.h>
-+#include <linux/inetdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/skbuff.h>
-+#include <linux/init.h>
-+#include <asm/io.h>
-+#include <asm/page.h>
-+#include <asm/hypervisor.h>
-+
-+/* Referenced in netback.c. */
-+/*static*/ kmem_cache_t *skbuff_cachep;
-+EXPORT_SYMBOL(skbuff_cachep);
-+
-+#define MAX_SKBUFF_ORDER 4
-+static kmem_cache_t *skbuff_order_cachep[MAX_SKBUFF_ORDER + 1];
-+
-+static struct {
-+ int size;
-+ kmem_cache_t *cachep;
-+} skbuff_small[] = { { 512, NULL }, { 2048, NULL } };
-+
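-+/*
-+ * Replacement for the generic skb allocator: data buffers come either
-+ * from small fixed-size caches or from order-N caches whose pages are
-+ * made machine-contiguous in skbuff_ctor() below, so a buffer never
-+ * spans discontiguous machine frames.
-+ */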
-+struct sk_buff *__alloc_skb(unsigned int length, gfp_t gfp_mask,
-+ int fclone)
-+{
-+ int order, i;
-+ kmem_cache_t *cachep;
-+
-+ length = SKB_DATA_ALIGN(length) + sizeof(struct skb_shared_info);
-+
-+ if (length <= skbuff_small[ARRAY_SIZE(skbuff_small)-1].size) {
-+ for (i = 0; skbuff_small[i].size < length; i++)
-+ continue;
-+ cachep = skbuff_small[i].cachep;
-+ } else {
-+ order = get_order(length);
-+ if (order > MAX_SKBUFF_ORDER) {
-+ printk(KERN_ALERT "Attempt to allocate order %d "
-+ "skbuff. Increase MAX_SKBUFF_ORDER.\n", order);
-+ return NULL;
-+ }
-+ cachep = skbuff_order_cachep[order];
-+ }
-+
-+ length -= sizeof(struct skb_shared_info);
-+
-+ return alloc_skb_from_cache(cachep, length, gfp_mask, fclone);
-+}
-+
-+struct sk_buff *__dev_alloc_skb(unsigned int length, gfp_t gfp_mask)
-+{
-+ struct sk_buff *skb;
-+ int order;
-+
-+ length = SKB_DATA_ALIGN(length + 16);
-+ order = get_order(length + sizeof(struct skb_shared_info));
-+ if (order > MAX_SKBUFF_ORDER) {
-+ printk(KERN_ALERT "Attempt to allocate order %d skbuff. "
-+ "Increase MAX_SKBUFF_ORDER.\n", order);
-+ return NULL;
-+ }
-+
-+ skb = alloc_skb_from_cache(
-+ skbuff_order_cachep[order], length, gfp_mask, 0);
-+ if (skb != NULL)
-+ skb_reserve(skb, 16);
-+
-+ return skb;
-+}
-+
-+static void skbuff_ctor(void *buf, kmem_cache_t *cachep, unsigned long unused)
-+{
-+ int order = 0;
-+
-+ while (skbuff_order_cachep[order] != cachep)
-+ order++;
-+
-+ /* Do our best to allocate contiguous memory but fall back to IOMMU. */
-+ if (order != 0)
-+ (void)xen_create_contiguous_region(
-+ (unsigned long)buf, order, 0);
-+
-+ scrub_pages(buf, 1 << order);
-+}
-+
-+static void skbuff_dtor(void *buf, kmem_cache_t *cachep, unsigned long unused)
-+{
-+ int order = 0;
-+
-+ while (skbuff_order_cachep[order] != cachep)
-+ order++;
-+
-+ if (order != 0)
-+ xen_destroy_contiguous_region((unsigned long)buf, order);
-+}
-+
-+static int __init skbuff_init(void)
-+{
-+ static char name[MAX_SKBUFF_ORDER + 1][20];
-+ static char small_name[ARRAY_SIZE(skbuff_small)][20];
-+ unsigned long size;
-+ int i, order;
-+
-+ for (i = 0; i < ARRAY_SIZE(skbuff_small); i++) {
-+ size = skbuff_small[i].size;
-+ sprintf(small_name[i], "xen-skb-%lu", size);
-+		/*
-+		 * No ctor/dtor: objects do not span page boundaries, and they
-+		 * are only used on the transmit path, so there is no need for
-+		 * scrubbing.
-+		 */
-+ skbuff_small[i].cachep = kmem_cache_create(
-+ small_name[i], size, size, 0, NULL, NULL);
-+ }
-+
-+ for (order = 0; order <= MAX_SKBUFF_ORDER; order++) {
-+ size = PAGE_SIZE << order;
-+ sprintf(name[order], "xen-skb-%lu", size);
-+ skbuff_order_cachep[order] = kmem_cache_create(
-+ name[order], size, size, 0, skbuff_ctor, skbuff_dtor);
-+ }
-+
-+ skbuff_cachep = skbuff_order_cachep[0];
-+
-+ return 0;
-+}
-+core_initcall(skbuff_init);
-+
-+EXPORT_SYMBOL(__dev_alloc_skb);
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/core/smpboot.c b/drivers/xen/core/smpboot.c
-new file mode 100644
-index 0000000..a8a3b05
---- /dev/null
-+++ b/drivers/xen/core/smpboot.c
-@@ -0,0 +1,464 @@
-+/*
-+ * Xen SMP booting functions
-+ *
-+ * See arch/i386/kernel/smpboot.c for copyright and credits for derived
-+ * portions of this file.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/config.h>
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/mm.h>
-+#include <linux/sched.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/smp_lock.h>
-+#include <linux/irq.h>
-+#include <linux/bootmem.h>
-+#include <linux/notifier.h>
-+#include <linux/cpu.h>
-+#include <linux/percpu.h>
-+#include <asm/desc.h>
-+#include <asm/arch_hooks.h>
-+#include <asm/pgalloc.h>
-+#include <xen/evtchn.h>
-+#include <xen/interface/vcpu.h>
-+#include <xen/xenbus.h>
-+
-+#ifdef CONFIG_SMP_ALTERNATIVES
-+#include <asm/smp_alt.h>
-+#endif
-+
-+extern irqreturn_t smp_reschedule_interrupt(int, void *, struct pt_regs *);
-+extern irqreturn_t smp_call_function_interrupt(int, void *, struct pt_regs *);
-+
-+extern void local_setup_timer(unsigned int cpu);
-+extern void local_teardown_timer(unsigned int cpu);
-+
-+extern void hypervisor_callback(void);
-+extern void failsafe_callback(void);
-+extern void system_call(void);
-+extern void smp_trap_init(trap_info_t *);
-+
-+/* Number of siblings per CPU package */
-+int smp_num_siblings = 1;
-+__typeof__(phys_proc_id) phys_proc_id; /* Package ID of each logical CPU */
-+EXPORT_SYMBOL(phys_proc_id);
-+__typeof__(cpu_core_id) cpu_core_id; /* Core ID of each logical CPU */
-+EXPORT_SYMBOL(cpu_core_id);
-+
-+cpumask_t cpu_online_map;
-+EXPORT_SYMBOL(cpu_online_map);
-+cpumask_t cpu_possible_map;
-+EXPORT_SYMBOL(cpu_possible_map);
-+
-+struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
-+EXPORT_SYMBOL(cpu_data);
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+DEFINE_PER_CPU(int, cpu_state) = { 0 };
-+#endif
-+
-+static DEFINE_PER_CPU(int, resched_irq);
-+static DEFINE_PER_CPU(int, callfunc_irq);
-+static char resched_name[NR_CPUS][15];
-+static char callfunc_name[NR_CPUS][15];
-+
-+u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
-+
-+void *xquad_portio;
-+
-+cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
-+cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
-+EXPORT_SYMBOL(cpu_core_map);
-+
-+#if defined(__i386__)
-+u8 x86_cpu_to_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = 0xff };
-+EXPORT_SYMBOL(x86_cpu_to_apicid);
-+#elif !defined(CONFIG_X86_IO_APIC)
-+unsigned int maxcpus = NR_CPUS;
-+#endif
-+
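-+/*
-+ * VCPUOP_is_up fails with -ENOENT once the VCPU id exceeds the number of
-+ * VCPUs the domain was built with, which bounds the possible map.
-+ */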
-+void __init prefill_possible_map(void)
-+{
-+ int i, rc;
-+
-+ if (!cpus_empty(cpu_possible_map))
-+ return;
-+
-+ for (i = 0; i < NR_CPUS; i++) {
-+ rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
-+ if (rc == -ENOENT)
-+ break;
-+ cpu_set(i, cpu_possible_map);
-+ }
-+}
-+
-+void __init smp_alloc_memory(void)
-+{
-+}
-+
-+static void xen_smp_intr_init(unsigned int cpu)
-+{
-+ sprintf(resched_name[cpu], "resched%d", cpu);
-+ per_cpu(resched_irq, cpu) =
-+ bind_ipi_to_irqhandler(
-+ RESCHEDULE_VECTOR,
-+ cpu,
-+ smp_reschedule_interrupt,
-+ SA_INTERRUPT,
-+ resched_name[cpu],
-+ NULL);
-+ BUG_ON(per_cpu(resched_irq, cpu) < 0);
-+
-+ sprintf(callfunc_name[cpu], "callfunc%d", cpu);
-+ per_cpu(callfunc_irq, cpu) =
-+ bind_ipi_to_irqhandler(
-+ CALL_FUNCTION_VECTOR,
-+ cpu,
-+ smp_call_function_interrupt,
-+ SA_INTERRUPT,
-+ callfunc_name[cpu],
-+ NULL);
-+ BUG_ON(per_cpu(callfunc_irq, cpu) < 0);
-+
-+ if (cpu != 0)
-+ local_setup_timer(cpu);
-+}
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+static void xen_smp_intr_exit(unsigned int cpu)
-+{
-+ if (cpu != 0)
-+ local_teardown_timer(cpu);
-+
-+ unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
-+ unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
-+}
-+#endif
-+
-+static void cpu_bringup(void)
-+{
-+ cpu_init();
-+ touch_softlockup_watchdog();
-+ preempt_disable();
-+ local_irq_enable();
-+ cpu_idle();
-+}
-+
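-+/*
-+ * Hand Xen a complete initial context for a secondary VCPU. There is no
-+ * real-mode trampoline as on native x86: the VCPU starts directly in
-+ * cpu_bringup() with the state registered here via VCPUOP_initialise,
-+ * and is set running later by VCPUOP_up in __cpu_up().
-+ */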
-+void vcpu_prepare(int vcpu)
-+{
-+ vcpu_guest_context_t ctxt;
-+ struct task_struct *idle = idle_task(vcpu);
-+#ifdef __x86_64__
-+ struct desc_ptr *gdt_descr = &cpu_gdt_descr[vcpu];
-+#else
-+ struct Xgt_desc_struct *gdt_descr = &per_cpu(cpu_gdt_descr, vcpu);
-+#endif
-+
-+ if (vcpu == 0)
-+ return;
-+
-+ memset(&ctxt, 0, sizeof(ctxt));
-+
-+ ctxt.flags = VGCF_IN_KERNEL;
-+ ctxt.user_regs.ds = __USER_DS;
-+ ctxt.user_regs.es = __USER_DS;
-+ ctxt.user_regs.fs = 0;
-+ ctxt.user_regs.gs = 0;
-+ ctxt.user_regs.ss = __KERNEL_DS;
-+ ctxt.user_regs.eip = (unsigned long)cpu_bringup;
-+ ctxt.user_regs.eflags = X86_EFLAGS_IF | 0x1000; /* IOPL_RING1 */
-+
-+ memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));
-+
-+ smp_trap_init(ctxt.trap_ctxt);
-+
-+ ctxt.ldt_ents = 0;
-+
-+ ctxt.gdt_frames[0] = virt_to_mfn(gdt_descr->address);
-+ ctxt.gdt_ents = gdt_descr->size / 8;
-+
-+#ifdef __i386__
-+ ctxt.user_regs.cs = __KERNEL_CS;
-+ ctxt.user_regs.esp = idle->thread.esp0 - sizeof(struct pt_regs);
-+
-+ ctxt.kernel_ss = __KERNEL_DS;
-+ ctxt.kernel_sp = idle->thread.esp0;
-+
-+ ctxt.event_callback_cs = __KERNEL_CS;
-+ ctxt.event_callback_eip = (unsigned long)hypervisor_callback;
-+ ctxt.failsafe_callback_cs = __KERNEL_CS;
-+ ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
-+
-+ ctxt.ctrlreg[3] = virt_to_mfn(swapper_pg_dir) << PAGE_SHIFT;
-+#else /* __x86_64__ */
-+ ctxt.user_regs.cs = __KERNEL_CS;
-+ ctxt.user_regs.esp = idle->thread.rsp0 - sizeof(struct pt_regs);
-+
-+ ctxt.kernel_ss = __KERNEL_DS;
-+ ctxt.kernel_sp = idle->thread.rsp0;
-+
-+ ctxt.event_callback_eip = (unsigned long)hypervisor_callback;
-+ ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
-+ ctxt.syscall_callback_eip = (unsigned long)system_call;
-+
-+ ctxt.ctrlreg[3] = virt_to_mfn(init_level4_pgt) << PAGE_SHIFT;
-+
-+ ctxt.gs_base_kernel = (unsigned long)(cpu_pda(vcpu));
-+#endif
-+
-+ BUG_ON(HYPERVISOR_vcpu_op(VCPUOP_initialise, vcpu, &ctxt));
-+}
-+
-+void __init smp_prepare_cpus(unsigned int max_cpus)
-+{
-+ int cpu;
-+ struct task_struct *idle;
-+#ifdef __x86_64__
-+ struct desc_ptr *gdt_descr;
-+#else
-+ struct Xgt_desc_struct *gdt_descr;
-+#endif
-+
-+ cpu_data[0] = boot_cpu_data;
-+
-+ cpu_2_logical_apicid[0] = 0;
-+ x86_cpu_to_apicid[0] = 0;
-+
-+ current_thread_info()->cpu = 0;
-+ cpu_sibling_map[0] = cpumask_of_cpu(0);
-+ cpu_core_map[0] = cpumask_of_cpu(0);
-+
-+ xen_smp_intr_init(0);
-+
-+ for_each_cpu_mask (cpu, cpu_possible_map) {
-+ if (cpu == 0)
-+ continue;
-+
-+#ifdef __x86_64__
-+ gdt_descr = &cpu_gdt_descr[cpu];
-+#else
-+ gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
-+#endif
-+ gdt_descr->address = get_zeroed_page(GFP_KERNEL);
-+ if (unlikely(!gdt_descr->address)) {
-+ printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu);
-+ continue;
-+ }
-+ gdt_descr->size = GDT_SIZE;
-+ memcpy((void *)gdt_descr->address, cpu_gdt_table, GDT_SIZE);
-+ make_page_readonly(
-+ (void *)gdt_descr->address,
-+ XENFEAT_writable_descriptor_tables);
-+
-+ cpu_data[cpu] = boot_cpu_data;
-+ cpu_2_logical_apicid[cpu] = cpu;
-+ x86_cpu_to_apicid[cpu] = cpu;
-+
-+ idle = fork_idle(cpu);
-+ if (IS_ERR(idle))
-+ panic("failed fork for CPU %d", cpu);
-+
-+#ifdef __x86_64__
-+ cpu_pda(cpu)->pcurrent = idle;
-+ cpu_pda(cpu)->cpunumber = cpu;
-+ clear_ti_thread_flag(idle->thread_info, TIF_FORK);
-+#endif
-+
-+ irq_ctx_init(cpu);
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+ if (xen_start_info->flags & SIF_INITDOMAIN)
-+ cpu_set(cpu, cpu_present_map);
-+#else
-+ cpu_set(cpu, cpu_present_map);
-+#endif
-+
-+ vcpu_prepare(cpu);
-+ }
-+
-+ /* Currently, Xen gives no dynamic NUMA/HT info. */
-+ for (cpu = 1; cpu < NR_CPUS; cpu++) {
-+ cpu_sibling_map[cpu] = cpumask_of_cpu(cpu);
-+ cpu_core_map[cpu] = cpumask_of_cpu(cpu);
-+ }
-+
-+#ifdef CONFIG_X86_IO_APIC
-+ /*
-+ * Here we can be sure that there is an IO-APIC in the system. Let's
-+ * go and set it up:
-+ */
-+ if (!skip_ioapic_setup && nr_ioapics)
-+ setup_IO_APIC();
-+#endif
-+}
-+
-+void __devinit smp_prepare_boot_cpu(void)
-+{
-+ prefill_possible_map();
-+ cpu_present_map = cpumask_of_cpu(0);
-+ cpu_online_map = cpumask_of_cpu(0);
-+}
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+
-+/*
-+ * Initialize cpu_present_map late to skip SMP boot code in init/main.c.
-+ * But do it early enough to catch critical for_each_present_cpu() loops
-+ * in i386-specific code.
-+ */
-+static int __init initialize_cpu_present_map(void)
-+{
-+ cpu_present_map = cpu_possible_map;
-+ return 0;
-+}
-+core_initcall(initialize_cpu_present_map);
-+
-+static void vcpu_hotplug(unsigned int cpu)
-+{
-+ int err;
-+ char dir[32], state[32];
-+
-+ if ((cpu >= NR_CPUS) || !cpu_possible(cpu))
-+ return;
-+
-+ sprintf(dir, "cpu/%d", cpu);
-+	err = xenbus_scanf(XBT_NULL, dir, "availability", "%31s", state);
-+ if (err != 1) {
-+ printk(KERN_ERR "XENBUS: Unable to read cpu state\n");
-+ return;
-+ }
-+
-+ if (strcmp(state, "online") == 0) {
-+ (void)cpu_up(cpu);
-+ } else if (strcmp(state, "offline") == 0) {
-+ (void)cpu_down(cpu);
-+ } else {
-+ printk(KERN_ERR "XENBUS: unknown state(%s) on CPU%d\n",
-+ state, cpu);
-+ }
-+}
-+
-+static void handle_vcpu_hotplug_event(
-+ struct xenbus_watch *watch, const char **vec, unsigned int len)
-+{
-+ int cpu;
-+ char *cpustr;
-+ const char *node = vec[XS_WATCH_PATH];
-+
-+ if ((cpustr = strstr(node, "cpu/")) != NULL) {
-+		if (sscanf(cpustr, "cpu/%d", &cpu) == 1)
-+			vcpu_hotplug(cpu);
-+ }
-+}
-+
-+static int setup_cpu_watcher(struct notifier_block *notifier,
-+ unsigned long event, void *data)
-+{
-+ int i;
-+
-+ static struct xenbus_watch cpu_watch = {
-+ .node = "cpu",
-+ .callback = handle_vcpu_hotplug_event };
-+ (void)register_xenbus_watch(&cpu_watch);
-+
-+ if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
-+ for_each_cpu(i)
-+ vcpu_hotplug(i);
-+ printk(KERN_INFO "Brought up %ld CPUs\n",
-+ (long)num_online_cpus());
-+ }
-+
-+ return NOTIFY_DONE;
-+}
-+
-+static int __init setup_vcpu_hotplug_event(void)
-+{
-+ static struct notifier_block xsn_cpu = {
-+ .notifier_call = setup_cpu_watcher };
-+ register_xenstore_notifier(&xsn_cpu);
-+ return 0;
-+}
-+
-+arch_initcall(setup_vcpu_hotplug_event);
-+
-+int __cpu_disable(void)
-+{
-+ cpumask_t map = cpu_online_map;
-+ int cpu = smp_processor_id();
-+
-+ if (cpu == 0)
-+ return -EBUSY;
-+
-+ cpu_clear(cpu, map);
-+ fixup_irqs(map);
-+ cpu_clear(cpu, cpu_online_map);
-+
-+ return 0;
-+}
-+
-+void __cpu_die(unsigned int cpu)
-+{
-+ while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
-+ current->state = TASK_UNINTERRUPTIBLE;
-+ schedule_timeout(HZ/10);
-+ }
-+
-+ xen_smp_intr_exit(cpu);
-+
-+#ifdef CONFIG_SMP_ALTERNATIVES
-+ if (num_online_cpus() == 1)
-+ unprepare_for_smp();
-+#endif
-+}
-+
-+#else /* !CONFIG_HOTPLUG_CPU */
-+
-+int __cpu_disable(void)
-+{
-+ return -ENOSYS;
-+}
-+
-+void __cpu_die(unsigned int cpu)
-+{
-+ BUG();
-+}
-+
-+#endif /* CONFIG_HOTPLUG_CPU */
-+
-+int __devinit __cpu_up(unsigned int cpu)
-+{
-+#ifdef CONFIG_SMP_ALTERNATIVES
-+ if (num_online_cpus() == 1)
-+ prepare_for_smp();
-+#endif
-+
-+ xen_smp_intr_init(cpu);
-+ cpu_set(cpu, cpu_online_map);
-+ if (HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL) != 0)
-+ BUG();
-+
-+ return 0;
-+}
-+
-+void __init smp_cpus_done(unsigned int max_cpus)
-+{
-+}
-+
-+#ifndef CONFIG_X86_LOCAL_APIC
-+int setup_profiling_timer(unsigned int multiplier)
-+{
-+ return -EINVAL;
-+}
-+#endif
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/core/xen_proc.c b/drivers/xen/core/xen_proc.c
-new file mode 100644
-index 0000000..c5a8c14
---- /dev/null
-+++ b/drivers/xen/core/xen_proc.c
-@@ -0,0 +1,29 @@
-+
-+#include <linux/config.h>
-+#include <linux/proc_fs.h>
-+#include <xen/xen_proc.h>
-+
-+static struct proc_dir_entry *xen_base;
-+
-+struct proc_dir_entry *create_xen_proc_entry(const char *name, mode_t mode)
-+{
-+ if ( xen_base == NULL )
-+ if ( (xen_base = proc_mkdir("xen", &proc_root)) == NULL )
-+ panic("Couldn't create /proc/xen");
-+ return create_proc_entry(name, mode, xen_base);
-+}
-+
-+void remove_xen_proc_entry(const char *name)
-+{
-+ remove_proc_entry(name, xen_base);
-+}
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/evtchn/Makefile b/drivers/xen/evtchn/Makefile
-new file mode 100644
-index 0000000..7b082a0
---- /dev/null
-+++ b/drivers/xen/evtchn/Makefile
-@@ -0,0 +1,2 @@
-+
-+obj-y := evtchn.o
-diff --git a/drivers/xen/evtchn/evtchn.c b/drivers/xen/evtchn/evtchn.c
-new file mode 100644
-index 0000000..7222429
---- /dev/null
-+++ b/drivers/xen/evtchn/evtchn.c
-@@ -0,0 +1,459 @@
-+/******************************************************************************
-+ * evtchn.c
-+ *
-+ * Driver for receiving and demuxing event-channel signals.
-+ *
-+ * Copyright (c) 2004-2005, K A Fraser
-+ * Multi-process extensions Copyright (c) 2004, Steven Smith
-+ *
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/module.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+#include <linux/string.h>
-+#include <linux/errno.h>
-+#include <linux/fs.h>
-+#include <linux/errno.h>
-+#include <linux/miscdevice.h>
-+#include <linux/major.h>
-+#include <linux/proc_fs.h>
-+#include <linux/stat.h>
-+#include <linux/poll.h>
-+#include <linux/irq.h>
-+#include <linux/init.h>
-+#include <linux/gfp.h>
-+#include <xen/evtchn.h>
-+#include <xen/public/evtchn.h>
-+
-+struct per_user_data {
-+ /* Notification ring, accessed via /dev/xen/evtchn. */
-+#define EVTCHN_RING_SIZE (PAGE_SIZE / sizeof(evtchn_port_t))
-+#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
-+ evtchn_port_t *ring;
-+ unsigned int ring_cons, ring_prod, ring_overflow;
-+
-+ /* Processes wait on this queue when ring is empty. */
-+ wait_queue_head_t evtchn_wait;
-+ struct fasync_struct *evtchn_async_queue;
-+};
-+
-+/* Who's bound to each port? */
-+static struct per_user_data *port_user[NR_EVENT_CHANNELS];
-+static spinlock_t port_user_lock;
-+
-+void evtchn_device_upcall(int port)
-+{
-+ struct per_user_data *u;
-+
-+ spin_lock(&port_user_lock);
-+
-+ mask_evtchn(port);
-+ clear_evtchn(port);
-+
-+ if ((u = port_user[port]) != NULL) {
-+ if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
-+ u->ring[EVTCHN_RING_MASK(u->ring_prod)] = port;
-+ if (u->ring_cons == u->ring_prod++) {
-+ wake_up_interruptible(&u->evtchn_wait);
-+ kill_fasync(&u->evtchn_async_queue,
-+ SIGIO, POLL_IN);
-+ }
-+ } else {
-+ u->ring_overflow = 1;
-+ }
-+ }
-+
-+ spin_unlock(&port_user_lock);
-+}
-+
-+static ssize_t evtchn_read(struct file *file, char __user *buf,
-+ size_t count, loff_t *ppos)
-+{
-+ int rc;
-+ unsigned int c, p, bytes1 = 0, bytes2 = 0;
-+ struct per_user_data *u = file->private_data;
-+
-+ /* Whole number of ports. */
-+ count &= ~(sizeof(evtchn_port_t)-1);
-+
-+ if (count == 0)
-+ return 0;
-+
-+ if (count > PAGE_SIZE)
-+ count = PAGE_SIZE;
-+
-+ for (;;) {
-+ if (u->ring_overflow)
-+ return -EFBIG;
-+
-+ if ((c = u->ring_cons) != (p = u->ring_prod))
-+ break;
-+
-+ if (file->f_flags & O_NONBLOCK)
-+ return -EAGAIN;
-+
-+ rc = wait_event_interruptible(
-+ u->evtchn_wait, u->ring_cons != u->ring_prod);
-+ if (rc)
-+ return rc;
-+ }
-+
-+ /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
-+ if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
-+ bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
-+ sizeof(evtchn_port_t);
-+ bytes2 = EVTCHN_RING_MASK(p) * sizeof(evtchn_port_t);
-+ } else {
-+ bytes1 = (p - c) * sizeof(evtchn_port_t);
-+ bytes2 = 0;
-+ }
-+
-+ /* Truncate chunks according to caller's maximum byte count. */
-+ if (bytes1 > count) {
-+ bytes1 = count;
-+ bytes2 = 0;
-+ } else if ((bytes1 + bytes2) > count) {
-+ bytes2 = count - bytes1;
-+ }
-+
-+ if (copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) ||
-+ ((bytes2 != 0) &&
-+ copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
-+ return -EFAULT;
-+
-+ u->ring_cons += (bytes1 + bytes2) / sizeof(evtchn_port_t);
-+
-+ return bytes1 + bytes2;
-+}
-+
-+static ssize_t evtchn_write(struct file *file, const char __user *buf,
-+ size_t count, loff_t *ppos)
-+{
-+ int rc, i;
-+ evtchn_port_t *kbuf = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
-+ struct per_user_data *u = file->private_data;
-+
-+ if (kbuf == NULL)
-+ return -ENOMEM;
-+
-+ /* Whole number of ports. */
-+ count &= ~(sizeof(evtchn_port_t)-1);
-+
-+ if (count == 0) {
-+ rc = 0;
-+ goto out;
-+ }
-+
-+ if (count > PAGE_SIZE)
-+ count = PAGE_SIZE;
-+
-+ if (copy_from_user(kbuf, buf, count) != 0) {
-+ rc = -EFAULT;
-+ goto out;
-+ }
-+
-+ spin_lock_irq(&port_user_lock);
-+ for (i = 0; i < (count/sizeof(evtchn_port_t)); i++)
-+ if ((kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u))
-+ unmask_evtchn(kbuf[i]);
-+ spin_unlock_irq(&port_user_lock);
-+
-+ rc = count;
-+
-+ out:
-+ free_page((unsigned long)kbuf);
-+ return rc;
-+}
-+
-+static void evtchn_bind_to_user(struct per_user_data *u, int port)
-+{
-+ spin_lock_irq(&port_user_lock);
-+ BUG_ON(port_user[port] != NULL);
-+ port_user[port] = u;
-+ unmask_evtchn(port);
-+ spin_unlock_irq(&port_user_lock);
-+}
-+
-+static int evtchn_ioctl(struct inode *inode, struct file *file,
-+ unsigned int cmd, unsigned long arg)
-+{
-+ int rc;
-+ struct per_user_data *u = file->private_data;
-+ void __user *uarg = (void __user *) arg;
-+ evtchn_op_t op = { 0 };
-+
-+ switch (cmd) {
-+ case IOCTL_EVTCHN_BIND_VIRQ: {
-+ struct ioctl_evtchn_bind_virq bind;
-+
-+ rc = -EFAULT;
-+ if (copy_from_user(&bind, uarg, sizeof(bind)))
-+ break;
-+
-+ op.cmd = EVTCHNOP_bind_virq;
-+ op.u.bind_virq.virq = bind.virq;
-+ op.u.bind_virq.vcpu = 0;
-+ rc = HYPERVISOR_event_channel_op(&op);
-+ if (rc != 0)
-+ break;
-+
-+ rc = op.u.bind_virq.port;
-+ evtchn_bind_to_user(u, rc);
-+ break;
-+ }
-+
-+ case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
-+ struct ioctl_evtchn_bind_interdomain bind;
-+
-+ rc = -EFAULT;
-+ if (copy_from_user(&bind, uarg, sizeof(bind)))
-+ break;
-+
-+ op.cmd = EVTCHNOP_bind_interdomain;
-+ op.u.bind_interdomain.remote_dom = bind.remote_domain;
-+ op.u.bind_interdomain.remote_port = bind.remote_port;
-+ rc = HYPERVISOR_event_channel_op(&op);
-+ if (rc != 0)
-+ break;
-+
-+ rc = op.u.bind_interdomain.local_port;
-+ evtchn_bind_to_user(u, rc);
-+ break;
-+ }
-+
-+ case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
-+ struct ioctl_evtchn_bind_unbound_port bind;
-+
-+ rc = -EFAULT;
-+ if (copy_from_user(&bind, uarg, sizeof(bind)))
-+ break;
-+
-+ op.cmd = EVTCHNOP_alloc_unbound;
-+ op.u.alloc_unbound.dom = DOMID_SELF;
-+ op.u.alloc_unbound.remote_dom = bind.remote_domain;
-+ rc = HYPERVISOR_event_channel_op(&op);
-+ if (rc != 0)
-+ break;
-+
-+ rc = op.u.alloc_unbound.port;
-+ evtchn_bind_to_user(u, rc);
-+ break;
-+ }
-+
-+ case IOCTL_EVTCHN_UNBIND: {
-+ struct ioctl_evtchn_unbind unbind;
-+ int ret;
-+
-+ rc = -EFAULT;
-+ if (copy_from_user(&unbind, uarg, sizeof(unbind)))
-+ break;
-+
-+ rc = -EINVAL;
-+ if (unbind.port >= NR_EVENT_CHANNELS)
-+ break;
-+
-+ spin_lock_irq(&port_user_lock);
-+
-+ rc = -ENOTCONN;
-+ if (port_user[unbind.port] != u) {
-+ spin_unlock_irq(&port_user_lock);
-+ break;
-+ }
-+
-+ port_user[unbind.port] = NULL;
-+ mask_evtchn(unbind.port);
-+
-+ spin_unlock_irq(&port_user_lock);
-+
-+ op.cmd = EVTCHNOP_close;
-+ op.u.close.port = unbind.port;
-+ ret = HYPERVISOR_event_channel_op(&op);
-+ BUG_ON(ret);
-+
-+ rc = 0;
-+ break;
-+ }
-+
-+ case IOCTL_EVTCHN_NOTIFY: {
-+ struct ioctl_evtchn_notify notify;
-+
-+ rc = -EFAULT;
-+ if (copy_from_user(&notify, uarg, sizeof(notify)))
-+ break;
-+
-+ if (notify.port >= NR_EVENT_CHANNELS) {
-+ rc = -EINVAL;
-+ } else if (port_user[notify.port] != u) {
-+ rc = -ENOTCONN;
-+ } else {
-+ notify_remote_via_evtchn(notify.port);
-+ rc = 0;
-+ }
-+ break;
-+ }
-+
-+ case IOCTL_EVTCHN_RESET: {
-+ /* Initialise the ring to empty. Clear errors. */
-+ spin_lock_irq(&port_user_lock);
-+ u->ring_cons = u->ring_prod = u->ring_overflow = 0;
-+ spin_unlock_irq(&port_user_lock);
-+ rc = 0;
-+ break;
-+ }
-+
-+ default:
-+ rc = -ENOSYS;
-+ break;
-+ }
-+
-+ return rc;
-+}
-+
-+static unsigned int evtchn_poll(struct file *file, poll_table *wait)
-+{
-+ unsigned int mask = POLLOUT | POLLWRNORM;
-+ struct per_user_data *u = file->private_data;
-+
-+ poll_wait(file, &u->evtchn_wait, wait);
-+ if (u->ring_cons != u->ring_prod)
-+ mask |= POLLIN | POLLRDNORM;
-+ if (u->ring_overflow)
-+ mask = POLLERR;
-+ return mask;
-+}
-+
-+static int evtchn_fasync(int fd, struct file *filp, int on)
-+{
-+ struct per_user_data *u = filp->private_data;
-+ return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
-+}
-+
-+static int evtchn_open(struct inode *inode, struct file *filp)
-+{
-+ struct per_user_data *u;
-+
-+ if ((u = kmalloc(sizeof(*u), GFP_KERNEL)) == NULL)
-+ return -ENOMEM;
-+
-+ memset(u, 0, sizeof(*u));
-+ init_waitqueue_head(&u->evtchn_wait);
-+
-+ u->ring = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
-+ if (u->ring == NULL) {
-+ kfree(u);
-+ return -ENOMEM;
-+ }
-+
-+ filp->private_data = u;
-+
-+ return 0;
-+}
-+
-+static int evtchn_release(struct inode *inode, struct file *filp)
-+{
-+ int i;
-+ struct per_user_data *u = filp->private_data;
-+ evtchn_op_t op = { 0 };
-+
-+ spin_lock_irq(&port_user_lock);
-+
-+ free_page((unsigned long)u->ring);
-+
-+ for (i = 0; i < NR_EVENT_CHANNELS; i++) {
-+ int ret;
-+ if (port_user[i] != u)
-+ continue;
-+
-+ port_user[i] = NULL;
-+ mask_evtchn(i);
-+
-+ op.cmd = EVTCHNOP_close;
-+ op.u.close.port = i;
-+ ret = HYPERVISOR_event_channel_op(&op);
-+ BUG_ON(ret);
-+ }
-+
-+ spin_unlock_irq(&port_user_lock);
-+
-+ kfree(u);
-+
-+ return 0;
-+}
-+
-+static struct file_operations evtchn_fops = {
-+ .owner = THIS_MODULE,
-+ .read = evtchn_read,
-+ .write = evtchn_write,
-+ .ioctl = evtchn_ioctl,
-+ .poll = evtchn_poll,
-+ .fasync = evtchn_fasync,
-+ .open = evtchn_open,
-+ .release = evtchn_release,
-+};
-+
-+static struct miscdevice evtchn_miscdev = {
-+ .minor = EVTCHN_MINOR,
-+ .name = "evtchn",
-+ .fops = &evtchn_fops,
-+ .devfs_name = "misc/evtchn",
-+};
-+
-+static int __init evtchn_init(void)
-+{
-+ int err;
-+
-+ spin_lock_init(&port_user_lock);
-+ memset(port_user, 0, sizeof(port_user));
-+
-+ /* Create '/dev/misc/evtchn'. */
-+ err = misc_register(&evtchn_miscdev);
-+ if (err != 0) {
-+ printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
-+ return err;
-+ }
-+
-+ printk("Event-channel device installed.\n");
-+
-+ return 0;
-+}
-+
-+static void evtchn_cleanup(void)
-+{
-+ misc_deregister(&evtchn_miscdev);
-+}
-+
-+module_init(evtchn_init);
-+module_exit(evtchn_cleanup);
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/net_driver_util.c b/drivers/xen/net_driver_util.c
-new file mode 100644
-index 0000000..10688f9
---- /dev/null
-+++ b/drivers/xen/net_driver_util.c
-@@ -0,0 +1,67 @@
-+/*****************************************************************************
-+ *
-+ * Utility functions for Xen network devices.
-+ *
-+ * Copyright (c) 2005 XenSource Ltd.
-+ *
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following
-+ * license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject
-+ * to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ */
-+
-+
-+#include <linux/if_ether.h>
-+#include <linux/err.h>
-+#include <linux/module.h>
-+#include <xen/net_driver_util.h>
-+
-+
-+int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
-+{
-+ char *s;
-+ int i;
-+ char *e;
-+ char *macstr = xenbus_read(XBT_NULL, dev->nodename, "mac", NULL);
-+ if (IS_ERR(macstr))
-+ return PTR_ERR(macstr);
-+ s = macstr;
-+ for (i = 0; i < ETH_ALEN; i++) {
-+ mac[i] = simple_strtoul(s, &e, 16);
-+ if (s == e || (e[0] != ':' && e[0] != 0)) {
-+ kfree(macstr);
-+ return -ENOENT;
-+ }
-+ s = &e[1];
-+ }
-+ kfree(macstr);
-+ return 0;
-+}
-+EXPORT_SYMBOL(xen_net_read_mac);
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/netback/Makefile b/drivers/xen/netback/Makefile
-new file mode 100644
-index 0000000..d5d2328
---- /dev/null
-+++ b/drivers/xen/netback/Makefile
-@@ -0,0 +1,5 @@
-+obj-$(CONFIG_XEN_NETDEV_BACKEND) := netbk.o
-+obj-$(CONFIG_XEN_NETDEV_LOOPBACK) += netloop.o
-+
-+netbk-y := netback.o xenbus.o interface.o
-+netloop-y := loopback.o
-diff --git a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
-new file mode 100644
-index 0000000..61f8d58
---- /dev/null
-+++ b/drivers/xen/netback/common.h
-@@ -0,0 +1,110 @@
-+/******************************************************************************
-+ * arch/xen/drivers/netif/backend/common.h
-+ */
-+
-+#ifndef __NETIF__BACKEND__COMMON_H__
-+#define __NETIF__BACKEND__COMMON_H__
-+
-+#include <linux/config.h>
-+#include <linux/version.h>
-+#include <linux/module.h>
-+#include <linux/interrupt.h>
-+#include <linux/slab.h>
-+#include <linux/ip.h>
-+#include <linux/in.h>
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <xen/evtchn.h>
-+#include <xen/interface/io/netif.h>
-+#include <asm/io.h>
-+#include <asm/pgalloc.h>
-+#include <xen/interface/grant_table.h>
-+#include <xen/gnttab.h>
-+#include <xen/driver_util.h>
-+
-+#define DPRINTK(_f, _a...) pr_debug("(file=%s, line=%d) " _f, \
-+ __FILE__ , __LINE__ , ## _a )
-+#define IPRINTK(fmt, args...) \
-+ printk(KERN_INFO "xen_net: " fmt, ##args)
-+#define WPRINTK(fmt, args...) \
-+ printk(KERN_WARNING "xen_net: " fmt, ##args)
-+
-+typedef struct netif_st {
-+ /* Unique identifier for this interface. */
-+ domid_t domid;
-+ unsigned int handle;
-+
-+ u8 fe_dev_addr[6];
-+
-+ /* Physical parameters of the comms window. */
-+ grant_handle_t tx_shmem_handle;
-+ grant_ref_t tx_shmem_ref;
-+ grant_handle_t rx_shmem_handle;
-+ grant_ref_t rx_shmem_ref;
-+ unsigned int evtchn;
-+ unsigned int irq;
-+
-+ /* The shared rings and indexes. */
-+ netif_tx_back_ring_t tx;
-+ netif_rx_back_ring_t rx;
-+ struct vm_struct *tx_comms_area;
-+ struct vm_struct *rx_comms_area;
-+
-+ /* Allow netif_be_start_xmit() to peek ahead in the rx request ring. */
-+ RING_IDX rx_req_cons_peek;
-+
-+ /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
-+ unsigned long credit_bytes;
-+ unsigned long credit_usec;
-+ unsigned long remaining_credit;
-+ struct timer_list credit_timeout;
-+
-+ /* Miscellaneous private stuff. */
-+ enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
-+ int active;
-+ struct list_head list; /* scheduling list */
-+ atomic_t refcnt;
-+ struct net_device *dev;
-+ struct net_device_stats stats;
-+
-+ struct work_struct free_work;
-+} netif_t;
-+
-+#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
-+#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
-+
-+void netif_creditlimit(netif_t *netif);
-+void netif_disconnect(netif_t *netif);
-+
-+netif_t *alloc_netif(domid_t domid, unsigned int handle, u8 be_mac[ETH_ALEN]);
-+void free_netif(netif_t *netif);
-+int netif_map(netif_t *netif, unsigned long tx_ring_ref,
-+ unsigned long rx_ring_ref, unsigned int evtchn);
-+
-+#define netif_get(_b) (atomic_inc(&(_b)->refcnt))
-+#define netif_put(_b) \
-+ do { \
-+ if ( atomic_dec_and_test(&(_b)->refcnt) ) \
-+ free_netif(_b); \
-+ } while (0)
-+
-+void netif_xenbus_init(void);
-+
-+void netif_schedule_work(netif_t *netif);
-+void netif_deschedule_work(netif_t *netif);
-+
-+int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev);
-+struct net_device_stats *netif_be_get_stats(struct net_device *dev);
-+irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs);
-+
-+#endif /* __NETIF__BACKEND__COMMON_H__ */
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/netback/interface.c b/drivers/xen/netback/interface.c
-new file mode 100644
-index 0000000..a27533c
---- /dev/null
-+++ b/drivers/xen/netback/interface.c
-@@ -0,0 +1,320 @@
-+/******************************************************************************
-+ * arch/xen/drivers/netif/backend/interface.c
-+ *
-+ * Network-device interface management.
-+ *
-+ * Copyright (c) 2004-2005, Keir Fraser
-+ */
-+
-+#include "common.h"
-+#include <linux/rtnetlink.h>
-+
-+static void __netif_up(netif_t *netif)
-+{
-+ struct net_device *dev = netif->dev;
-+ spin_lock_bh(&dev->xmit_lock);
-+ netif->active = 1;
-+ spin_unlock_bh(&dev->xmit_lock);
-+ enable_irq(netif->irq);
-+ netif_schedule_work(netif);
-+}
-+
-+static void __netif_down(netif_t *netif)
-+{
-+ struct net_device *dev = netif->dev;
-+ disable_irq(netif->irq);
-+ spin_lock_bh(&dev->xmit_lock);
-+ netif->active = 0;
-+ spin_unlock_bh(&dev->xmit_lock);
-+ netif_deschedule_work(netif);
-+}
-+
-+static int net_open(struct net_device *dev)
-+{
-+ netif_t *netif = netdev_priv(dev);
-+ if (netif->status == CONNECTED)
-+ __netif_up(netif);
-+ netif_start_queue(dev);
-+ return 0;
-+}
-+
-+static int net_close(struct net_device *dev)
-+{
-+ netif_t *netif = netdev_priv(dev);
-+ netif_stop_queue(dev);
-+ if (netif->status == CONNECTED)
-+ __netif_down(netif);
-+ return 0;
-+}
-+
-+netif_t *alloc_netif(domid_t domid, unsigned int handle, u8 be_mac[ETH_ALEN])
-+{
-+ int err = 0, i;
-+ struct net_device *dev;
-+ netif_t *netif;
-+ char name[IFNAMSIZ] = {};
-+
-+ snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
-+ dev = alloc_netdev(sizeof(netif_t), name, ether_setup);
-+ if (dev == NULL) {
-+ DPRINTK("Could not create netif: out of memory\n");
-+ return ERR_PTR(-ENOMEM);
-+ }
-+
-+ netif = netdev_priv(dev);
-+ memset(netif, 0, sizeof(*netif));
-+ netif->domid = domid;
-+ netif->handle = handle;
-+ netif->status = DISCONNECTED;
-+ atomic_set(&netif->refcnt, 0);
-+ netif->dev = dev;
-+
-+ netif->credit_bytes = netif->remaining_credit = ~0UL;
-+ netif->credit_usec = 0UL;
-+ init_timer(&netif->credit_timeout);
-+
-+ dev->hard_start_xmit = netif_be_start_xmit;
-+ dev->get_stats = netif_be_get_stats;
-+ dev->open = net_open;
-+ dev->stop = net_close;
-+ dev->features = NETIF_F_NO_CSUM;
-+
-+ /* Disable queuing. */
-+ dev->tx_queue_len = 0;
-+
-+ for (i = 0; i < ETH_ALEN; i++)
-+ if (be_mac[i] != 0)
-+ break;
-+ if (i == ETH_ALEN) {
-+ /*
-+ * Initialise a dummy MAC address. We choose the numerically
-+ * largest non-broadcast address to prevent the address getting
-+ * stolen by an Ethernet bridge for STP purposes.
-+ * (FE:FF:FF:FF:FF:FF)
-+ */
-+ memset(dev->dev_addr, 0xFF, ETH_ALEN);
-+ dev->dev_addr[0] &= ~0x01;
-+ } else
-+ memcpy(dev->dev_addr, be_mac, ETH_ALEN);
-+
-+ rtnl_lock();
-+ err = register_netdevice(dev);
-+ rtnl_unlock();
-+ if (err) {
-+ DPRINTK("Could not register new net device %s: err=%d\n",
-+ dev->name, err);
-+ free_netdev(dev);
-+ return ERR_PTR(err);
-+ }
-+
-+ DPRINTK("Successfully created netif\n");
-+ return netif;
-+}
-+
-+static int map_frontend_pages(
-+ netif_t *netif, grant_ref_t tx_ring_ref, grant_ref_t rx_ring_ref)
-+{
-+ struct gnttab_map_grant_ref op;
-+ int ret;
-+
-+ op.host_addr = (unsigned long)netif->tx_comms_area->addr;
-+ op.flags = GNTMAP_host_map;
-+ op.ref = tx_ring_ref;
-+ op.dom = netif->domid;
-+
-+ lock_vm_area(netif->tx_comms_area);
-+ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
-+ unlock_vm_area(netif->tx_comms_area);
-+ BUG_ON(ret);
-+
-+ if (op.status) {
-+ DPRINTK(" Gnttab failure mapping tx_ring_ref!\n");
-+ return op.status;
-+ }
-+
-+ netif->tx_shmem_ref = tx_ring_ref;
-+ netif->tx_shmem_handle = op.handle;
-+
-+ op.host_addr = (unsigned long)netif->rx_comms_area->addr;
-+ op.flags = GNTMAP_host_map;
-+ op.ref = rx_ring_ref;
-+ op.dom = netif->domid;
-+
-+ lock_vm_area(netif->rx_comms_area);
-+ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
-+ unlock_vm_area(netif->rx_comms_area);
-+ BUG_ON(ret);
-+
-+ if (op.status) {
-+ DPRINTK(" Gnttab failure mapping rx_ring_ref!\n");
-+ return op.status;
-+ }
-+
-+ netif->rx_shmem_ref = rx_ring_ref;
-+ netif->rx_shmem_handle = op.handle;
-+
-+ return 0;
-+}
-+
-+static void unmap_frontend_pages(netif_t *netif)
-+{
-+ struct gnttab_unmap_grant_ref op;
-+ int ret;
-+
-+ op.host_addr = (unsigned long)netif->tx_comms_area->addr;
-+ op.handle = netif->tx_shmem_handle;
-+ op.dev_bus_addr = 0;
-+
-+ lock_vm_area(netif->tx_comms_area);
-+ ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
-+ unlock_vm_area(netif->tx_comms_area);
-+ BUG_ON(ret);
-+
-+ op.host_addr = (unsigned long)netif->rx_comms_area->addr;
-+ op.handle = netif->rx_shmem_handle;
-+ op.dev_bus_addr = 0;
-+
-+ lock_vm_area(netif->rx_comms_area);
-+ ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
-+ unlock_vm_area(netif->rx_comms_area);
-+ BUG_ON(ret);
-+}
-+
-+int netif_map(netif_t *netif, unsigned long tx_ring_ref,
-+ unsigned long rx_ring_ref, unsigned int evtchn)
-+{
-+ int err = -ENOMEM;
-+ netif_tx_sring_t *txs;
-+ netif_rx_sring_t *rxs;
-+ evtchn_op_t op = {
-+ .cmd = EVTCHNOP_bind_interdomain,
-+ .u.bind_interdomain.remote_dom = netif->domid,
-+ .u.bind_interdomain.remote_port = evtchn };
-+
-+ /* Already connected through? */
-+ if (netif->irq)
-+ return 0;
-+
-+ netif->tx_comms_area = alloc_vm_area(PAGE_SIZE);
-+ if (netif->tx_comms_area == NULL)
-+ return -ENOMEM;
-+ netif->rx_comms_area = alloc_vm_area(PAGE_SIZE);
-+ if (netif->rx_comms_area == NULL)
-+ goto err_rx;
-+
-+ err = map_frontend_pages(netif, tx_ring_ref, rx_ring_ref);
-+ if (err)
-+ goto err_map;
-+
-+ err = HYPERVISOR_event_channel_op(&op);
-+ if (err)
-+ goto err_hypervisor;
-+
-+ netif->evtchn = op.u.bind_interdomain.local_port;
-+
-+ netif->irq = bind_evtchn_to_irqhandler(
-+ netif->evtchn, netif_be_int, 0, netif->dev->name, netif);
-+ disable_irq(netif->irq);
-+
-+ txs = (netif_tx_sring_t *)netif->tx_comms_area->addr;
-+ BACK_RING_INIT(&netif->tx, txs, PAGE_SIZE);
-+
-+ rxs = (netif_rx_sring_t *)
-+ ((char *)netif->rx_comms_area->addr);
-+ BACK_RING_INIT(&netif->rx, rxs, PAGE_SIZE);
-+
-+ netif->rx_req_cons_peek = 0;
-+
-+ netif_get(netif);
-+ wmb(); /* Other CPUs see new state before interface is started. */
-+
-+ rtnl_lock();
-+ netif->status = CONNECTED;
-+ wmb();
-+ if (netif_running(netif->dev))
-+ __netif_up(netif);
-+ rtnl_unlock();
-+
-+ return 0;
-+err_hypervisor:
-+ unmap_frontend_pages(netif);
-+err_map:
-+ free_vm_area(netif->rx_comms_area);
-+err_rx:
-+ free_vm_area(netif->tx_comms_area);
-+ return err;
-+}
-+
-+static void free_netif_callback(void *arg)
-+{
-+ netif_t *netif = (netif_t *)arg;
-+
-+ if (netif->irq)
-+ unbind_from_irqhandler(netif->irq, netif);
-+
-+ unregister_netdev(netif->dev);
-+
-+ if (netif->tx.sring) {
-+ unmap_frontend_pages(netif);
-+ free_vm_area(netif->tx_comms_area);
-+ free_vm_area(netif->rx_comms_area);
-+ }
-+
-+ free_netdev(netif->dev);
-+}
-+
-+void free_netif(netif_t *netif)
-+{
-+ INIT_WORK(&netif->free_work, free_netif_callback, (void *)netif);
-+ schedule_work(&netif->free_work);
-+}
-+
-+void netif_creditlimit(netif_t *netif)
-+{
-+#if 0
-+ /* Set the credit limit (reset remaining credit to new limit). */
-+ netif->credit_bytes = creditlimit->credit_bytes;
-+ netif->remaining_credit = creditlimit->credit_bytes;
-+ netif->credit_usec = creditlimit->period_usec;
-+
-+ if (netif->status == CONNECTED) {
-+ /*
-+ * Schedule work so that any packets waiting under previous
-+ * credit limit are dealt with (acts as a replenishment point).
-+ */
-+ netif->credit_timeout.expires = jiffies;
-+ netif_schedule_work(netif);
-+ }
-+#endif
-+}
-+
-+void netif_disconnect(netif_t *netif)
-+{
-+ switch (netif->status) {
-+ case CONNECTED:
-+ rtnl_lock();
-+ netif->status = DISCONNECTING;
-+ wmb();
-+ if (netif_running(netif->dev))
-+ __netif_down(netif);
-+ rtnl_unlock();
-+ netif_put(netif);
-+ break;
-+ case DISCONNECTED:
-+ BUG_ON(atomic_read(&netif->refcnt) != 0);
-+ free_netif(netif);
-+ break;
-+ default:
-+ BUG();
-+ }
-+}
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/netback/loopback.c b/drivers/xen/netback/loopback.c
-new file mode 100644
-index 0000000..a5a11cc
---- /dev/null
-+++ b/drivers/xen/netback/loopback.c
-@@ -0,0 +1,231 @@
-+/******************************************************************************
-+ * netback/loopback.c
-+ *
-+ * A two-interface loopback device to emulate a local netfront-netback
-+ * connection. This ensures that local packet delivery looks identical
-+ * to inter-domain delivery. Most importantly, packets delivered locally
-+ * originating from other domains will get *copied* when they traverse this
-+ * driver. This prevents unbounded delays in socket-buffer queues from
-+ * causing the netback driver to "seize up".
-+ *
-+ * This driver creates a symmetric pair of loopback interfaces with names
-+ * vif0.0 and veth0. The intention is that 'vif0.0' is bound to an Ethernet
-+ * bridge, just like a proper netback interface, while a local IP interface
-+ * is configured on 'veth0'.
-+ *
-+ * As with a real netback interface, vif0.0 is configured with a suitable
-+ * dummy MAC address. No default is provided for veth0: a reasonable strategy
-+ * is to transfer eth0's MAC address to veth0, and give eth0 a dummy address
-+ * (to avoid confusing the Etherbridge).
-+ *
-+ * Copyright (c) 2005 K A Fraser
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/module.h>
-+#include <linux/netdevice.h>
-+#include <linux/inetdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/skbuff.h>
-+#include <linux/ethtool.h>
-+#include <net/dst.h>
-+
-+static int nloopbacks = 8;
-+module_param(nloopbacks, int, 0);
-+MODULE_PARM_DESC(nloopbacks, "Number of netback-loopback devices to create");
-+
-+struct net_private {
-+ struct net_device *loopback_dev;
-+ struct net_device_stats stats;
-+};
-+
-+static int loopback_open(struct net_device *dev)
-+{
-+ struct net_private *np = netdev_priv(dev);
-+ memset(&np->stats, 0, sizeof(np->stats));
-+ netif_start_queue(dev);
-+ return 0;
-+}
-+
-+static int loopback_close(struct net_device *dev)
-+{
-+ netif_stop_queue(dev);
-+ return 0;
-+}
-+
-+static int loopback_start_xmit(struct sk_buff *skb, struct net_device *dev)
-+{
-+ struct net_private *np = netdev_priv(dev);
-+
-+ dst_release(skb->dst);
-+ skb->dst = NULL;
-+
-+ skb_orphan(skb);
-+
-+ np->stats.tx_bytes += skb->len;
-+ np->stats.tx_packets++;
-+
-+ /* Switch to loopback context. */
-+ dev = np->loopback_dev;
-+ np = netdev_priv(dev);
-+
-+ np->stats.rx_bytes += skb->len;
-+ np->stats.rx_packets++;
-+
-+ if (skb->ip_summed == CHECKSUM_HW) {
-+ /* Defer checksum calculation. */
-+ skb->proto_csum_blank = 1;
-+ /* Must be a local packet: assert its integrity. */
-+ skb->proto_csum_valid = 1;
-+ }
-+
-+ skb->ip_summed = skb->proto_csum_valid ?
-+ CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
-+
-+ skb->pkt_type = PACKET_HOST; /* overridden by eth_type_trans() */
-+ skb->protocol = eth_type_trans(skb, dev);
-+ skb->dev = dev;
-+ dev->last_rx = jiffies;
-+ netif_rx(skb);
-+
-+ return 0;
-+}
-+
-+static struct net_device_stats *loopback_get_stats(struct net_device *dev)
-+{
-+ struct net_private *np = netdev_priv(dev);
-+ return &np->stats;
-+}
-+
-+static void loopback_construct(struct net_device *dev, struct net_device *lo)
-+{
-+ struct net_private *np = netdev_priv(dev);
-+
-+ np->loopback_dev = lo;
-+
-+ dev->open = loopback_open;
-+ dev->stop = loopback_close;
-+ dev->hard_start_xmit = loopback_start_xmit;
-+ dev->get_stats = loopback_get_stats;
-+
-+ dev->tx_queue_len = 0;
-+
-+ dev->features = NETIF_F_HIGHDMA | NETIF_F_LLTX;
-+
-+ /*
-+ * We do not set a jumbo MTU on the interface. Otherwise the network
-+ * stack will try to send large packets that will get dropped by the
-+ * Ethernet bridge (unless the physical Ethernet interface is
-+ * configured to transfer jumbo packets). If a larger MTU is desired
-+ * then the system administrator can specify it using the 'ifconfig'
-+ * command.
-+ */
-+ /*dev->mtu = 16*1024;*/
-+}
-+
-+static struct ethtool_ops network_ethtool_ops =
-+{
-+ .get_tx_csum = ethtool_op_get_tx_csum,
-+ .set_tx_csum = ethtool_op_set_tx_csum,
-+};
-+
-+static int __init make_loopback(int i)
-+{
-+ struct net_device *dev1, *dev2;
-+ char dev_name[IFNAMSIZ];
-+ int err = -ENOMEM;
-+
-+ sprintf(dev_name, "vif0.%d", i);
-+ dev1 = alloc_netdev(sizeof(struct net_private), dev_name, ether_setup);
-+ if (!dev1)
-+ return err;
-+
-+ sprintf(dev_name, "veth%d", i);
-+ dev2 = alloc_netdev(sizeof(struct net_private), dev_name, ether_setup);
-+ if (!dev2)
-+ goto fail_netdev2;
-+
-+ loopback_construct(dev1, dev2);
-+ loopback_construct(dev2, dev1);
-+
-+ dev1->features |= NETIF_F_NO_CSUM;
-+ dev2->features |= NETIF_F_IP_CSUM;
-+
-+ SET_ETHTOOL_OPS(dev2, &network_ethtool_ops);
-+
-+ /*
-+ * Initialise a dummy MAC address for the 'dummy backend' interface. We
-+ * choose the numerically largest non-broadcast address to prevent the
-+ * address getting stolen by an Ethernet bridge for STP purposes.
-+ */
-+ memset(dev1->dev_addr, 0xFF, ETH_ALEN);
-+ dev1->dev_addr[0] &= ~0x01;
-+
-+ if ((err = register_netdev(dev1)) != 0)
-+ goto fail;
-+
-+ if ((err = register_netdev(dev2)) != 0) {
-+ unregister_netdev(dev1);
-+ goto fail;
-+ }
-+
-+ return 0;
-+
-+ fail:
-+ free_netdev(dev2);
-+ fail_netdev2:
-+ free_netdev(dev1);
-+ return err;
-+}
-+
-+static void __init clean_loopback(int i)
-+{
-+ struct net_device *dev1, *dev2;
-+ char dev_name[IFNAMSIZ];
-+
-+ sprintf(dev_name, "vif0.%d", i);
-+ dev1 = dev_get_by_name(dev_name);
-+ sprintf(dev_name, "veth%d", i);
-+ dev2 = dev_get_by_name(dev_name);
-+ if (dev1 && dev2) {
-+ unregister_netdev(dev2);
-+ unregister_netdev(dev1);
-+ free_netdev(dev2);
-+ free_netdev(dev1);
-+ }
-+}
-+
-+static int __init loopback_init(void)
-+{
-+ int i, err = 0;
-+
-+ for (i = 0; i < nloopbacks; i++)
-+ if ((err = make_loopback(i)) != 0)
-+ break;
-+
-+ return err;
-+}
-+
-+module_init(loopback_init);
-+
-+static void __exit loopback_exit(void)
-+{
-+ int i;
-+
-+ for (i = nloopbacks; i-- > 0; )
-+ clean_loopback(i);
-+}
-+
-+module_exit(loopback_exit);
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
-new file mode 100644
-index 0000000..64173fa
---- /dev/null
-+++ b/drivers/xen/netback/netback.c
-@@ -0,0 +1,835 @@
-+/******************************************************************************
-+ * drivers/xen/netback/netback.c
-+ *
-+ * Back-end of the driver for virtual network devices. This portion of the
-+ * driver exports a 'unified' network-device interface that can be accessed
-+ * by any operating system that implements a compatible front end. A
-+ * reference front-end implementation can be found in:
-+ * drivers/xen/netfront/netfront.c
-+ *
-+ * Copyright (c) 2002-2005, K A Fraser
-+ */
-+
-+#include "common.h"
-+#include <xen/balloon.h>
-+#include <xen/interface/memory.h>
-+
-+/*#define NETBE_DEBUG_INTERRUPT*/
-+
-+static void netif_idx_release(u16 pending_idx);
-+static void netif_page_release(struct page *page);
-+static void make_tx_response(netif_t *netif,
-+ u16 id,
-+ s8 st);
-+static int make_rx_response(netif_t *netif,
-+ u16 id,
-+ s8 st,
-+ u16 offset,
-+ u16 size,
-+ u16 flags);
-+
-+static void net_tx_action(unsigned long unused);
-+static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);
-+
-+static void net_rx_action(unsigned long unused);
-+static DECLARE_TASKLET(net_rx_tasklet, net_rx_action, 0);
-+
-+static struct timer_list net_timer;
-+
-+#define MAX_PENDING_REQS 256
-+
-+static struct sk_buff_head rx_queue;
-+static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1];
-+static mmu_update_t rx_mmu[NET_RX_RING_SIZE];
-+static gnttab_transfer_t grant_rx_op[NET_RX_RING_SIZE];
-+static unsigned char rx_notify[NR_IRQS];
-+
-+static unsigned long mmap_vstart;
-+#define MMAP_VADDR(_req) (mmap_vstart + ((_req) * PAGE_SIZE))
-+
-+#define PKT_PROT_LEN 64
-+
-+static struct {
-+ netif_tx_request_t req;
-+ netif_t *netif;
-+} pending_tx_info[MAX_PENDING_REQS];
-+static u16 pending_ring[MAX_PENDING_REQS];
-+typedef unsigned int PEND_RING_IDX;
-+#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
-+static PEND_RING_IDX pending_prod, pending_cons;
-+#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
-+
-+/* Freed TX SKBs get batched on this ring before return to pending_ring. */
-+static u16 dealloc_ring[MAX_PENDING_REQS];
-+static PEND_RING_IDX dealloc_prod, dealloc_cons;
-+
-+static struct sk_buff_head tx_queue;
-+
-+static grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
-+static gnttab_unmap_grant_ref_t tx_unmap_ops[MAX_PENDING_REQS];
-+static gnttab_map_grant_ref_t tx_map_ops[MAX_PENDING_REQS];
-+
-+static struct list_head net_schedule_list;
-+static spinlock_t net_schedule_list_lock;
-+
-+#define MAX_MFN_ALLOC 64
-+static unsigned long mfn_list[MAX_MFN_ALLOC];
-+static unsigned int alloc_index = 0;
-+static spinlock_t mfn_lock = SPIN_LOCK_UNLOCKED;
-+
-+static unsigned long alloc_mfn(void)
-+{
-+ unsigned long mfn = 0, flags;
-+ struct xen_memory_reservation reservation = {
-+ .extent_start = mfn_list,
-+ .nr_extents = MAX_MFN_ALLOC,
-+ .extent_order = 0,
-+ .domid = DOMID_SELF
-+ };
-+ spin_lock_irqsave(&mfn_lock, flags);
-+ if ( unlikely(alloc_index == 0) )
-+ alloc_index = HYPERVISOR_memory_op(
-+ XENMEM_increase_reservation, &reservation);
-+ if ( alloc_index != 0 )
-+ mfn = mfn_list[--alloc_index];
-+ spin_unlock_irqrestore(&mfn_lock, flags);
-+ return mfn;
-+}
-+
-+static inline void maybe_schedule_tx_action(void)
-+{
-+ smp_mb();
-+ if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
-+ !list_empty(&net_schedule_list))
-+ tasklet_schedule(&net_tx_tasklet);
-+}
-+
-+/*
-+ * A gross way of confirming the origin of an skb data page. The slab
-+ * allocator abuses a field in the page struct to cache the kmem_cache_t ptr.
-+ */
-+static inline int is_xen_skb(struct sk_buff *skb)
-+{
-+ extern kmem_cache_t *skbuff_cachep;
-+ kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->lru.next;
-+ return (cp == skbuff_cachep);
-+}
-+
-+int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
-+{
-+ netif_t *netif = netdev_priv(dev);
-+
-+ BUG_ON(skb->dev != dev);
-+
-+ /* Drop the packet if the target domain has no receive buffers. */
-+ if (!netif->active ||
-+ (netif->rx_req_cons_peek == netif->rx.sring->req_prod) ||
-+ ((netif->rx_req_cons_peek - netif->rx.rsp_prod_pvt) ==
-+ NET_RX_RING_SIZE))
-+ goto drop;
-+
-+ /*
-+ * We do not copy the packet unless:
-+ * 1. The data is shared; or
-+ * 2. The data is not allocated from our special cache.
-+	 * NB. We also cannot cope with fragmented packets, but we won't get
-+	 * any because we do not advertise the NETIF_F_SG feature.
-+ */
-+ if (skb_shared(skb) || skb_cloned(skb) || !is_xen_skb(skb)) {
-+ int hlen = skb->data - skb->head;
-+ int ret;
-+ struct sk_buff *nskb = dev_alloc_skb(hlen + skb->len);
-+ if ( unlikely(nskb == NULL) )
-+ goto drop;
-+ skb_reserve(nskb, hlen);
-+ __skb_put(nskb, skb->len);
-+ ret = skb_copy_bits(skb, -hlen, nskb->data - hlen,
-+ skb->len + hlen);
-+ BUG_ON(ret);
-+ nskb->dev = skb->dev;
-+ nskb->proto_csum_valid = skb->proto_csum_valid;
-+ dev_kfree_skb(skb);
-+ skb = nskb;
-+ }
-+
-+ netif->rx_req_cons_peek++;
-+ netif_get(netif);
-+
-+ skb_queue_tail(&rx_queue, skb);
-+ tasklet_schedule(&net_rx_tasklet);
-+
-+ return 0;
-+
-+ drop:
-+ netif->stats.tx_dropped++;
-+ dev_kfree_skb(skb);
-+ return 0;
-+}
-+
-+#if 0
-+static void xen_network_done_notify(void)
-+{
-+ static struct net_device *eth0_dev = NULL;
-+ if (unlikely(eth0_dev == NULL))
-+ eth0_dev = __dev_get_by_name("eth0");
-+ netif_rx_schedule(eth0_dev);
-+}
-+/*
-+ * Add following to poll() function in NAPI driver (Tigon3 is example):
-+ * if ( xen_network_done() )
-+ * tg3_enable_ints(tp);
-+ */
-+int xen_network_done(void)
-+{
-+ return skb_queue_empty(&rx_queue);
-+}
-+#endif
-+
-+static void net_rx_action(unsigned long unused)
-+{
-+ netif_t *netif = NULL;
-+ s8 status;
-+ u16 size, id, irq;
-+ multicall_entry_t *mcl;
-+ mmu_update_t *mmu;
-+ gnttab_transfer_t *gop;
-+ unsigned long vdata, old_mfn, new_mfn;
-+ struct sk_buff_head rxq;
-+ struct sk_buff *skb;
-+ u16 notify_list[NET_RX_RING_SIZE];
-+ int notify_nr = 0;
-+ int ret;
-+
-+ skb_queue_head_init(&rxq);
-+
-+ mcl = rx_mcl;
-+ mmu = rx_mmu;
-+ gop = grant_rx_op;
-+
-+ while ((skb = skb_dequeue(&rx_queue)) != NULL) {
-+ netif = netdev_priv(skb->dev);
-+ vdata = (unsigned long)skb->data;
-+ old_mfn = virt_to_mfn(vdata);
-+
-+		/* Memory squeeze? Back off for an arbitrary period. */
-+ if ((new_mfn = alloc_mfn()) == 0) {
-+ if ( net_ratelimit() )
-+ WPRINTK("Memory squeeze in netback driver.\n");
-+ mod_timer(&net_timer, jiffies + HZ);
-+ skb_queue_head(&rx_queue, skb);
-+ break;
-+ }
-+ /*
-+ * Set the new P2M table entry before reassigning the old data
-+ * page. Heed the comment in pgtable-2level.h:pte_page(). :-)
-+ */
-+ set_phys_to_machine(__pa(skb->data) >> PAGE_SHIFT, new_mfn);
-+
-+ MULTI_update_va_mapping(mcl, vdata,
-+ pfn_pte_ma(new_mfn, PAGE_KERNEL), 0);
-+ mcl++;
-+
-+ gop->mfn = old_mfn;
-+ gop->domid = netif->domid;
-+ gop->ref = RING_GET_REQUEST(
-+ &netif->rx, netif->rx.req_cons)->gref;
-+ netif->rx.req_cons++;
-+ gop++;
-+
-+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+ mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
-+ MMU_MACHPHYS_UPDATE;
-+ mmu->val = __pa(vdata) >> PAGE_SHIFT;
-+ mmu++;
-+ }
-+
-+ __skb_queue_tail(&rxq, skb);
-+
-+ /* Filled the batch queue? */
-+ if ((gop - grant_rx_op) == ARRAY_SIZE(grant_rx_op))
-+ break;
-+ }
-+
-+ if (mcl == rx_mcl)
-+ return;
-+
-+ mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
-+
-+ if (mmu - rx_mmu) {
-+ mcl->op = __HYPERVISOR_mmu_update;
-+ mcl->args[0] = (unsigned long)rx_mmu;
-+ mcl->args[1] = mmu - rx_mmu;
-+ mcl->args[2] = 0;
-+ mcl->args[3] = DOMID_SELF;
-+ mcl++;
-+ }
-+
-+ ret = HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
-+ BUG_ON(ret != 0);
-+
-+ ret = HYPERVISOR_grant_table_op(GNTTABOP_transfer, grant_rx_op,
-+ gop - grant_rx_op);
-+ BUG_ON(ret != 0);
-+
-+ mcl = rx_mcl;
-+ gop = grant_rx_op;
-+ while ((skb = __skb_dequeue(&rxq)) != NULL) {
-+ netif = netdev_priv(skb->dev);
-+ size = skb->tail - skb->data;
-+
-+ /* Rederive the machine addresses. */
-+ new_mfn = mcl->args[1] >> PAGE_SHIFT;
-+ old_mfn = gop->mfn;
-+ atomic_set(&(skb_shinfo(skb)->dataref), 1);
-+ skb_shinfo(skb)->nr_frags = 0;
-+ skb_shinfo(skb)->frag_list = NULL;
-+
-+ netif->stats.tx_bytes += size;
-+ netif->stats.tx_packets++;
-+
-+ /* The update_va_mapping() must not fail. */
-+ BUG_ON(mcl->result != 0);
-+
-+ /* Check the reassignment error code. */
-+ status = NETIF_RSP_OKAY;
-+ if (gop->status != 0) {
-+ DPRINTK("Bad status %d from grant transfer to DOM%u\n",
-+ gop->status, netif->domid);
-+ /*
-+ * Page no longer belongs to us unless GNTST_bad_page,
-+ * but that should be a fatal error anyway.
-+ */
-+ BUG_ON(gop->status == GNTST_bad_page);
-+ status = NETIF_RSP_ERROR;
-+ }
-+ irq = netif->irq;
-+ id = RING_GET_REQUEST(&netif->rx, netif->rx.rsp_prod_pvt)->id;
-+ if (make_rx_response(netif, id, status,
-+ (unsigned long)skb->data & ~PAGE_MASK,
-+ size, skb->proto_csum_valid ?
-+ NETRXF_csum_valid : 0) &&
-+ (rx_notify[irq] == 0)) {
-+ rx_notify[irq] = 1;
-+ notify_list[notify_nr++] = irq;
-+ }
-+
-+ netif_put(netif);
-+ dev_kfree_skb(skb);
-+ mcl++;
-+ gop++;
-+ }
-+
-+ while (notify_nr != 0) {
-+ irq = notify_list[--notify_nr];
-+ rx_notify[irq] = 0;
-+ notify_remote_via_irq(irq);
-+ }
-+
-+ /* More work to do? */
-+ if (!skb_queue_empty(&rx_queue) && !timer_pending(&net_timer))
-+ tasklet_schedule(&net_rx_tasklet);
-+#if 0
-+ else
-+ xen_network_done_notify();
-+#endif
-+}
-+
-+static void net_alarm(unsigned long unused)
-+{
-+ tasklet_schedule(&net_rx_tasklet);
-+}
-+
-+struct net_device_stats *netif_be_get_stats(struct net_device *dev)
-+{
-+ netif_t *netif = netdev_priv(dev);
-+ return &netif->stats;
-+}
-+
-+static int __on_net_schedule_list(netif_t *netif)
-+{
-+ return netif->list.next != NULL;
-+}
-+
-+static void remove_from_net_schedule_list(netif_t *netif)
-+{
-+ spin_lock_irq(&net_schedule_list_lock);
-+ if (likely(__on_net_schedule_list(netif))) {
-+ list_del(&netif->list);
-+ netif->list.next = NULL;
-+ netif_put(netif);
-+ }
-+ spin_unlock_irq(&net_schedule_list_lock);
-+}
-+
-+static void add_to_net_schedule_list_tail(netif_t *netif)
-+{
-+ if (__on_net_schedule_list(netif))
-+ return;
-+
-+ spin_lock_irq(&net_schedule_list_lock);
-+ if (!__on_net_schedule_list(netif) && netif->active) {
-+ list_add_tail(&netif->list, &net_schedule_list);
-+ netif_get(netif);
-+ }
-+ spin_unlock_irq(&net_schedule_list_lock);
-+}
-+
-+/*
-+ * Note on CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER:
-+ * If this driver is pipelining transmit requests then we can be very
-+ * aggressive in avoiding new-packet notifications -- frontend only needs to
-+ * send a notification if there are no outstanding unreceived responses.
-+ * If we may be buffering transmit buffers for any reason then we must be rather
-+ * more conservative and treat this as the final check for pending work.
-+ */
-+void netif_schedule_work(netif_t *netif)
-+{
-+ int more_to_do;
-+
-+#ifdef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
-+ more_to_do = RING_HAS_UNCONSUMED_REQUESTS(&netif->tx);
-+#else
-+ RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
-+#endif
-+
-+ if (more_to_do) {
-+ add_to_net_schedule_list_tail(netif);
-+ maybe_schedule_tx_action();
-+ }
-+}
-+
-+void netif_deschedule_work(netif_t *netif)
-+{
-+ remove_from_net_schedule_list(netif);
-+}
-+
-+
-+static void tx_credit_callback(unsigned long data)
-+{
-+ netif_t *netif = (netif_t *)data;
-+ netif->remaining_credit = netif->credit_bytes;
-+ netif_schedule_work(netif);
-+}
-+
-+inline static void net_tx_action_dealloc(void)
-+{
-+ gnttab_unmap_grant_ref_t *gop;
-+ u16 pending_idx;
-+ PEND_RING_IDX dc, dp;
-+ netif_t *netif;
-+ int ret;
-+
-+ dc = dealloc_cons;
-+ dp = dealloc_prod;
-+
-+ /*
-+ * Free up any grants we have finished using
-+ */
-+ gop = tx_unmap_ops;
-+ while (dc != dp) {
-+ pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
-+ gop->host_addr = MMAP_VADDR(pending_idx);
-+ gop->dev_bus_addr = 0;
-+ gop->handle = grant_tx_handle[pending_idx];
-+ gop++;
-+ }
-+ ret = HYPERVISOR_grant_table_op(
-+ GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops);
-+ BUG_ON(ret);
-+
-+ while (dealloc_cons != dp) {
-+ pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];
-+
-+ netif = pending_tx_info[pending_idx].netif;
-+
-+ make_tx_response(netif, pending_tx_info[pending_idx].req.id,
-+ NETIF_RSP_OKAY);
-+
-+ pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
-+
-+ netif_put(netif);
-+ }
-+}
-+
-+/* Called after netfront has transmitted */
-+static void net_tx_action(unsigned long unused)
-+{
-+ struct list_head *ent;
-+ struct sk_buff *skb;
-+ netif_t *netif;
-+ netif_tx_request_t txreq;
-+ u16 pending_idx;
-+ RING_IDX i;
-+ gnttab_map_grant_ref_t *mop;
-+ unsigned int data_len;
-+ int ret, work_to_do;
-+
-+ if (dealloc_cons != dealloc_prod)
-+ net_tx_action_dealloc();
-+
-+ mop = tx_map_ops;
-+ while ((NR_PENDING_REQS < MAX_PENDING_REQS) &&
-+ !list_empty(&net_schedule_list)) {
-+ /* Get a netif from the list with work to do. */
-+ ent = net_schedule_list.next;
-+ netif = list_entry(ent, netif_t, list);
-+ netif_get(netif);
-+ remove_from_net_schedule_list(netif);
-+
-+ RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, work_to_do);
-+ if (!work_to_do) {
-+ netif_put(netif);
-+ continue;
-+ }
-+
-+ i = netif->tx.req_cons;
-+ rmb(); /* Ensure that we see the request before we copy it. */
-+ memcpy(&txreq, RING_GET_REQUEST(&netif->tx, i), sizeof(txreq));
-+ /* Credit-based scheduling. */
-+ if (txreq.size > netif->remaining_credit) {
-+ unsigned long now = jiffies;
-+ unsigned long next_credit =
-+ netif->credit_timeout.expires +
-+ msecs_to_jiffies(netif->credit_usec / 1000);
-+
-+ /* Timer could already be pending in rare cases. */
-+ if (timer_pending(&netif->credit_timeout))
-+ break;
-+
-+ /* Passed the point where we can replenish credit? */
-+ if (time_after_eq(now, next_credit)) {
-+ netif->credit_timeout.expires = now;
-+ netif->remaining_credit = netif->credit_bytes;
-+ }
-+
-+ /* Still too big to send right now? Set a callback. */
-+ if (txreq.size > netif->remaining_credit) {
-+ netif->remaining_credit = 0;
-+ netif->credit_timeout.data =
-+ (unsigned long)netif;
-+ netif->credit_timeout.function =
-+ tx_credit_callback;
-+ __mod_timer(&netif->credit_timeout,
-+ next_credit);
-+ break;
-+ }
-+ }
-+ netif->remaining_credit -= txreq.size;
-+
-+ netif->tx.req_cons++;
-+
-+ netif_schedule_work(netif);
-+
-+ if (unlikely(txreq.size < ETH_HLEN) ||
-+ unlikely(txreq.size > ETH_FRAME_LEN)) {
-+ DPRINTK("Bad packet size: %d\n", txreq.size);
-+ make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
-+ netif_put(netif);
-+ continue;
-+ }
-+
-+		/* The payload must not cross a page boundary, as it cannot be fragmented. */
-+ if (unlikely((txreq.offset + txreq.size) >= PAGE_SIZE)) {
-+ DPRINTK("txreq.offset: %x, size: %u, end: %lu\n",
-+ txreq.offset, txreq.size,
-+ (txreq.offset &~PAGE_MASK) + txreq.size);
-+ make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
-+ netif_put(netif);
-+ continue;
-+ }
-+
-+ pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
-+
-+ data_len = (txreq.size > PKT_PROT_LEN) ?
-+ PKT_PROT_LEN : txreq.size;
-+
-+ skb = alloc_skb(data_len+16, GFP_ATOMIC);
-+ if (unlikely(skb == NULL)) {
-+ DPRINTK("Can't allocate a skb in start_xmit.\n");
-+ make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
-+ netif_put(netif);
-+ break;
-+ }
-+
-+ /* Packets passed to netif_rx() must have some headroom. */
-+ skb_reserve(skb, 16);
-+
-+ mop->host_addr = MMAP_VADDR(pending_idx);
-+ mop->dom = netif->domid;
-+ mop->ref = txreq.gref;
-+ mop->flags = GNTMAP_host_map | GNTMAP_readonly;
-+ mop++;
-+
-+ memcpy(&pending_tx_info[pending_idx].req,
-+ &txreq, sizeof(txreq));
-+ pending_tx_info[pending_idx].netif = netif;
-+ *((u16 *)skb->data) = pending_idx;
-+
-+ __skb_queue_tail(&tx_queue, skb);
-+
-+ pending_cons++;
-+
-+ if ((mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops))
-+ break;
-+ }
-+
-+ if (mop == tx_map_ops)
-+ return;
-+
-+ ret = HYPERVISOR_grant_table_op(
-+ GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops);
-+ BUG_ON(ret);
-+
-+ mop = tx_map_ops;
-+ while ((skb = __skb_dequeue(&tx_queue)) != NULL) {
-+ pending_idx = *((u16 *)skb->data);
-+ netif = pending_tx_info[pending_idx].netif;
-+ memcpy(&txreq, &pending_tx_info[pending_idx].req,
-+ sizeof(txreq));
-+
-+ /* Check the remap error code. */
-+ if (unlikely(mop->status)) {
-+ printk(KERN_ALERT "#### netback grant fails\n");
-+ make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
-+ netif_put(netif);
-+ kfree_skb(skb);
-+ mop++;
-+ pending_ring[MASK_PEND_IDX(pending_prod++)] =
-+ pending_idx;
-+ continue;
-+ }
-+ set_phys_to_machine(
-+ __pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT,
-+ FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT));
-+ grant_tx_handle[pending_idx] = mop->handle;
-+
-+ data_len = (txreq.size > PKT_PROT_LEN) ?
-+ PKT_PROT_LEN : txreq.size;
-+
-+ __skb_put(skb, data_len);
-+ memcpy(skb->data,
-+ (void *)(MMAP_VADDR(pending_idx)|txreq.offset),
-+ data_len);
-+ if (data_len < txreq.size) {
-+ /* Append the packet payload as a fragment. */
-+ skb_shinfo(skb)->frags[0].page =
-+ virt_to_page(MMAP_VADDR(pending_idx));
-+ skb_shinfo(skb)->frags[0].size =
-+ txreq.size - data_len;
-+ skb_shinfo(skb)->frags[0].page_offset =
-+ txreq.offset + data_len;
-+ skb_shinfo(skb)->nr_frags = 1;
-+ } else {
-+ /* Schedule a response immediately. */
-+ netif_idx_release(pending_idx);
-+ }
-+
-+ skb->data_len = txreq.size - data_len;
-+ skb->len += skb->data_len;
-+
-+ skb->dev = netif->dev;
-+ skb->protocol = eth_type_trans(skb, skb->dev);
-+
-+ /*
-+ * No checking needed on localhost, but remember the field is
-+ * blank.
-+ */
-+ skb->ip_summed = CHECKSUM_UNNECESSARY;
-+ skb->proto_csum_valid = 1;
-+ skb->proto_csum_blank = !!(txreq.flags & NETTXF_csum_blank);
-+
-+ netif->stats.rx_bytes += txreq.size;
-+ netif->stats.rx_packets++;
-+
-+ netif_rx(skb);
-+ netif->dev->last_rx = jiffies;
-+
-+ mop++;
-+ }
-+}
-+
-+static void netif_idx_release(u16 pending_idx)
-+{
-+ static spinlock_t _lock = SPIN_LOCK_UNLOCKED;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&_lock, flags);
-+ dealloc_ring[MASK_PEND_IDX(dealloc_prod++)] = pending_idx;
-+ spin_unlock_irqrestore(&_lock, flags);
-+
-+ tasklet_schedule(&net_tx_tasklet);
-+}
-+
-+static void netif_page_release(struct page *page)
-+{
-+ u16 pending_idx = page - virt_to_page(mmap_vstart);
-+
-+ /* Ready for next use. */
-+ set_page_count(page, 1);
-+
-+ netif_idx_release(pending_idx);
-+}
-+
-+irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
-+{
-+ netif_t *netif = dev_id;
-+ add_to_net_schedule_list_tail(netif);
-+ maybe_schedule_tx_action();
-+ return IRQ_HANDLED;
-+}
-+
-+static void make_tx_response(netif_t *netif,
-+ u16 id,
-+ s8 st)
-+{
-+ RING_IDX i = netif->tx.rsp_prod_pvt;
-+ netif_tx_response_t *resp;
-+ int notify;
-+
-+ resp = RING_GET_RESPONSE(&netif->tx, i);
-+ resp->id = id;
-+ resp->status = st;
-+
-+ netif->tx.rsp_prod_pvt = ++i;
-+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->tx, notify);
-+ if (notify)
-+ notify_remote_via_irq(netif->irq);
-+
-+#ifdef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
-+ if (i == netif->tx.req_cons) {
-+ int more_to_do;
-+ RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
-+ if (more_to_do)
-+ add_to_net_schedule_list_tail(netif);
-+ }
-+#endif
-+}
-+
-+static int make_rx_response(netif_t *netif,
-+ u16 id,
-+ s8 st,
-+ u16 offset,
-+ u16 size,
-+ u16 flags)
-+{
-+ RING_IDX i = netif->rx.rsp_prod_pvt;
-+ netif_rx_response_t *resp;
-+ int notify;
-+
-+ resp = RING_GET_RESPONSE(&netif->rx, i);
-+ resp->offset = offset;
-+ resp->flags = flags;
-+ resp->id = id;
-+ resp->status = (s16)size;
-+ if (st < 0)
-+ resp->status = (s16)st;
-+
-+ netif->rx.rsp_prod_pvt = ++i;
-+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->rx, notify);
-+
-+ return notify;
-+}
-+
-+#ifdef NETBE_DEBUG_INTERRUPT
-+static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
-+{
-+ struct list_head *ent;
-+ netif_t *netif;
-+ int i = 0;
-+
-+ printk(KERN_ALERT "netif_schedule_list:\n");
-+ spin_lock_irq(&net_schedule_list_lock);
-+
-+ list_for_each (ent, &net_schedule_list) {
-+ netif = list_entry(ent, netif_t, list);
-+ printk(KERN_ALERT " %d: private(rx_req_cons=%08x "
-+ "rx_resp_prod=%08x\n",
-+ i, netif->rx.req_cons, netif->rx.rsp_prod_pvt);
-+ printk(KERN_ALERT " tx_req_cons=%08x tx_resp_prod=%08x)\n",
-+ netif->tx.req_cons, netif->tx.rsp_prod_pvt);
-+ printk(KERN_ALERT " shared(rx_req_prod=%08x "
-+ "rx_resp_prod=%08x\n",
-+ netif->rx.sring->req_prod, netif->rx.sring->rsp_prod);
-+ printk(KERN_ALERT " rx_event=%08x tx_req_prod=%08x\n",
-+ netif->rx.sring->rsp_event, netif->tx.sring->req_prod);
-+ printk(KERN_ALERT " tx_resp_prod=%08x, tx_event=%08x)\n",
-+ netif->tx.sring->rsp_prod, netif->tx.sring->rsp_event);
-+ i++;
-+ }
-+
-+ spin_unlock_irq(&net_schedule_list_lock);
-+ printk(KERN_ALERT " ** End of netif_schedule_list **\n");
-+
-+ return IRQ_HANDLED;
-+}
-+#endif
-+
-+static int __init netback_init(void)
-+{
-+ int i;
-+ struct page *page;
-+
-+ /* We can increase reservation by this much in net_rx_action(). */
-+ balloon_update_driver_allowance(NET_RX_RING_SIZE);
-+
-+ skb_queue_head_init(&rx_queue);
-+ skb_queue_head_init(&tx_queue);
-+
-+ init_timer(&net_timer);
-+ net_timer.data = 0;
-+ net_timer.function = net_alarm;
-+
-+ page = balloon_alloc_empty_page_range(MAX_PENDING_REQS);
-+ BUG_ON(page == NULL);
-+ mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
-+
-+ for (i = 0; i < MAX_PENDING_REQS; i++) {
-+ page = virt_to_page(MMAP_VADDR(i));
-+ set_page_count(page, 1);
-+ SetPageForeign(page, netif_page_release);
-+ }
-+
-+ pending_cons = 0;
-+ pending_prod = MAX_PENDING_REQS;
-+ for (i = 0; i < MAX_PENDING_REQS; i++)
-+ pending_ring[i] = i;
-+
-+ spin_lock_init(&net_schedule_list_lock);
-+ INIT_LIST_HEAD(&net_schedule_list);
-+
-+ netif_xenbus_init();
-+
-+#ifdef NETBE_DEBUG_INTERRUPT
-+ (void)bind_virq_to_irqhandler(
-+ VIRQ_DEBUG,
-+ 0,
-+ netif_be_dbg,
-+ SA_SHIRQ,
-+ "net-be-dbg",
-+ &netif_be_dbg);
-+#endif
-+
-+ __unsafe(THIS_MODULE);
-+
-+ return 0;
-+}
-+
-+static void netback_cleanup(void)
-+{
-+ BUG();
-+}
-+
-+module_init(netback_init);
-+module_exit(netback_cleanup);
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/netback/xenbus.c b/drivers/xen/netback/xenbus.c
-new file mode 100644
-index 0000000..64bebdb
---- /dev/null
-+++ b/drivers/xen/netback/xenbus.c
-@@ -0,0 +1,327 @@
-+/* Xenbus code for netif backend
-+ Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
-+ Copyright (C) 2005 XenSource Ltd
-+
-+ This program is free software; you can redistribute it and/or modify
-+ it under the terms of the GNU General Public License as published by
-+ the Free Software Foundation; either version 2 of the License, or
-+ (at your option) any later version.
-+
-+ This program is distributed in the hope that it will be useful,
-+ but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ GNU General Public License for more details.
-+
-+ You should have received a copy of the GNU General Public License
-+ along with this program; if not, write to the Free Software
-+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+*/
-+
-+
-+#include <stdarg.h>
-+#include <linux/module.h>
-+#include <xen/xenbus.h>
-+#include <xen/net_driver_util.h>
-+#include "common.h"
-+
-+
-+#if 0
-+#undef DPRINTK
-+#define DPRINTK(fmt, args...) \
-+ printk("netback/xenbus (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args)
-+#endif
-+
-+
-+struct backend_info
-+{
-+ struct xenbus_device *dev;
-+ netif_t *netif;
-+ struct xenbus_watch backend_watch;
-+ XenbusState frontend_state;
-+};
-+
-+
-+static int connect_rings(struct backend_info *);
-+static void connect(struct backend_info *);
-+static void maybe_connect(struct backend_info *);
-+static void backend_changed(struct xenbus_watch *, const char **,
-+ unsigned int);
-+
-+
-+static int netback_remove(struct xenbus_device *dev)
-+{
-+ struct backend_info *be = dev->data;
-+
-+ if (be->backend_watch.node) {
-+ unregister_xenbus_watch(&be->backend_watch);
-+ kfree(be->backend_watch.node);
-+ be->backend_watch.node = NULL;
-+ }
-+ if (be->netif) {
-+ netif_disconnect(be->netif);
-+ be->netif = NULL;
-+ }
-+ kfree(be);
-+ dev->data = NULL;
-+ return 0;
-+}
-+
-+
-+/**
-+ * Entry point to this code when a new device is created. Allocate the basic
-+ * structures, and watch the store waiting for the hotplug scripts to tell us
-+ * the device's handle. Switch to InitWait.
-+ */
-+static int netback_probe(struct xenbus_device *dev,
-+ const struct xenbus_device_id *id)
-+{
-+ int err;
-+ struct backend_info *be = kmalloc(sizeof(struct backend_info),
-+ GFP_KERNEL);
-+ if (!be) {
-+ xenbus_dev_fatal(dev, -ENOMEM,
-+ "allocating backend structure");
-+ return -ENOMEM;
-+ }
-+ memset(be, 0, sizeof(*be));
-+
-+ be->dev = dev;
-+ dev->data = be;
-+
-+ err = xenbus_watch_path2(dev, dev->nodename, "handle",
-+ &be->backend_watch, backend_changed);
-+ if (err)
-+ goto fail;
-+
-+ err = xenbus_switch_state(dev, XBT_NULL, XenbusStateInitWait);
-+ if (err) {
-+ goto fail;
-+ }
-+
-+ return 0;
-+
-+fail:
-+ DPRINTK("failed");
-+ netback_remove(dev);
-+ return err;
-+}
-+
-+
-+/**
-+ * Handle the creation of the hotplug script environment. We add the script
-+ * and vif variables to the environment, for the benefit of the vif-* hotplug
-+ * scripts.
-+ */
-+static int netback_uevent(struct xenbus_device *xdev, char **envp,
-+ int num_envp, char *buffer, int buffer_size)
-+{
-+ struct backend_info *be = xdev->data;
-+ netif_t *netif = be->netif;
-+ int i = 0, length = 0;
-+ char *val;
-+
-+ DPRINTK("netback_uevent");
-+
-+ val = xenbus_read(XBT_NULL, xdev->nodename, "script", NULL);
-+ if (IS_ERR(val)) {
-+ int err = PTR_ERR(val);
-+ xenbus_dev_fatal(xdev, err, "reading script");
-+ return err;
-+ }
-+ else {
-+ add_uevent_var(envp, num_envp, &i, buffer, buffer_size,
-+ &length, "script=%s", val);
-+ kfree(val);
-+ }
-+
-+ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
-+ "vif=%s", netif->dev->name);
-+
-+ envp[i] = NULL;
-+
-+ return 0;
-+}
-+
-+
-+/**
-+ * Callback received when the hotplug scripts have placed the handle node.
-+ * Read it, and create a netif structure. If the frontend is ready, connect.
-+ */
-+static void backend_changed(struct xenbus_watch *watch,
-+ const char **vec, unsigned int len)
-+{
-+ int err;
-+ long handle;
-+ struct backend_info *be
-+ = container_of(watch, struct backend_info, backend_watch);
-+ struct xenbus_device *dev = be->dev;
-+
-+ DPRINTK("");
-+
-+ err = xenbus_scanf(XBT_NULL, dev->nodename, "handle", "%li", &handle);
-+ if (XENBUS_EXIST_ERR(err)) {
-+ /* Since this watch will fire once immediately after it is
-+ registered, we expect this. Ignore it, and wait for the
-+ hotplug scripts. */
-+ return;
-+ }
-+ if (err != 1) {
-+ xenbus_dev_fatal(dev, err, "reading handle");
-+ return;
-+ }
-+
-+ if (be->netif == NULL) {
-+ u8 be_mac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
-+
-+ be->netif = alloc_netif(dev->otherend_id, handle, be_mac);
-+ if (IS_ERR(be->netif)) {
-+ err = PTR_ERR(be->netif);
-+ be->netif = NULL;
-+ xenbus_dev_fatal(dev, err, "creating interface");
-+ return;
-+ }
-+
-+ kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
-+
-+ maybe_connect(be);
-+ }
-+}
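-+/*
-+ * For orientation, a sketch of the store layout the watch above relies
-+ * on. The paths shown are the typical layout for this interface; the
-+ * exact nodes are written by the toolstack and hotplug scripts, not here:
-+ *
-+ *   backend/vif/<frontend-domid>/<handle>/handle = "<handle>"
-+ *   backend/vif/<frontend-domid>/<handle>/script = "/etc/xen/scripts/vif-bridge"
-+ *
-+ * dev->nodename names the backend directory; once "handle" appears,
-+ * backend_changed() fires and alloc_netif() can create the netif.
-+ */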
-+
-+
-+/**
-+ * Callback received when the frontend's state changes.
-+ */
-+static void frontend_changed(struct xenbus_device *dev,
-+ XenbusState frontend_state)
-+{
-+ struct backend_info *be = dev->data;
-+
-+ DPRINTK("");
-+
-+ be->frontend_state = frontend_state;
-+
-+ switch (frontend_state) {
-+ case XenbusStateInitialising:
-+ case XenbusStateInitialised:
-+ break;
-+
-+ case XenbusStateConnected:
-+ maybe_connect(be);
-+ break;
-+
-+ case XenbusStateClosing:
-+ xenbus_switch_state(dev, XBT_NULL, XenbusStateClosing);
-+ break;
-+
-+ case XenbusStateClosed:
-+ kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
-+ device_unregister(&dev->dev);
-+ break;
-+
-+ case XenbusStateUnknown:
-+ case XenbusStateInitWait:
-+ default:
-+ xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
-+ frontend_state);
-+ break;
-+ }
-+}
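-+/*
-+ * A sketch of the handshake as the two callbacks above drive it (only
-+ * the transitions actually handled in this file):
-+ *
-+ *   netback_probe()              -> backend switches to InitWait
-+ *   frontend reports Connected   -> maybe_connect() -> Connected
-+ *   frontend reports Closing     -> backend follows to Closing
-+ *   frontend reports Closed      -> device is unregistered
-+ */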
-+
-+
-+/* ** Connection ** */
-+
-+
-+static void maybe_connect(struct backend_info *be)
-+{
-+ if (be->netif != NULL && be->frontend_state == XenbusStateConnected) {
-+ connect(be);
-+ }
-+}
-+
-+
-+static void connect(struct backend_info *be)
-+{
-+ int err;
-+ struct xenbus_device *dev = be->dev;
-+
-+ err = connect_rings(be);
-+ if (err)
-+ return;
-+
-+ err = xen_net_read_mac(dev, be->netif->fe_dev_addr);
-+ if (err) {
-+ xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
-+ return;
-+ }
-+
-+ xenbus_switch_state(dev, XBT_NULL, XenbusStateConnected);
-+}
-+
-+
-+static int connect_rings(struct backend_info *be)
-+{
-+ struct xenbus_device *dev = be->dev;
-+ unsigned long tx_ring_ref, rx_ring_ref;
-+ unsigned int evtchn;
-+ int err;
-+
-+ DPRINTK("");
-+
-+ err = xenbus_gather(XBT_NULL, dev->otherend,
-+ "tx-ring-ref", "%lu", &tx_ring_ref,
-+ "rx-ring-ref", "%lu", &rx_ring_ref,
-+ "event-channel", "%u", &evtchn, NULL);
-+ if (err) {
-+ xenbus_dev_fatal(dev, err,
-+ "reading %s/ring-ref and event-channel",
-+ dev->otherend);
-+ return err;
-+ }
-+
-+ /* Map the shared frame, irq etc. */
-+ err = netif_map(be->netif, tx_ring_ref, rx_ring_ref, evtchn);
-+ if (err) {
-+ xenbus_dev_fatal(dev, err,
-+ "mapping shared-frames %lu/%lu port %u",
-+ tx_ring_ref, rx_ring_ref, evtchn);
-+ return err;
-+ }
-+ return 0;
-+}
-+
-+
-+/* ** Driver Registration ** */
-+
-+
-+static struct xenbus_device_id netback_ids[] = {
-+ { "vif" },
-+ { "" }
-+};
-+
-+
-+static struct xenbus_driver netback = {
-+ .name = "vif",
-+ .owner = THIS_MODULE,
-+ .ids = netback_ids,
-+ .probe = netback_probe,
-+ .remove = netback_remove,
-+ .uevent = netback_uevent,
-+ .otherend_changed = frontend_changed,
-+};
-+
-+
-+void netif_xenbus_init(void)
-+{
-+ xenbus_register_backend(&netback);
-+}
-+
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/netfront/Kconfig b/drivers/xen/netfront/Kconfig
-new file mode 100644
-index 0000000..334e6c3
---- /dev/null
-+++ b/drivers/xen/netfront/Kconfig
-@@ -0,0 +1,6 @@
-+
-+config XENNET
-+ tristate "Xen network driver"
-+ depends on NETDEVICES && ARCH_XEN
-+ help
-+ Network driver for Xen
-diff --git a/drivers/xen/netfront/Makefile b/drivers/xen/netfront/Makefile
-new file mode 100644
-index 0000000..dc22829
---- /dev/null
-+++ b/drivers/xen/netfront/Makefile
-@@ -0,0 +1,4 @@
-+
-+obj-$(CONFIG_XEN_NETDEV_FRONTEND) := xennet.o
-+
-+xennet-objs := netfront.o
-diff --git a/drivers/xen/netfront/netfront.c b/drivers/xen/netfront/netfront.c
-new file mode 100644
-index 0000000..79d8645
---- /dev/null
-+++ b/drivers/xen/netfront/netfront.c
-@@ -0,0 +1,1500 @@
-+/******************************************************************************
-+ * Virtual network driver for conversing with remote driver backends.
-+ *
-+ * Copyright (c) 2002-2005, K A Fraser
-+ * Copyright (c) 2005, XenSource Ltd
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/module.h>
-+#include <linux/version.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+#include <linux/string.h>
-+#include <linux/errno.h>
-+#include <linux/netdevice.h>
-+#include <linux/inetdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/skbuff.h>
-+#include <linux/init.h>
-+#include <linux/bitops.h>
-+#include <linux/proc_fs.h>
-+#include <linux/ethtool.h>
-+#include <linux/in.h>
-+#include <net/sock.h>
-+#include <net/pkt_sched.h>
-+#include <net/arp.h>
-+#include <net/route.h>
-+#include <asm/io.h>
-+#include <asm/uaccess.h>
-+#include <xen/evtchn.h>
-+#include <xen/xenbus.h>
-+#include <xen/interface/io/netif.h>
-+#include <xen/interface/memory.h>
-+#include <xen/balloon.h>
-+#include <asm/page.h>
-+#include <asm/uaccess.h>
-+#include <xen/interface/grant_table.h>
-+#include <xen/gnttab.h>
-+#include <xen/net_driver_util.h>
-+
-+#define GRANT_INVALID_REF 0
-+
-+#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
-+#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
-+
-+#ifndef __GFP_NOWARN
-+#define __GFP_NOWARN 0
-+#endif
-+#define alloc_xen_skb(_l) __dev_alloc_skb((_l), GFP_ATOMIC|__GFP_NOWARN)
-+
-+#define init_skb_shinfo(_skb) \
-+ do { \
-+ atomic_set(&(skb_shinfo(_skb)->dataref), 1); \
-+ skb_shinfo(_skb)->nr_frags = 0; \
-+ skb_shinfo(_skb)->frag_list = NULL; \
-+ } while (0)
-+
-+static unsigned long rx_pfn_array[NET_RX_RING_SIZE];
-+static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1];
-+static mmu_update_t rx_mmu[NET_RX_RING_SIZE];
-+
-+struct netfront_info
-+{
-+ struct list_head list;
-+ struct net_device *netdev;
-+
-+ struct net_device_stats stats;
-+ unsigned int tx_full;
-+
-+ netif_tx_front_ring_t tx;
-+ netif_rx_front_ring_t rx;
-+
-+ spinlock_t tx_lock;
-+ spinlock_t rx_lock;
-+
-+ unsigned int handle;
-+ unsigned int evtchn, irq;
-+
-+ /* What is the status of our connection to the remote backend? */
-+#define BEST_CLOSED 0
-+#define BEST_DISCONNECTED 1
-+#define BEST_CONNECTED 2
-+ unsigned int backend_state;
-+
-+ /* Is this interface open or closed (down or up)? */
-+#define UST_CLOSED 0
-+#define UST_OPEN 1
-+ unsigned int user_state;
-+
-+ /* Receive-ring batched refills. */
-+#define RX_MIN_TARGET 8
-+#define RX_DFL_MIN_TARGET 64
-+#define RX_MAX_TARGET NET_RX_RING_SIZE
-+ int rx_min_target, rx_max_target, rx_target;
-+ struct sk_buff_head rx_batch;
-+
-+ struct timer_list rx_refill_timer;
-+
-+ /*
-+ * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
-+ * array is an index into a chain of free entries.
-+ */
-+ struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1];
-+ struct sk_buff *rx_skbs[NET_RX_RING_SIZE+1];
-+
-+ grant_ref_t gref_tx_head;
-+ grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
-+ grant_ref_t gref_rx_head;
-+ grant_ref_t grant_rx_ref[NET_TX_RING_SIZE + 1];
-+
-+ struct xenbus_device *xbdev;
-+ int tx_ring_ref;
-+ int rx_ring_ref;
-+ u8 mac[ETH_ALEN];
-+};
-+
-+/* Access macros for acquiring/freeing slots in {tx,rx}_skbs[]; see the
-+ * worked example below. */
-+#define ADD_ID_TO_FREELIST(_list, _id) \
-+ (_list)[(_id)] = (_list)[0]; \
-+ (_list)[0] = (void *)(unsigned long)(_id);
-+#define GET_ID_FROM_FREELIST(_list) \
-+ ({ unsigned long _id = (unsigned long)(_list)[0]; \
-+ (_list)[0] = (_list)[_id]; \
-+ (unsigned short)_id; })
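-+/*
-+ * Worked example of the freelist chain, starting from the fresh state
-+ * set up in create_netdev() below, where list[i] holds i+1:
-+ *
-+ *   list[0] = 1, list[1] = 2, list[2] = 3, ...
-+ *   GET_ID_FROM_FREELIST(list)   returns 1; list[0] becomes 2
-+ *   ADD_ID_TO_FREELIST(list, 1)  sets list[1] = 2, list[0] = 1 again
-+ *
-+ * Slot 0 is never handed out; it only anchors the chain.
-+ */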
-+
-+#ifdef DEBUG
-+static char *be_state_name[] = {
-+ [BEST_CLOSED] = "closed",
-+ [BEST_DISCONNECTED] = "disconnected",
-+ [BEST_CONNECTED] = "connected",
-+};
-+#endif
-+
-+#define DPRINTK(fmt, args...) pr_debug("netfront (%s:%d) " fmt, \
-+ __FUNCTION__, __LINE__, ##args)
-+#define IPRINTK(fmt, args...) \
-+ printk(KERN_INFO "netfront: " fmt, ##args)
-+#define WPRINTK(fmt, args...) \
-+ printk(KERN_WARNING "netfront: " fmt, ##args)
-+
-+
-+static int talk_to_backend(struct xenbus_device *, struct netfront_info *);
-+static int setup_device(struct xenbus_device *, struct netfront_info *);
-+static int create_netdev(int, struct xenbus_device *, struct net_device **);
-+
-+static void netfront_closing(struct xenbus_device *);
-+
-+static void end_access(int, void *);
-+static void netif_disconnect_backend(struct netfront_info *);
-+static void close_netdev(struct netfront_info *);
-+static void netif_free(struct netfront_info *);
-+
-+static void show_device(struct netfront_info *);
-+
-+static void network_connect(struct net_device *);
-+static void network_tx_buf_gc(struct net_device *);
-+static void network_alloc_rx_buffers(struct net_device *);
-+static int send_fake_arp(struct net_device *);
-+
-+static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs);
-+
-+#ifdef CONFIG_PROC_FS
-+static int xennet_proc_init(void);
-+static int xennet_proc_addif(struct net_device *dev);
-+static void xennet_proc_delif(struct net_device *dev);
-+#else
-+#define xennet_proc_init() (0)
-+#define xennet_proc_addif(d) (0)
-+#define xennet_proc_delif(d) ((void)0)
-+#endif
-+
-+
-+/**
-+ * Entry point to this code when a new device is created. Allocate the basic
-+ * structures and the ring buffers for communication with the backend, and
-+ * inform the backend of the appropriate details for those. Switch to
-+ * Connected state.
-+ */
-+static int netfront_probe(struct xenbus_device *dev,
-+ const struct xenbus_device_id *id)
-+{
-+ int err;
-+ struct net_device *netdev;
-+ struct netfront_info *info;
-+ unsigned int handle;
-+
-+ err = xenbus_scanf(XBT_NULL, dev->nodename, "handle", "%u", &handle);
-+ if (err != 1) {
-+ xenbus_dev_fatal(dev, err, "reading handle");
-+ return err;
-+ }
-+
-+ err = create_netdev(handle, dev, &netdev);
-+ if (err) {
-+ xenbus_dev_fatal(dev, err, "creating netdev");
-+ return err;
-+ }
-+
-+ info = netdev_priv(netdev);
-+ dev->data = info;
-+
-+ err = talk_to_backend(dev, info);
-+ if (err) {
-+ kfree(info);
-+ dev->data = NULL;
-+ return err;
-+ }
-+
-+ return 0;
-+}
-+
-+
-+/**
-+ * We are reconnecting to the backend, due to a suspend/resume, or a backend
-+ * driver restart. We tear down our netif structure and recreate it, but
-+ * leave the device-layer structures intact so that this is transparent to the
-+ * rest of the kernel.
-+ */
-+static int netfront_resume(struct xenbus_device *dev)
-+{
-+ struct netfront_info *info = dev->data;
-+
-+ DPRINTK("%s\n", dev->nodename);
-+
-+ netif_disconnect_backend(info);
-+ return talk_to_backend(dev, info);
-+}
-+
-+
-+/* Common code used when first setting up, and when resuming. */
-+static int talk_to_backend(struct xenbus_device *dev,
-+ struct netfront_info *info)
-+{
-+ const char *message;
-+ xenbus_transaction_t xbt;
-+ int err;
-+
-+ err = xen_net_read_mac(dev, info->mac);
-+ if (err) {
-+ xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
-+ goto out;
-+ }
-+
-+ /* Create shared ring, alloc event channel. */
-+ err = setup_device(dev, info);
-+ if (err)
-+ goto out;
-+
-+again:
-+ err = xenbus_transaction_start(&xbt);
-+ if (err) {
-+ xenbus_dev_fatal(dev, err, "starting transaction");
-+ goto destroy_ring;
-+ }
-+
-+ err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref","%u",
-+ info->tx_ring_ref);
-+ if (err) {
-+ message = "writing tx ring-ref";
-+ goto abort_transaction;
-+ }
-+ err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref","%u",
-+ info->rx_ring_ref);
-+ if (err) {
-+ message = "writing rx ring-ref";
-+ goto abort_transaction;
-+ }
-+ err = xenbus_printf(xbt, dev->nodename,
-+ "event-channel", "%u", info->evtchn);
-+ if (err) {
-+ message = "writing event-channel";
-+ goto abort_transaction;
-+ }
-+
-+ err = xenbus_printf(xbt, dev->nodename,
-+ "state", "%d", XenbusStateConnected);
-+ if (err) {
-+ message = "writing frontend XenbusStateConnected";
-+ goto abort_transaction;
-+ }
-+
-+ err = xenbus_transaction_end(xbt, 0);
-+ if (err) {
-+ if (err == -EAGAIN)
-+ goto again;
-+ xenbus_dev_fatal(dev, err, "completing transaction");
-+ goto destroy_ring;
-+ }
-+
-+ return 0;
-+
-+ abort_transaction:
-+ xenbus_transaction_end(xbt, 1);
-+ xenbus_dev_fatal(dev, err, "%s", message);
-+ destroy_ring:
-+ netif_free(info);
-+ out:
-+ return err;
-+}
-+
-+
-+static int setup_device(struct xenbus_device *dev, struct netfront_info *info)
-+{
-+ netif_tx_sring_t *txs;
-+ netif_rx_sring_t *rxs;
-+ int err;
-+ struct net_device *netdev = info->netdev;
-+
-+ info->tx_ring_ref = GRANT_INVALID_REF;
-+ info->rx_ring_ref = GRANT_INVALID_REF;
-+ info->rx.sring = NULL;
-+ info->tx.sring = NULL;
-+ info->irq = 0;
-+
-+ txs = (netif_tx_sring_t *)__get_free_page(GFP_KERNEL);
-+ if (!txs) {
-+ err = -ENOMEM;
-+ xenbus_dev_fatal(dev, err, "allocating tx ring page");
-+ goto fail;
-+ }
-+ rxs = (netif_rx_sring_t *)__get_free_page(GFP_KERNEL);
-+ if (!rxs) {
-+ err = -ENOMEM;
-+ xenbus_dev_fatal(dev, err, "allocating rx ring page");
-+ goto fail;
-+ }
-+ memset(txs, 0, PAGE_SIZE);
-+ memset(rxs, 0, PAGE_SIZE);
-+ info->backend_state = BEST_DISCONNECTED;
-+
-+ SHARED_RING_INIT(txs);
-+ FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
-+
-+ SHARED_RING_INIT(rxs);
-+ FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
-+
-+ err = xenbus_grant_ring(dev, virt_to_mfn(txs));
-+ if (err < 0)
-+ goto fail;
-+ info->tx_ring_ref = err;
-+
-+ err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
-+ if (err < 0)
-+ goto fail;
-+ info->rx_ring_ref = err;
-+
-+ err = xenbus_alloc_evtchn(dev, &info->evtchn);
-+ if (err)
-+ goto fail;
-+
-+ memcpy(netdev->dev_addr, info->mac, ETH_ALEN);
-+ network_connect(netdev);
-+ info->irq = bind_evtchn_to_irqhandler(
-+ info->evtchn, netif_int, SA_SAMPLE_RANDOM, netdev->name,
-+ netdev);
-+ (void)send_fake_arp(netdev);
-+ show_device(info);
-+
-+ return 0;
-+
-+ fail:
-+ netif_free(info);
-+ return err;
-+}
-+
-+
-+/**
-+ * Callback received when the backend's state changes.
-+ */
-+static void backend_changed(struct xenbus_device *dev,
-+ XenbusState backend_state)
-+{
-+ DPRINTK("\n");
-+
-+ switch (backend_state) {
-+ case XenbusStateInitialising:
-+ case XenbusStateInitWait:
-+ case XenbusStateInitialised:
-+ case XenbusStateConnected:
-+ case XenbusStateUnknown:
-+ case XenbusStateClosed:
-+ break;
-+
-+ case XenbusStateClosing:
-+ netfront_closing(dev);
-+ break;
-+ }
-+}
-+
-+
-+/** Send a packet on a net device to encourage switches to learn the
-+ * MAC. We send a fake ARP reply.
-+ *
-+ * @param dev device
-+ * @return 0 on success, error code otherwise
-+ */
-+static int send_fake_arp(struct net_device *dev)
-+{
-+ struct sk_buff *skb;
-+ u32 src_ip, dst_ip;
-+
-+ dst_ip = INADDR_BROADCAST;
-+ src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);
-+
-+ /* No IP? Then nothing to do. */
-+ if (src_ip == 0)
-+ return 0;
-+
-+ skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
-+ dst_ip, dev, src_ip,
-+ /*dst_hw*/ NULL, /*src_hw*/ NULL,
-+ /*target_hw*/ dev->dev_addr);
-+ if (skb == NULL)
-+ return -ENOMEM;
-+
-+ return dev_queue_xmit(skb);
-+}
-+
-+
-+static int network_open(struct net_device *dev)
-+{
-+ struct netfront_info *np = netdev_priv(dev);
-+
-+ memset(&np->stats, 0, sizeof(np->stats));
-+
-+ np->user_state = UST_OPEN;
-+
-+ network_alloc_rx_buffers(dev);
-+ np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
-+
-+ netif_start_queue(dev);
-+
-+ return 0;
-+}
-+
-+static void network_tx_buf_gc(struct net_device *dev)
-+{
-+ RING_IDX i, prod;
-+ unsigned short id;
-+ struct netfront_info *np = netdev_priv(dev);
-+ struct sk_buff *skb;
-+
-+ if (np->backend_state != BEST_CONNECTED)
-+ return;
-+
-+ do {
-+ prod = np->tx.sring->rsp_prod;
-+ rmb(); /* Ensure we see responses up to 'rp'. */
-+
-+ for (i = np->tx.rsp_cons; i != prod; i++) {
-+ id = RING_GET_RESPONSE(&np->tx, i)->id;
-+ skb = np->tx_skbs[id];
-+ if (unlikely(gnttab_query_foreign_access(
-+ np->grant_tx_ref[id]) != 0)) {
-+ printk(KERN_ALERT "network_tx_buf_gc: warning "
-+ "-- grant still in use by backend "
-+ "domain.\n");
-+ goto out;
-+ }
-+ gnttab_end_foreign_access_ref(
-+ np->grant_tx_ref[id], GNTMAP_readonly);
-+ gnttab_release_grant_reference(
-+ &np->gref_tx_head, np->grant_tx_ref[id]);
-+ np->grant_tx_ref[id] = GRANT_INVALID_REF;
-+ ADD_ID_TO_FREELIST(np->tx_skbs, id);
-+ dev_kfree_skb_irq(skb);
-+ }
-+
-+ np->tx.rsp_cons = prod;
-+
-+ /*
-+ * Set a new event, then check for race with update of tx_cons.
-+ * Note that it is essential to schedule a callback, no matter
-+ * how few buffers are pending. Even if there is space in the
-+ * transmit ring, higher layers may be blocked because too much
-+ * data is outstanding: in such cases notification from Xen is
-+ * likely to be the only kick that we'll get.
-+ */
-+ np->tx.sring->rsp_event =
-+ prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
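-+		/*
-+		 * Worked example: with prod = 10 and req_prod = 18 the
-+		 * statement above yields 10 + ((18 - 10) >> 1) + 1 = 15,
-+		 * i.e. we ask to be re-notified roughly half-way into
-+		 * the outstanding backlog.
-+		 */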
-+ mb();
-+ } while (prod != np->tx.sring->rsp_prod);
-+
-+ out:
-+ if (np->tx_full &&
-+ ((np->tx.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
-+ np->tx_full = 0;
-+ if (np->user_state == UST_OPEN)
-+ netif_wake_queue(dev);
-+ }
-+}
-+
-+
-+static void rx_refill_timeout(unsigned long data)
-+{
-+ struct net_device *dev = (struct net_device *)data;
-+ netif_rx_schedule(dev);
-+}
-+
-+
-+static void network_alloc_rx_buffers(struct net_device *dev)
-+{
-+ unsigned short id;
-+ struct netfront_info *np = netdev_priv(dev);
-+ struct sk_buff *skb;
-+ int i, batch_target;
-+ RING_IDX req_prod = np->rx.req_prod_pvt;
-+ struct xen_memory_reservation reservation;
-+ grant_ref_t ref;
-+
-+ if (unlikely(np->backend_state != BEST_CONNECTED))
-+ return;
-+
-+ /*
-+ * Allocate skbuffs greedily, even though we batch updates to the
-+ * receive ring. This creates a less bursty demand on the memory
-+ * allocator, so should reduce the chance of failed allocation requests
-+	 * both for ourselves and for other kernel subsystems.
-+ */
-+ batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
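-+	/*
-+	 * E.g. with rx_target = 64 and 12 requests already outstanding,
-+	 * batch_target = 52, so up to 52 skbuffs are queued before the
-+	 * batched refill below.
-+	 */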
-+ for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
-+ /*
-+ * Subtract dev_alloc_skb headroom (16 bytes) and shared info
-+		 * tailroom, then round down to an SKB_DATA_ALIGN boundary.
-+ */
-+ skb = alloc_xen_skb(
-+ ((PAGE_SIZE - sizeof(struct skb_shared_info)) &
-+ (-SKB_DATA_ALIGN(1))) - 16);
-+ if (skb == NULL) {
-+ /* Any skbuffs queued for refill? Force them out. */
-+ if (i != 0)
-+ goto refill;
-+ /* Could not allocate any skbuffs. Try again later. */
-+ mod_timer(&np->rx_refill_timer,
-+ jiffies + (HZ/10));
-+ return;
-+ }
-+ __skb_queue_tail(&np->rx_batch, skb);
-+ }
-+
-+ /* Is the batch large enough to be worthwhile? */
-+ if (i < (np->rx_target/2))
-+ return;
-+
-+ /* Adjust our fill target if we risked running out of buffers. */
-+ if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
-+ ((np->rx_target *= 2) > np->rx_max_target))
-+ np->rx_target = np->rx_max_target;
-+
-+ refill:
-+ for (i = 0; ; i++) {
-+ if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
-+ break;
-+
-+ skb->dev = dev;
-+
-+ id = GET_ID_FROM_FREELIST(np->rx_skbs);
-+
-+ np->rx_skbs[id] = skb;
-+
-+ RING_GET_REQUEST(&np->rx, req_prod + i)->id = id;
-+ ref = gnttab_claim_grant_reference(&np->gref_rx_head);
-+ BUG_ON((signed short)ref < 0);
-+ np->grant_rx_ref[id] = ref;
-+ gnttab_grant_foreign_transfer_ref(ref,
-+ np->xbdev->otherend_id,
-+ __pa(skb->head) >> PAGE_SHIFT);
-+ RING_GET_REQUEST(&np->rx, req_prod + i)->gref = ref;
-+ rx_pfn_array[i] = virt_to_mfn(skb->head);
-+
-+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+ /* Remove this page before passing back to Xen. */
-+ set_phys_to_machine(__pa(skb->head) >> PAGE_SHIFT,
-+ INVALID_P2M_ENTRY);
-+ MULTI_update_va_mapping(rx_mcl+i,
-+ (unsigned long)skb->head,
-+ __pte(0), 0);
-+ }
-+ }
-+
-+	/* Tell the balloon driver what is going on. */
-+ balloon_update_driver_allowance(i);
-+
-+ reservation.extent_start = rx_pfn_array;
-+ reservation.nr_extents = i;
-+ reservation.extent_order = 0;
-+ reservation.address_bits = 0;
-+ reservation.domid = DOMID_SELF;
-+
-+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+ /* After all PTEs have been zapped, flush the TLB. */
-+ rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
-+ UVMF_TLB_FLUSH|UVMF_ALL;
-+
-+ /* Give away a batch of pages. */
-+ rx_mcl[i].op = __HYPERVISOR_memory_op;
-+ rx_mcl[i].args[0] = XENMEM_decrease_reservation;
-+ rx_mcl[i].args[1] = (unsigned long)&reservation;
-+
-+ /* Zap PTEs and give away pages in one big multicall. */
-+ (void)HYPERVISOR_multicall(rx_mcl, i+1);
-+
-+ /* Check return status of HYPERVISOR_memory_op(). */
-+ if (unlikely(rx_mcl[i].result != i))
-+ panic("Unable to reduce memory reservation\n");
-+ } else
-+ if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
-+ &reservation) != i)
-+ panic("Unable to reduce memory reservation\n");
-+
-+ /* Above is a suitable barrier to ensure backend will see requests. */
-+ np->rx.req_prod_pvt = req_prod + i;
-+ RING_PUSH_REQUESTS(&np->rx);
-+}
-+
-+
-+static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
-+{
-+ unsigned short id;
-+ struct netfront_info *np = netdev_priv(dev);
-+ netif_tx_request_t *tx;
-+ RING_IDX i;
-+ grant_ref_t ref;
-+ unsigned long mfn;
-+ int notify;
-+
-+ if (unlikely(np->tx_full)) {
-+ printk(KERN_ALERT "%s: full queue wasn't stopped!\n",
-+ dev->name);
-+ netif_stop_queue(dev);
-+ goto drop;
-+ }
-+
-+ if (unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
-+ PAGE_SIZE)) {
-+ struct sk_buff *nskb;
-+ if (unlikely((nskb = alloc_xen_skb(skb->len)) == NULL))
-+ goto drop;
-+ skb_put(nskb, skb->len);
-+ memcpy(nskb->data, skb->data, skb->len);
-+ nskb->dev = skb->dev;
-+ dev_kfree_skb(skb);
-+ skb = nskb;
-+ }
-+
-+ spin_lock_irq(&np->tx_lock);
-+
-+ if (np->backend_state != BEST_CONNECTED) {
-+ spin_unlock_irq(&np->tx_lock);
-+ goto drop;
-+ }
-+
-+ i = np->tx.req_prod_pvt;
-+
-+ id = GET_ID_FROM_FREELIST(np->tx_skbs);
-+ np->tx_skbs[id] = skb;
-+
-+ tx = RING_GET_REQUEST(&np->tx, i);
-+
-+ tx->id = id;
-+ ref = gnttab_claim_grant_reference(&np->gref_tx_head);
-+ BUG_ON((signed short)ref < 0);
-+ mfn = virt_to_mfn(skb->data);
-+ gnttab_grant_foreign_access_ref(
-+ ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
-+ tx->gref = np->grant_tx_ref[id] = ref;
-+ tx->offset = (unsigned long)skb->data & ~PAGE_MASK;
-+ tx->size = skb->len;
-+ tx->flags = (skb->ip_summed == CHECKSUM_HW) ? NETTXF_csum_blank : 0;
-+
-+ np->tx.req_prod_pvt = i + 1;
-+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
-+ if (notify)
-+ notify_remote_via_irq(np->irq);
-+
-+ network_tx_buf_gc(dev);
-+
-+ if (RING_FULL(&np->tx)) {
-+ np->tx_full = 1;
-+ netif_stop_queue(dev);
-+ }
-+
-+ spin_unlock_irq(&np->tx_lock);
-+
-+ np->stats.tx_bytes += skb->len;
-+ np->stats.tx_packets++;
-+
-+ return 0;
-+
-+ drop:
-+ np->stats.tx_dropped++;
-+ dev_kfree_skb(skb);
-+ return 0;
-+}
-+
-+static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
-+{
-+ struct net_device *dev = dev_id;
-+ struct netfront_info *np = netdev_priv(dev);
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&np->tx_lock, flags);
-+ network_tx_buf_gc(dev);
-+ spin_unlock_irqrestore(&np->tx_lock, flags);
-+
-+ if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx) &&
-+ (np->user_state == UST_OPEN))
-+ netif_rx_schedule(dev);
-+
-+ return IRQ_HANDLED;
-+}
-+
-+
-+static int netif_poll(struct net_device *dev, int *pbudget)
-+{
-+ struct netfront_info *np = netdev_priv(dev);
-+ struct sk_buff *skb, *nskb;
-+ netif_rx_response_t *rx;
-+ RING_IDX i, rp;
-+ mmu_update_t *mmu = rx_mmu;
-+ multicall_entry_t *mcl = rx_mcl;
-+ int work_done, budget, more_to_do = 1;
-+ struct sk_buff_head rxq;
-+ unsigned long flags;
-+ unsigned long mfn;
-+ grant_ref_t ref;
-+
-+ spin_lock(&np->rx_lock);
-+
-+ if (np->backend_state != BEST_CONNECTED) {
-+ spin_unlock(&np->rx_lock);
-+ return 0;
-+ }
-+
-+ skb_queue_head_init(&rxq);
-+
-+ if ((budget = *pbudget) > dev->quota)
-+ budget = dev->quota;
-+ rp = np->rx.sring->rsp_prod;
-+ rmb(); /* Ensure we see queued responses up to 'rp'. */
-+
-+ for (i = np->rx.rsp_cons, work_done = 0;
-+ (i != rp) && (work_done < budget);
-+ i++, work_done++) {
-+ rx = RING_GET_RESPONSE(&np->rx, i);
-+
-+ /*
-+ * This definitely indicates a bug, either in this driver or
-+ * in the backend driver. In future this should flag the bad
-+		 * situation to the system controller so it can reboot the backend.
-+ */
-+ if ((ref = np->grant_rx_ref[rx->id]) == GRANT_INVALID_REF) {
-+ WPRINTK("Bad rx response id %d.\n", rx->id);
-+ work_done--;
-+ continue;
-+ }
-+
-+ /* Memory pressure, insufficient buffer headroom, ... */
-+ if ((mfn = gnttab_end_foreign_transfer_ref(ref)) == 0) {
-+ if (net_ratelimit())
-+ WPRINTK("Unfulfilled rx req (id=%d, st=%d).\n",
-+ rx->id, rx->status);
-+ RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id =
-+ rx->id;
-+ RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref =
-+ ref;
-+ np->rx.req_prod_pvt++;
-+ RING_PUSH_REQUESTS(&np->rx);
-+ work_done--;
-+ continue;
-+ }
-+
-+ gnttab_release_grant_reference(&np->gref_rx_head, ref);
-+ np->grant_rx_ref[rx->id] = GRANT_INVALID_REF;
-+
-+ skb = np->rx_skbs[rx->id];
-+ ADD_ID_TO_FREELIST(np->rx_skbs, rx->id);
-+
-+ /* NB. We handle skb overflow later. */
-+ skb->data = skb->head + rx->offset;
-+ skb->len = rx->status;
-+ skb->tail = skb->data + skb->len;
-+
-+ if (rx->flags & NETRXF_csum_valid)
-+ skb->ip_summed = CHECKSUM_UNNECESSARY;
-+
-+ np->stats.rx_packets++;
-+ np->stats.rx_bytes += rx->status;
-+
-+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-+ /* Remap the page. */
-+ MULTI_update_va_mapping(mcl, (unsigned long)skb->head,
-+ pfn_pte_ma(mfn, PAGE_KERNEL),
-+ 0);
-+ mcl++;
-+ mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT)
-+ | MMU_MACHPHYS_UPDATE;
-+ mmu->val = __pa(skb->head) >> PAGE_SHIFT;
-+ mmu++;
-+
-+ set_phys_to_machine(__pa(skb->head) >> PAGE_SHIFT,
-+ mfn);
-+ }
-+
-+ __skb_queue_tail(&rxq, skb);
-+ }
-+
-+ /* Some pages are no longer absent... */
-+ balloon_update_driver_allowance(-work_done);
-+
-+ /* Do all the remapping work, and M2P updates, in one big hypercall. */
-+ if (likely((mcl - rx_mcl) != 0)) {
-+ mcl->op = __HYPERVISOR_mmu_update;
-+ mcl->args[0] = (unsigned long)rx_mmu;
-+ mcl->args[1] = mmu - rx_mmu;
-+ mcl->args[2] = 0;
-+ mcl->args[3] = DOMID_SELF;
-+ mcl++;
-+ (void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
-+ }
-+
-+ while ((skb = __skb_dequeue(&rxq)) != NULL) {
-+ if (skb->len > (dev->mtu + ETH_HLEN)) {
-+ if (net_ratelimit())
-+ printk(KERN_INFO "Received packet too big for "
-+ "MTU (%d > %d)\n",
-+ skb->len - ETH_HLEN, dev->mtu);
-+ skb->len = 0;
-+ skb->tail = skb->data;
-+ init_skb_shinfo(skb);
-+ dev_kfree_skb(skb);
-+ continue;
-+ }
-+
-+ /*
-+ * Enough room in skbuff for the data we were passed? Also,
-+ * Linux expects at least 16 bytes headroom in each rx buffer.
-+ */
-+ if (unlikely(skb->tail > skb->end) ||
-+ unlikely((skb->data - skb->head) < 16)) {
-+ if (net_ratelimit()) {
-+ if (skb->tail > skb->end)
-+ printk(KERN_INFO "Received packet "
-+ "is %zd bytes beyond tail.\n",
-+ skb->tail - skb->end);
-+ else
-+ printk(KERN_INFO "Received packet "
-+ "is %zd bytes before head.\n",
-+ 16 - (skb->data - skb->head));
-+ }
-+
-+ nskb = alloc_xen_skb(skb->len + 2);
-+ if (nskb != NULL) {
-+ skb_reserve(nskb, 2);
-+ skb_put(nskb, skb->len);
-+ memcpy(nskb->data, skb->data, skb->len);
-+ nskb->dev = skb->dev;
-+ nskb->ip_summed = skb->ip_summed;
-+ }
-+
-+ /* Reinitialise and then destroy the old skbuff. */
-+ skb->len = 0;
-+ skb->tail = skb->data;
-+ init_skb_shinfo(skb);
-+ dev_kfree_skb(skb);
-+
-+ /* Switch old for new, if we copied the buffer. */
-+ if ((skb = nskb) == NULL)
-+ continue;
-+ }
-+
-+ /* Set the shinfo area, which is hidden behind the data. */
-+ init_skb_shinfo(skb);
-+ /* Ethernet work: Delayed to here as it peeks the header. */
-+ skb->protocol = eth_type_trans(skb, dev);
-+
-+ /* Pass it up. */
-+ netif_receive_skb(skb);
-+ dev->last_rx = jiffies;
-+ }
-+
-+ np->rx.rsp_cons = i;
-+
-+ /* If we get a callback with very few responses, reduce fill target. */
-+ /* NB. Note exponential increase, linear decrease. */
-+ if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
-+ ((3*np->rx_target) / 4)) &&
-+ (--np->rx_target < np->rx_min_target))
-+ np->rx_target = np->rx_min_target;
-+
-+ network_alloc_rx_buffers(dev);
-+
-+ *pbudget -= work_done;
-+ dev->quota -= work_done;
-+
-+ if (work_done < budget) {
-+ local_irq_save(flags);
-+
-+ RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
-+ if (!more_to_do)
-+ __netif_rx_complete(dev);
-+
-+ local_irq_restore(flags);
-+ }
-+
-+ spin_unlock(&np->rx_lock);
-+
-+ return more_to_do;
-+}
-+
-+
-+static int network_close(struct net_device *dev)
-+{
-+ struct netfront_info *np = netdev_priv(dev);
-+ np->user_state = UST_CLOSED;
-+ netif_stop_queue(np->netdev);
-+ return 0;
-+}
-+
-+
-+static struct net_device_stats *network_get_stats(struct net_device *dev)
-+{
-+ struct netfront_info *np = netdev_priv(dev);
-+ return &np->stats;
-+}
-+
-+static void network_connect(struct net_device *dev)
-+{
-+ struct netfront_info *np;
-+ int i, requeue_idx;
-+ netif_tx_request_t *tx;
-+ struct sk_buff *skb;
-+
-+ np = netdev_priv(dev);
-+ spin_lock_irq(&np->tx_lock);
-+ spin_lock(&np->rx_lock);
-+
-+ /* Recovery procedure: */
-+
-+ /* Step 1: Reinitialise variables. */
-+ np->tx_full = 0;
-+
-+ /*
-+ * Step 2: Rebuild the RX and TX ring contents.
-+ * NB. We could just free the queued TX packets now but we hope
-+ * that sending them out might do some good. We have to rebuild
-+ * the RX ring because some of our pages are currently flipped out
-+ * so we can't just free the RX skbs.
-+ * NB2. Freelist index entries are always going to be less than
-+	 * __PAGE_OFFSET, whereas pointers to skbs will always be equal to or
-+ * greater than __PAGE_OFFSET: we use this property to distinguish
-+ * them.
-+ */
-+
-+ /*
-+ * Rebuild the TX buffer freelist and the TX ring itself.
-+ * NB. This reorders packets. We could keep more private state
-+ * to avoid this but maybe it doesn't matter so much given the
-+ * interface has been down.
-+ */
-+ for (requeue_idx = 0, i = 1; i <= NET_TX_RING_SIZE; i++) {
-+ if ((unsigned long)np->tx_skbs[i] < __PAGE_OFFSET)
-+ continue;
-+
-+ skb = np->tx_skbs[i];
-+
-+ tx = RING_GET_REQUEST(&np->tx, requeue_idx);
-+ requeue_idx++;
-+
-+ tx->id = i;
-+ gnttab_grant_foreign_access_ref(
-+ np->grant_tx_ref[i], np->xbdev->otherend_id,
-+ virt_to_mfn(np->tx_skbs[i]->data),
-+ GNTMAP_readonly);
-+ tx->gref = np->grant_tx_ref[i];
-+ tx->offset = (unsigned long)skb->data & ~PAGE_MASK;
-+ tx->size = skb->len;
-+ tx->flags = (skb->ip_summed == CHECKSUM_HW) ?
-+ NETTXF_csum_blank : 0;
-+
-+ np->stats.tx_bytes += skb->len;
-+ np->stats.tx_packets++;
-+ }
-+
-+ np->tx.req_prod_pvt = requeue_idx;
-+ RING_PUSH_REQUESTS(&np->tx);
-+
-+ /* Rebuild the RX buffer freelist and the RX ring itself. */
-+ for (requeue_idx = 0, i = 1; i <= NET_RX_RING_SIZE; i++) {
-+ if ((unsigned long)np->rx_skbs[i] < __PAGE_OFFSET)
-+ continue;
-+ gnttab_grant_foreign_transfer_ref(
-+ np->grant_rx_ref[i], np->xbdev->otherend_id,
-+ __pa(np->rx_skbs[i]->data) >> PAGE_SHIFT);
-+ RING_GET_REQUEST(&np->rx, requeue_idx)->gref =
-+ np->grant_rx_ref[i];
-+ RING_GET_REQUEST(&np->rx, requeue_idx)->id = i;
-+ requeue_idx++;
-+ }
-+
-+ np->rx.req_prod_pvt = requeue_idx;
-+ RING_PUSH_REQUESTS(&np->rx);
-+
-+ /*
-+ * Step 3: All public and private state should now be sane. Get
-+ * ready to start sending and receiving packets and give the driver
-+ * domain a kick because we've probably just requeued some
-+ * packets.
-+ */
-+ np->backend_state = BEST_CONNECTED;
-+ notify_remote_via_irq(np->irq);
-+ network_tx_buf_gc(dev);
-+
-+ if (np->user_state == UST_OPEN)
-+ netif_start_queue(dev);
-+
-+ spin_unlock(&np->rx_lock);
-+ spin_unlock_irq(&np->tx_lock);
-+}
-+
-+static void show_device(struct netfront_info *np)
-+{
-+#ifdef DEBUG
-+ if (np) {
-+ IPRINTK("<vif handle=%u %s(%s) evtchn=%u tx=%p rx=%p>\n",
-+ np->handle,
-+ be_state_name[np->backend_state],
-+ np->user_state ? "open" : "closed",
-+ np->evtchn,
-+ np->tx,
-+ np->rx);
-+ } else
-+ IPRINTK("<vif NULL>\n");
-+#endif
-+}
-+
-+static void netif_uninit(struct net_device *dev)
-+{
-+ struct netfront_info *np = netdev_priv(dev);
-+ gnttab_free_grant_references(np->gref_tx_head);
-+ gnttab_free_grant_references(np->gref_rx_head);
-+}
-+
-+static struct ethtool_ops network_ethtool_ops =
-+{
-+ .get_tx_csum = ethtool_op_get_tx_csum,
-+ .set_tx_csum = ethtool_op_set_tx_csum,
-+};
-+
-+/** Create a network device.
-+ * @param handle device handle
-+ * @param val return parameter for created device
-+ * @return 0 on success, error code otherwise
-+ */
-+static int create_netdev(int handle, struct xenbus_device *dev,
-+ struct net_device **val)
-+{
-+ int i, err = 0;
-+ struct net_device *netdev = NULL;
-+ struct netfront_info *np = NULL;
-+
-+ if ((netdev = alloc_etherdev(sizeof(struct netfront_info))) == NULL) {
-+ printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
-+ __FUNCTION__);
-+ err = -ENOMEM;
-+ goto exit;
-+ }
-+
-+ np = netdev_priv(netdev);
-+ np->backend_state = BEST_CLOSED;
-+ np->user_state = UST_CLOSED;
-+ np->handle = handle;
-+ np->xbdev = dev;
-+
-+ spin_lock_init(&np->tx_lock);
-+ spin_lock_init(&np->rx_lock);
-+
-+ skb_queue_head_init(&np->rx_batch);
-+ np->rx_target = RX_DFL_MIN_TARGET;
-+ np->rx_min_target = RX_DFL_MIN_TARGET;
-+ np->rx_max_target = RX_MAX_TARGET;
-+
-+ init_timer(&np->rx_refill_timer);
-+ np->rx_refill_timer.data = (unsigned long)netdev;
-+ np->rx_refill_timer.function = rx_refill_timeout;
-+
-+ /* Initialise {tx,rx}_skbs as a free chain containing every entry. */
-+ for (i = 0; i <= NET_TX_RING_SIZE; i++) {
-+ np->tx_skbs[i] = (void *)((unsigned long) i+1);
-+ np->grant_tx_ref[i] = GRANT_INVALID_REF;
-+ }
-+
-+ for (i = 0; i <= NET_RX_RING_SIZE; i++) {
-+ np->rx_skbs[i] = (void *)((unsigned long) i+1);
-+ np->grant_rx_ref[i] = GRANT_INVALID_REF;
-+ }
-+
-+ /* A grant for every tx ring slot */
-+ if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
-+ &np->gref_tx_head) < 0) {
-+ printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
-+ err = -ENOMEM;
-+ goto exit;
-+ }
-+ /* A grant for every rx ring slot */
-+ if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
-+ &np->gref_rx_head) < 0) {
-+ printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
-+ gnttab_free_grant_references(np->gref_tx_head);
-+ err = -ENOMEM;
-+ goto exit;
-+ }
-+
-+ netdev->open = network_open;
-+ netdev->hard_start_xmit = network_start_xmit;
-+ netdev->stop = network_close;
-+ netdev->get_stats = network_get_stats;
-+ netdev->poll = netif_poll;
-+ netdev->uninit = netif_uninit;
-+ netdev->weight = 64;
-+ netdev->features = NETIF_F_IP_CSUM;
-+
-+ SET_ETHTOOL_OPS(netdev, &network_ethtool_ops);
-+ SET_MODULE_OWNER(netdev);
-+ SET_NETDEV_DEV(netdev, &dev->dev);
-+
-+ if ((err = register_netdev(netdev)) != 0) {
-+ printk(KERN_WARNING "%s> register_netdev err=%d\n",
-+ __FUNCTION__, err);
-+ goto exit_free_grefs;
-+ }
-+
-+ if ((err = xennet_proc_addif(netdev)) != 0) {
-+ unregister_netdev(netdev);
-+ goto exit_free_grefs;
-+ }
-+
-+ np->netdev = netdev;
-+
-+ exit:
-+ if (err != 0)
-+ kfree(netdev);
-+ else if (val != NULL)
-+ *val = netdev;
-+ return err;
-+
-+ exit_free_grefs:
-+ gnttab_free_grant_references(np->gref_tx_head);
-+ gnttab_free_grant_references(np->gref_rx_head);
-+ goto exit;
-+}
-+
-+/*
-+ * We use this notifier to send out a fake ARP reply to reset switches and
-+ * router ARP caches when an IP interface is brought up on a VIF.
-+ */
-+static int
-+inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr)
-+{
-+ struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
-+ struct net_device *dev = ifa->ifa_dev->dev;
-+
-+ /* UP event and is it one of our devices? */
-+ if (event == NETDEV_UP && dev->open == network_open)
-+ (void)send_fake_arp(dev);
-+
-+ return NOTIFY_DONE;
-+}
-+
-+
-+/* ** Close down ** */
-+
-+
-+/**
-+ * Handle the change of state of the backend to Closing. We must delete our
-+ * device-layer structures now, to ensure that writes are flushed through to
-+ * the backend. Once this is done, we can switch to Closed in
-+ * acknowledgement.
-+ */
-+static void netfront_closing(struct xenbus_device *dev)
-+{
-+ struct netfront_info *info = dev->data;
-+
-+ DPRINTK("netfront_closing: %s removed\n", dev->nodename);
-+
-+ close_netdev(info);
-+
-+ xenbus_switch_state(dev, XBT_NULL, XenbusStateClosed);
-+}
-+
-+
-+static int netfront_remove(struct xenbus_device *dev)
-+{
-+ struct netfront_info *info = dev->data;
-+
-+ DPRINTK("%s\n", dev->nodename);
-+
-+ netif_disconnect_backend(info);
-+ free_netdev(info->netdev);
-+
-+ return 0;
-+}
-+
-+
-+static void close_netdev(struct netfront_info *info)
-+{
-+ spin_lock_irq(&info->netdev->xmit_lock);
-+ netif_stop_queue(info->netdev);
-+ spin_unlock_irq(&info->netdev->xmit_lock);
-+
-+#ifdef CONFIG_PROC_FS
-+ xennet_proc_delif(info->netdev);
-+#endif
-+
-+ del_timer_sync(&info->rx_refill_timer);
-+
-+ unregister_netdev(info->netdev);
-+}
-+
-+
-+static void netif_disconnect_backend(struct netfront_info *info)
-+{
-+ /* Stop old i/f to prevent errors whilst we rebuild the state. */
-+ spin_lock_irq(&info->tx_lock);
-+ spin_lock(&info->rx_lock);
-+ info->backend_state = BEST_DISCONNECTED;
-+ spin_unlock(&info->rx_lock);
-+ spin_unlock_irq(&info->tx_lock);
-+
-+ if (info->irq)
-+ unbind_from_irqhandler(info->irq, info->netdev);
-+ info->evtchn = info->irq = 0;
-+
-+ end_access(info->tx_ring_ref, info->tx.sring);
-+ end_access(info->rx_ring_ref, info->rx.sring);
-+ info->tx_ring_ref = GRANT_INVALID_REF;
-+ info->rx_ring_ref = GRANT_INVALID_REF;
-+ info->tx.sring = NULL;
-+ info->rx.sring = NULL;
-+}
-+
-+
-+static void netif_free(struct netfront_info *info)
-+{
-+ close_netdev(info);
-+ netif_disconnect_backend(info);
-+ free_netdev(info->netdev);
-+}
-+
-+
-+static void end_access(int ref, void *page)
-+{
-+ if (ref != GRANT_INVALID_REF)
-+ gnttab_end_foreign_access(ref, 0, (unsigned long)page);
-+}
-+
-+
-+/* ** Driver registration ** */
-+
-+
-+static struct xenbus_device_id netfront_ids[] = {
-+ { "vif" },
-+ { "" }
-+};
-+
-+
-+static struct xenbus_driver netfront = {
-+ .name = "vif",
-+ .owner = THIS_MODULE,
-+ .ids = netfront_ids,
-+ .probe = netfront_probe,
-+ .remove = netfront_remove,
-+ .resume = netfront_resume,
-+ .otherend_changed = backend_changed,
-+};
-+
-+
-+static struct notifier_block notifier_inetdev = {
-+ .notifier_call = inetdev_notify,
-+ .next = NULL,
-+ .priority = 0
-+};
-+
-+static int __init netif_init(void)
-+{
-+ int err = 0;
-+
-+ if (xen_start_info->flags & SIF_INITDOMAIN)
-+ return 0;
-+
-+ if ((err = xennet_proc_init()) != 0)
-+ return err;
-+
-+ IPRINTK("Initialising virtual ethernet driver.\n");
-+
-+ (void)register_inetaddr_notifier(&notifier_inetdev);
-+
-+ return xenbus_register_frontend(&netfront);
-+}
-+module_init(netif_init);
-+
-+
-+static void netif_exit(void)
-+{
-+ unregister_inetaddr_notifier(&notifier_inetdev);
-+
-+ return xenbus_unregister_driver(&netfront);
-+}
-+module_exit(netif_exit);
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-+
-+
-+/* ** /proc **/
-+
-+
-+#ifdef CONFIG_PROC_FS
-+
-+#define TARGET_MIN 0UL
-+#define TARGET_MAX 1UL
-+#define TARGET_CUR 2UL
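-+/*
-+ * The selector above is packed into the low two bits of each proc
-+ * entry's data pointer; a struct net_device pointer is always at least
-+ * 4-byte aligned, so the encodings cannot collide. The round trip:
-+ *
-+ *   entry->data  = (void *)((unsigned long)dev | TARGET_MAX);
-+ *   dev          = (struct net_device *)((unsigned long)data & ~3UL);
-+ *   which_target = (long)data & 3;
-+ */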
-+
-+static int xennet_proc_read(
-+ char *page, char **start, off_t off, int count, int *eof, void *data)
-+{
-+ struct net_device *dev =
-+ (struct net_device *)((unsigned long)data & ~3UL);
-+ struct netfront_info *np = netdev_priv(dev);
-+ int len = 0, which_target = (long)data & 3;
-+
-+ switch (which_target) {
-+ case TARGET_MIN:
-+ len = sprintf(page, "%d\n", np->rx_min_target);
-+ break;
-+ case TARGET_MAX:
-+ len = sprintf(page, "%d\n", np->rx_max_target);
-+ break;
-+ case TARGET_CUR:
-+ len = sprintf(page, "%d\n", np->rx_target);
-+ break;
-+ }
-+
-+ *eof = 1;
-+ return len;
-+}
-+
-+static int xennet_proc_write(
-+ struct file *file, const char __user *buffer,
-+ unsigned long count, void *data)
-+{
-+ struct net_device *dev =
-+ (struct net_device *)((unsigned long)data & ~3UL);
-+ struct netfront_info *np = netdev_priv(dev);
-+ int which_target = (long)data & 3;
-+ char string[64];
-+ long target;
-+
-+ if (!capable(CAP_SYS_ADMIN))
-+ return -EPERM;
-+
-+ if (count <= 1)
-+ return -EBADMSG; /* runt */
-+ if (count > sizeof(string))
-+ return -EFBIG; /* too long */
-+
-+ if (copy_from_user(string, buffer, count))
-+ return -EFAULT;
-+ string[sizeof(string)-1] = '\0';
-+
-+ target = simple_strtol(string, NULL, 10);
-+ if (target < RX_MIN_TARGET)
-+ target = RX_MIN_TARGET;
-+ if (target > RX_MAX_TARGET)
-+ target = RX_MAX_TARGET;
-+
-+ spin_lock(&np->rx_lock);
-+
-+ switch (which_target) {
-+ case TARGET_MIN:
-+ if (target > np->rx_max_target)
-+ np->rx_max_target = target;
-+ np->rx_min_target = target;
-+ if (target > np->rx_target)
-+ np->rx_target = target;
-+ break;
-+ case TARGET_MAX:
-+ if (target < np->rx_min_target)
-+ np->rx_min_target = target;
-+ np->rx_max_target = target;
-+ if (target < np->rx_target)
-+ np->rx_target = target;
-+ break;
-+ case TARGET_CUR:
-+ break;
-+ }
-+
-+ network_alloc_rx_buffers(dev);
-+
-+ spin_unlock(&np->rx_lock);
-+
-+ return count;
-+}
-+
-+static int xennet_proc_init(void)
-+{
-+ if (proc_mkdir("xen/net", NULL) == NULL)
-+ return -ENOMEM;
-+ return 0;
-+}
-+
-+static int xennet_proc_addif(struct net_device *dev)
-+{
-+ struct proc_dir_entry *dir, *min, *max, *cur;
-+ char name[30];
-+
-+ sprintf(name, "xen/net/%s", dev->name);
-+
-+ dir = proc_mkdir(name, NULL);
-+ if (!dir)
-+ goto nomem;
-+
-+ min = create_proc_entry("rxbuf_min", 0644, dir);
-+ max = create_proc_entry("rxbuf_max", 0644, dir);
-+ cur = create_proc_entry("rxbuf_cur", 0444, dir);
-+ if (!min || !max || !cur)
-+ goto nomem;
-+
-+ min->read_proc = xennet_proc_read;
-+ min->write_proc = xennet_proc_write;
-+ min->data = (void *)((unsigned long)dev | TARGET_MIN);
-+
-+ max->read_proc = xennet_proc_read;
-+ max->write_proc = xennet_proc_write;
-+ max->data = (void *)((unsigned long)dev | TARGET_MAX);
-+
-+ cur->read_proc = xennet_proc_read;
-+ cur->write_proc = xennet_proc_write;
-+ cur->data = (void *)((unsigned long)dev | TARGET_CUR);
-+
-+ return 0;
-+
-+ nomem:
-+ xennet_proc_delif(dev);
-+ return -ENOMEM;
-+}
-+
-+static void xennet_proc_delif(struct net_device *dev)
-+{
-+ char name[30];
-+
-+ sprintf(name, "xen/net/%s/rxbuf_min", dev->name);
-+ remove_proc_entry(name, NULL);
-+
-+ sprintf(name, "xen/net/%s/rxbuf_max", dev->name);
-+ remove_proc_entry(name, NULL);
-+
-+ sprintf(name, "xen/net/%s/rxbuf_cur", dev->name);
-+ remove_proc_entry(name, NULL);
-+
-+ sprintf(name, "xen/net/%s", dev->name);
-+ remove_proc_entry(name, NULL);
-+}
-+
-+#endif
-+
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/pciback/Makefile b/drivers/xen/pciback/Makefile
-new file mode 100644
-index 0000000..e031caa
---- /dev/null
-+++ b/drivers/xen/pciback/Makefile
-@@ -0,0 +1,10 @@
-+obj-y += pciback.o
-+
-+pciback-y := pci_stub.o pciback_ops.o xenbus.o
-+pciback-y += conf_space.o conf_space_header.o
-+pciback-${CONFIG_XEN_PCIDEV_BACKEND_VPCI} += vpci.o
-+pciback-${CONFIG_XEN_PCIDEV_BACKEND_PASS} += passthrough.o
-+
-+ifeq ($(CONFIG_XEN_PCIDEV_BE_DEBUG),y)
-+EXTRA_CFLAGS += -DDEBUG
-+endif
-diff --git a/drivers/xen/pciback/conf_space.c b/drivers/xen/pciback/conf_space.c
-new file mode 100644
-index 0000000..f08eafa
---- /dev/null
-+++ b/drivers/xen/pciback/conf_space.c
-@@ -0,0 +1,324 @@
-+/*
-+ * PCI Backend - Functions for creating a virtual configuration space for
-+ * exported PCI Devices.
-+ * It's dangerous to allow PCI Driver Domains to change their
-+ * device's resources (memory, i/o ports, interrupts). We need to
-+ * restrict changes to certain PCI Configuration registers:
-+ * BARs, INTERRUPT_PIN, most registers in the header...
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/pci.h>
-+#include "pciback.h"
-+#include "conf_space.h"
-+
-+#define DEFINE_PCI_CONFIG(op,size,type) \
-+int pciback_##op##_config_##size \
-+(struct pci_dev *dev, int offset, type value, void *data) \
-+{ \
-+ return pci_##op##_config_##size (dev, offset, value); \
-+}
-+
-+DEFINE_PCI_CONFIG(read, byte, u8 *)
-+DEFINE_PCI_CONFIG(read, word, u16 *)
-+DEFINE_PCI_CONFIG(read, dword, u32 *)
-+
-+DEFINE_PCI_CONFIG(write, byte, u8)
-+DEFINE_PCI_CONFIG(write, word, u16)
-+DEFINE_PCI_CONFIG(write, dword, u32)
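-+/*
-+ * For reference, DEFINE_PCI_CONFIG(read, byte, u8 *) above expands to
-+ * a thin wrapper around the kernel accessor:
-+ *
-+ *   int pciback_read_config_byte(struct pci_dev *dev, int offset,
-+ *                                u8 *value, void *data)
-+ *   {
-+ *           return pci_read_config_byte(dev, offset, value);
-+ *   }
-+ */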
-+
-+static int conf_space_read(struct pci_dev *dev,
-+ struct config_field_entry *entry, int offset,
-+ u32 * value)
-+{
-+ int ret = 0;
-+ struct config_field *field = entry->field;
-+
-+ *value = 0;
-+
-+ switch (field->size) {
-+ case 1:
-+ if (field->u.b.read)
-+ ret = field->u.b.read(dev, offset, (u8 *) value,
-+ entry->data);
-+ break;
-+ case 2:
-+ if (field->u.w.read)
-+ ret = field->u.w.read(dev, offset, (u16 *) value,
-+ entry->data);
-+ break;
-+ case 4:
-+ if (field->u.dw.read)
-+ ret = field->u.dw.read(dev, offset, value, entry->data);
-+ break;
-+ }
-+ return ret;
-+}
-+
-+static int conf_space_write(struct pci_dev *dev,
-+ struct config_field_entry *entry, int offset,
-+ u32 value)
-+{
-+ int ret = 0;
-+ struct config_field *field = entry->field;
-+
-+ switch (field->size) {
-+ case 1:
-+ if (field->u.b.write)
-+ ret = field->u.b.write(dev, offset, (u8) value,
-+ entry->data);
-+ break;
-+ case 2:
-+ if (field->u.w.write)
-+ ret = field->u.w.write(dev, offset, (u16) value,
-+ entry->data);
-+ break;
-+ case 4:
-+ if (field->u.dw.write)
-+ ret = field->u.dw.write(dev, offset, value,
-+ entry->data);
-+ break;
-+ }
-+ return ret;
-+}
-+
-+static inline u32 get_mask(int size)
-+{
-+ if (size == 1)
-+ return 0xff;
-+ else if (size == 2)
-+ return 0xffff;
-+ else
-+ return 0xffffffff;
-+}
-+
-+static inline int valid_request(int offset, int size)
-+{
-+ /* Validate request (no un-aligned requests) */
-+ if ((size == 1 || size == 2 || size == 4) && (offset % size) == 0)
-+ return 1;
-+ return 0;
-+}
-+
-+static inline u32 merge_value(u32 val, u32 new_val, u32 new_val_mask,
-+ u32 offset)
-+{
-+ if (offset >= 0) {
-+ new_val_mask <<= (offset * 8);
-+ new_val <<= (offset * 8);
-+ } else {
-+ new_val_mask >>= (offset * -8);
-+ new_val >>= (offset * -8);
-+ }
-+ val = (val & ~new_val_mask) | (new_val & new_val_mask);
-+
-+ return val;
-+}
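-+/*
-+ * Worked example: a one-byte field landing one byte into a 32-bit
-+ * request, i.e. merge_value(0xaabbccdd, 0x11, 0xff, 1):
-+ *
-+ *   new_val_mask = 0xff << 8 = 0xff00
-+ *   new_val      = 0x11 << 8 = 0x1100
-+ *   result = (0xaabbccdd & ~0xff00) | 0x1100 = 0xaabb11dd
-+ *
-+ * A negative offset shifts the new value right instead, covering
-+ * fields that begin before the requested window.
-+ */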
-+
-+static int pcibios_err_to_errno(int err)
-+{
-+ switch (err) {
-+ case PCIBIOS_SUCCESSFUL:
-+ return XEN_PCI_ERR_success;
-+ case PCIBIOS_DEVICE_NOT_FOUND:
-+ return XEN_PCI_ERR_dev_not_found;
-+ case PCIBIOS_BAD_REGISTER_NUMBER:
-+ return XEN_PCI_ERR_invalid_offset;
-+ case PCIBIOS_FUNC_NOT_SUPPORTED:
-+ return XEN_PCI_ERR_not_implemented;
-+ case PCIBIOS_SET_FAILED:
-+ return XEN_PCI_ERR_access_denied;
-+ }
-+ return err;
-+}
-+
-+int pciback_config_read(struct pci_dev *dev, int offset, int size,
-+ u32 * ret_val)
-+{
-+ int err = 0;
-+ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
-+ struct config_field_entry *cfg_entry;
-+ struct config_field *field;
-+ int req_start, req_end, field_start, field_end;
-+ /* if read fails for any reason, return 0 (as if device didn't respond) */
-+ u32 value = 0, tmp_val;
-+
-+ if (unlikely(verbose_request))
-+ printk(KERN_DEBUG "pciback: %s: read %d bytes at 0x%x\n",
-+ pci_name(dev), size, offset);
-+
-+ if (!valid_request(offset, size)) {
-+ err = XEN_PCI_ERR_invalid_offset;
-+ goto out;
-+ }
-+
-+ /* Get the real value first, then modify as appropriate */
-+ switch (size) {
-+ case 1:
-+ err = pci_read_config_byte(dev, offset, (u8 *) & value);
-+ break;
-+ case 2:
-+ err = pci_read_config_word(dev, offset, (u16 *) & value);
-+ break;
-+ case 4:
-+ err = pci_read_config_dword(dev, offset, &value);
-+ break;
-+ }
-+
-+ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
-+ field = cfg_entry->field;
-+
-+ req_start = offset;
-+ req_end = offset + size;
-+ field_start = field->offset;
-+ field_end = field->offset + field->size;
-+
-+ if ((req_start >= field_start && req_start < field_end)
-+ || (req_end > field_start && req_end <= field_end)) {
-+ err = conf_space_read(dev, cfg_entry, offset, &tmp_val);
-+ if (err)
-+ goto out;
-+
-+ value = merge_value(value, tmp_val,
-+ get_mask(field->size),
-+ field_start - req_start);
-+ }
-+ }
-+
-+ out:
-+ if (unlikely(verbose_request))
-+ printk(KERN_DEBUG "pciback: %s: read %d bytes at 0x%x = %x\n",
-+ pci_name(dev), size, offset, value);
-+
-+ *ret_val = value;
-+ return pcibios_err_to_errno(err);
-+}
-+
-+int pciback_config_write(struct pci_dev *dev, int offset, int size, u32 value)
-+{
-+ int err = 0;
-+ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
-+ struct config_field_entry *cfg_entry;
-+ struct config_field *field;
-+ u32 tmp_val;
-+ int req_start, req_end, field_start, field_end;
-+
-+ if (unlikely(verbose_request))
-+ printk(KERN_DEBUG
-+ "pciback: %s: write request %d bytes at 0x%x = %x\n",
-+ pci_name(dev), size, offset, value);
-+
-+ if (!valid_request(offset, size))
-+ return XEN_PCI_ERR_invalid_offset;
-+
-+ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
-+ field = cfg_entry->field;
-+
-+ req_start = offset;
-+ req_end = offset + size;
-+ field_start = field->offset;
-+ field_end = field->offset + field->size;
-+
-+ if ((req_start >= field_start && req_start < field_end)
-+ || (req_end > field_start && req_end <= field_end)) {
-+ tmp_val = 0;
-+
-+ err = pciback_config_read(dev, offset, size, &tmp_val);
-+ if (err)
-+ break;
-+
-+ tmp_val = merge_value(tmp_val, value, get_mask(size),
-+ field_start - req_start);
-+
-+ err = conf_space_write(dev, cfg_entry, offset, tmp_val);
-+ }
-+ }
-+
-+ return pcibios_err_to_errno(err);
-+}
-+
-+void pciback_config_reset(struct pci_dev *dev)
-+{
-+ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
-+ struct config_field_entry *cfg_entry;
-+ struct config_field *field;
-+
-+ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
-+ field = cfg_entry->field;
-+
-+ if (field->reset)
-+ field->reset(dev, field->offset, cfg_entry->data);
-+ }
-+}
-+
-+void pciback_config_free(struct pci_dev *dev)
-+{
-+ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
-+ struct config_field_entry *cfg_entry, *t;
-+ struct config_field *field;
-+
-+ list_for_each_entry_safe(cfg_entry, t, &dev_data->config_fields, list) {
-+ list_del(&cfg_entry->list);
-+
-+ field = cfg_entry->field;
-+
-+ if (field->release)
-+ field->release(dev, field->offset, cfg_entry->data);
-+
-+ kfree(cfg_entry);
-+ }
-+}
-+
-+int pciback_config_add_field(struct pci_dev *dev, struct config_field *field)
-+{
-+ int err = 0;
-+ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
-+ struct config_field_entry *cfg_entry;
-+ void *tmp;
-+
-+ cfg_entry = kmalloc(sizeof(*cfg_entry), GFP_KERNEL);
-+ if (!cfg_entry) {
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+
-+ cfg_entry->data = NULL;
-+ cfg_entry->field = field;
-+
-+ if (field->init) {
-+ tmp = field->init(dev, field->offset);
-+
-+ if (IS_ERR(tmp)) {
-+ err = PTR_ERR(tmp);
-+ goto out;
-+ }
-+
-+ cfg_entry->data = tmp;
-+ }
-+
-+ list_add_tail(&cfg_entry->list, &dev_data->config_fields);
-+
-+ out:
-+ if (err)
-+ kfree(cfg_entry);
-+
-+ return err;
-+}
-+
-+/* This sets up the device's virtual configuration space to keep track of
-+ * certain registers (like the base address registers (BARs)) so that we can
-+ * keep the client from manipulating them directly.
-+ */
-+int pciback_config_init(struct pci_dev *dev)
-+{
-+ int err = 0;
-+ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
-+
-+ INIT_LIST_HEAD(&dev_data->config_fields);
-+
-+ err = pciback_config_header_add_fields(dev);
-+
-+ return err;
-+}
-diff --git a/drivers/xen/pciback/conf_space.h b/drivers/xen/pciback/conf_space.h
-new file mode 100644
-index 0000000..3ef8365
---- /dev/null
-+++ b/drivers/xen/pciback/conf_space.h
-@@ -0,0 +1,97 @@
-+/*
-+ * PCI Backend - Common data structures for overriding the configuration space
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+
-+#ifndef __XEN_PCIBACK_CONF_SPACE_H__
-+#define __XEN_PCIBACK_CONF_SPACE_H__
-+
-+#include <linux/list.h>
-+
-+typedef void *(*conf_field_init) (struct pci_dev * dev, int offset);
-+typedef void (*conf_field_reset) (struct pci_dev * dev, int offset, void *data);
-+typedef void (*conf_field_free) (struct pci_dev * dev, int offset, void *data);
-+
-+typedef int (*conf_dword_write) (struct pci_dev * dev, int offset, u32 value,
-+ void *data);
-+typedef int (*conf_word_write) (struct pci_dev * dev, int offset, u16 value,
-+ void *data);
-+typedef int (*conf_byte_write) (struct pci_dev * dev, int offset, u8 value,
-+ void *data);
-+typedef int (*conf_dword_read) (struct pci_dev * dev, int offset, u32 * value,
-+ void *data);
-+typedef int (*conf_word_read) (struct pci_dev * dev, int offset, u16 * value,
-+ void *data);
-+typedef int (*conf_byte_read) (struct pci_dev * dev, int offset, u8 * value,
-+ void *data);
-+
-+/* These are the fields within the configuration space which we
-+ * are interested in intercepting reads/writes to and changing their
-+ * values.
-+ */
-+struct config_field {
-+ unsigned int offset;
-+ unsigned int size;
-+ conf_field_init init;
-+ conf_field_reset reset;
-+ conf_field_free release;
-+ union {
-+ struct {
-+ conf_dword_write write;
-+ conf_dword_read read;
-+ } dw;
-+ struct {
-+ conf_word_write write;
-+ conf_word_read read;
-+ } w;
-+ struct {
-+ conf_byte_write write;
-+ conf_byte_read read;
-+ } b;
-+ } u;
-+};
-+
-+struct config_field_entry {
-+ struct list_head list;
-+ struct config_field *field;
-+ void *data;
-+};
-+
-+/* Add fields to a device - the add_fields macro expects to get a pointer to
-+ * the first entry in an array (of which the ending is marked by size==0)
-+ */
-+int pciback_config_add_field(struct pci_dev *dev, struct config_field *field);
-+static inline int pciback_config_add_fields(struct pci_dev *dev,
-+ struct config_field *field)
-+{
-+ int i, err = 0;
-+ for (i = 0; field[i].size != 0; i++) {
-+ err = pciback_config_add_field(dev, &field[i]);
-+ if (err)
-+ break;
-+ }
-+ return err;
-+}
-+
-+/* Initializers which add fields to the virtual configuration space
-+ * ** We could add initializers to allow a guest domain to touch
-+ * the capability lists (for power management, the AGP bridge, etc.)
-+ */
-+int pciback_config_header_add_fields(struct pci_dev *dev);
-+
-+/* Read/Write the real configuration space */
-+int pciback_read_config_byte(struct pci_dev *dev, int offset, u8 * value,
-+ void *data);
-+int pciback_read_config_word(struct pci_dev *dev, int offset, u16 * value,
-+ void *data);
-+int pciback_read_config_dword(struct pci_dev *dev, int offset, u32 * value,
-+ void *data);
-+int pciback_write_config_byte(struct pci_dev *dev, int offset, u8 value,
-+ void *data);
-+int pciback_write_config_word(struct pci_dev *dev, int offset, u16 value,
-+ void *data);
-+int pciback_write_config_dword(struct pci_dev *dev, int offset, u32 value,
-+ void *data);
-+
-+#endif /* __XEN_PCIBACK_CONF_SPACE_H__ */
-diff --git a/drivers/xen/pciback/conf_space_header.c b/drivers/xen/pciback/conf_space_header.c
-new file mode 100644
-index 0000000..17607d3
---- /dev/null
-+++ b/drivers/xen/pciback/conf_space_header.c
-@@ -0,0 +1,269 @@
-+/*
-+ * PCI Backend - Handles the virtual fields in the configuration space headers.
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/pci.h>
-+#include "pciback.h"
-+#include "conf_space.h"
-+
-+struct pci_bar_info {
-+ u32 val;
-+ u32 len_val;
-+ int which;
-+};
-+
-+#define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO))
-+#define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER)
-+
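-+/* Writes to the command register take effect on the real device: toggling
-+ * the memory/IO enable bits enables or disables it, PCI_COMMAND_MASTER
-+ * turns on bus mastering and PCI_COMMAND_INVALIDATE enables
-+ * memory-write-invalidate, before the raw value is written through.
-+ */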
-+static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
-+{
-+ if (!dev->is_enabled && is_enable_cmd(value)) {
-+ if (unlikely(verbose_request))
-+ printk(KERN_DEBUG "pciback: %s: enable\n",
-+ pci_name(dev));
-+ dev->is_enabled = 1;
-+ pcibios_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
-+ } else if (dev->is_enabled && !is_enable_cmd(value)) {
-+ if (unlikely(verbose_request))
-+ printk(KERN_DEBUG "pciback: %s: disable\n",
-+ pci_name(dev));
-+ pciback_disable_device(dev);
-+ }
-+
-+ if (!dev->is_busmaster && is_master_cmd(value)) {
-+ if (unlikely(verbose_request))
-+ printk(KERN_DEBUG "pciback: %s: set bus master\n",
-+ pci_name(dev));
-+ dev->is_busmaster = 1;
-+ pcibios_set_master(dev);
-+ }
-+
-+ if (value & PCI_COMMAND_INVALIDATE) {
-+ if (unlikely(verbose_request))
-+ printk(KERN_DEBUG
-+ "pciback: %s: enable memory-write-invalidate\n",
-+ pci_name(dev));
-+ pci_set_mwi(dev);
-+ }
-+
-+ return pci_write_config_word(dev, offset, value);
-+}
-+
-+static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data)
-+{
-+ struct pci_bar_info *bar = data;
-+
-+ if (unlikely(!bar)) {
-+ printk(KERN_WARNING "pciback: driver data not found for %s\n",
-+ pci_name(dev));
-+ return XEN_PCI_ERR_op_failed;
-+ }
-+
-+ /* A write to obtain the length must happen as a 32-bit write.
-+ * This does not (yet) support writing individual bytes
-+ */
-+ if (value == ~PCI_ROM_ADDRESS_ENABLE)
-+ bar->which = 1;
-+ else
-+ bar->which = 0;
-+
-+ /* Do we need to support enabling/disabling the rom address here? */
-+
-+ return 0;
-+}
-+
-+/* For the BARs, only allow writes which write ~0 or
-+ * the correct resource information
-+ * (Needed for when the driver probes the resource usage)
-+ */
-+static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data)
-+{
-+ struct pci_bar_info *bar = data;
-+
-+ if (unlikely(!bar)) {
-+ printk(KERN_WARNING "pciback: driver data not found for %s\n",
-+ pci_name(dev));
-+ return XEN_PCI_ERR_op_failed;
-+ }
-+
-+ /* A write to obtain the length must happen as a 32-bit write.
-+ * This does not (yet) support writing individual bytes
-+ */
-+ if (value == ~0)
-+ bar->which = 1;
-+ else
-+ bar->which = 0;
-+
-+ return 0;
-+}
-+
-+static int bar_read(struct pci_dev *dev, int offset, u32 * value, void *data)
-+{
-+ struct pci_bar_info *bar = data;
-+
-+ if (unlikely(!bar)) {
-+ printk(KERN_WARNING "pciback: driver data not found for %s\n",
-+ pci_name(dev));
-+ return XEN_PCI_ERR_op_failed;
-+ }
-+
-+ *value = bar->which ? bar->len_val : bar->val;
-+
-+ return 0;
-+}
-+
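-+/* Standard BAR sizing sequence: save the current value, write the all-ones
-+ * mask, read back the size bits, then restore the original value.
-+ */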
-+static inline void read_dev_bar(struct pci_dev *dev,
-+ struct pci_bar_info *bar_info, int offset,
-+ u32 len_mask)
-+{
-+ pci_read_config_dword(dev, offset, &bar_info->val);
-+ pci_write_config_dword(dev, offset, len_mask);
-+ pci_read_config_dword(dev, offset, &bar_info->len_val);
-+ pci_write_config_dword(dev, offset, bar_info->val);
-+}
-+
-+static void *bar_init(struct pci_dev *dev, int offset)
-+{
-+ struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
-+
-+ if (!bar)
-+ return ERR_PTR(-ENOMEM);
-+
-+ read_dev_bar(dev, bar, offset, ~0);
-+ bar->which = 0;
-+
-+ return bar;
-+}
-+
-+static void *rom_init(struct pci_dev *dev, int offset)
-+{
-+ struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
-+
-+ if (!bar)
-+ return ERR_PTR(-ENOMEM);
-+
-+ read_dev_bar(dev, bar, offset, ~PCI_ROM_ADDRESS_ENABLE);
-+ bar->which = 0;
-+
-+ return bar;
-+}
-+
-+static void bar_reset(struct pci_dev *dev, int offset, void *data)
-+{
-+ struct pci_bar_info *bar = data;
-+
-+ bar->which = 0;
-+}
-+
-+static void bar_release(struct pci_dev *dev, int offset, void *data)
-+{
-+ kfree(data);
-+}
-+
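-+/* The guest sees the IRQ the backend kernel assigned to the device
-+ * (dev->irq), not whatever happens to be in the hardware register.
-+ */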
-+static int interrupt_read(struct pci_dev *dev, int offset, u8 * value,
-+ void *data)
-+{
-+ *value = (u8) dev->irq;
-+
-+ return 0;
-+}
-+
-+struct config_field header_common[] = {
-+ {
-+ .offset = PCI_COMMAND,
-+ .size = 2,
-+ .u.w.read = pciback_read_config_word,
-+ .u.w.write = command_write,
-+ },
-+ {
-+ .offset = PCI_INTERRUPT_LINE,
-+ .size = 1,
-+ .u.b.read = interrupt_read,
-+ .u.b.write = NULL,
-+ },
-+ {
-+ /* Any side effects of letting driver domain control cache line? */
-+ .offset = PCI_CACHE_LINE_SIZE,
-+ .size = 1,
-+ .u.b.read = pciback_read_config_byte,
-+ .u.b.write = pciback_write_config_byte,
-+ },
-+ {
-+ .size = 0,
-+ },
-+};
-+
-+#define CFG_FIELD_BAR(reg_offset) \
-+ { \
-+ .offset = reg_offset, \
-+ .size = 4, \
-+ .init = bar_init, \
-+ .reset = bar_reset, \
-+ .release = bar_release, \
-+ .u.dw.read = bar_read, \
-+ .u.dw.write = bar_write, \
-+ }
-+
-+#define CFG_FIELD_ROM(reg_offset) \
-+ { \
-+ .offset = reg_offset, \
-+ .size = 4, \
-+ .init = rom_init, \
-+ .reset = bar_reset, \
-+ .release = bar_release, \
-+ .u.dw.read = bar_read, \
-+ .u.dw.write = rom_write, \
-+ }
-+
-+struct config_field header_0[] = {
-+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_0),
-+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_1),
-+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_2),
-+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_3),
-+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_4),
-+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_5),
-+ CFG_FIELD_ROM(PCI_ROM_ADDRESS),
-+ {
-+ .size = 0,
-+ },
-+};
-+
-+struct config_field header_1[] = {
-+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_0),
-+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_1),
-+ CFG_FIELD_ROM(PCI_ROM_ADDRESS1),
-+ {
-+ .size = 0,
-+ },
-+};
-+
-+int pciback_config_header_add_fields(struct pci_dev *dev)
-+{
-+ int err;
-+
-+ err = pciback_config_add_fields(dev, header_common);
-+ if (err)
-+ goto out;
-+
-+ switch (dev->hdr_type) {
-+ case PCI_HEADER_TYPE_NORMAL:
-+ err = pciback_config_add_fields(dev, header_0);
-+ break;
-+
-+ case PCI_HEADER_TYPE_BRIDGE:
-+ err = pciback_config_add_fields(dev, header_1);
-+ break;
-+
-+ default:
-+ err = -EINVAL;
-+ printk(KERN_ERR "pciback: %s: Unsupported header type %d!\n",
-+ pci_name(dev), dev->hdr_type);
-+ break;
-+ }
-+
-+ out:
-+ return err;
-+}
-diff --git a/drivers/xen/pciback/passthrough.c b/drivers/xen/pciback/passthrough.c
-new file mode 100644
-index 0000000..e5b7fbb
---- /dev/null
-+++ b/drivers/xen/pciback/passthrough.c
-@@ -0,0 +1,116 @@
-+/*
-+ * PCI Backend - Provides restricted access to the real PCI bus topology
-+ * to the frontend
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+
-+#include <linux/list.h>
-+#include <linux/pci.h>
-+#include "pciback.h"
-+
-+struct passthrough_dev_data {
-+ struct list_head dev_list;
-+};
-+
-+struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
-+ unsigned int domain, unsigned int bus,
-+ unsigned int devfn)
-+{
-+ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
-+ struct pci_dev_entry *dev_entry;
-+
-+ list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
-+ if (domain == (unsigned int)pci_domain_nr(dev_entry->dev->bus)
-+ && bus == (unsigned int)dev_entry->dev->bus->number
-+ && devfn == dev_entry->dev->devfn)
-+ return dev_entry->dev;
-+ }
-+
-+ return NULL;
-+}
-+
-+/* Must hold pciback_device->dev_lock when calling this */
-+int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
-+{
-+ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
-+ struct pci_dev_entry *dev_entry;
-+
-+ dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
-+ if (!dev_entry)
-+ return -ENOMEM;
-+ dev_entry->dev = dev;
-+
-+ list_add_tail(&dev_entry->list, &dev_data->dev_list);
-+
-+ return 0;
-+}
-+
-+int pciback_init_devices(struct pciback_device *pdev)
-+{
-+ struct passthrough_dev_data *dev_data;
-+
-+ dev_data = kmalloc(sizeof(*dev_data), GFP_KERNEL);
-+ if (!dev_data)
-+ return -ENOMEM;
-+
-+ INIT_LIST_HEAD(&dev_data->dev_list);
-+
-+ pdev->pci_dev_data = dev_data;
-+
-+ return 0;
-+}
-+
-+int pciback_publish_pci_roots(struct pciback_device *pdev,
-+ publish_pci_root_cb publish_root_cb)
-+{
-+ int err = 0;
-+ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
-+ struct pci_dev_entry *dev_entry, *e;
-+ struct pci_dev *dev;
-+ int found;
-+ unsigned int domain, bus;
-+
-+ list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
-+ /* Only publish this device as a root if none of its
-+ * parent bridges are exported
-+ */
-+ found = 0;
-+ dev = dev_entry->dev->bus->self;
-+ for (; !found && dev != NULL; dev = dev->bus->self) {
-+ list_for_each_entry(e, &dev_data->dev_list, list) {
-+ if (dev == e->dev) {
-+ found = 1;
-+ break;
-+ }
-+ }
-+ }
-+
-+ domain = (unsigned int)pci_domain_nr(dev_entry->dev->bus);
-+ bus = (unsigned int)dev_entry->dev->bus->number;
-+
-+ if (!found) {
-+ err = publish_root_cb(pdev, domain, bus);
-+ if (err)
-+ break;
-+ }
-+ }
-+
-+ return err;
-+}
-+
-+/* Must hold pciback_device->dev_lock when calling this */
-+void pciback_release_devices(struct pciback_device *pdev)
-+{
-+ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
-+ struct pci_dev_entry *dev_entry, *t;
-+
-+ list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
-+ list_del(&dev_entry->list);
-+ pcistub_put_pci_dev(dev_entry->dev);
-+ kfree(dev_entry);
-+ }
-+
-+ kfree(dev_data);
-+ pdev->pci_dev_data = NULL;
-+}
-diff --git a/drivers/xen/pciback/pci_stub.c b/drivers/xen/pciback/pci_stub.c
-new file mode 100644
-index 0000000..aa76d75
---- /dev/null
-+++ b/drivers/xen/pciback/pci_stub.c
-@@ -0,0 +1,377 @@
-+/*
-+ * PCI Stub Driver - Grabs devices in backend to be exported later
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/list.h>
-+#include <linux/spinlock.h>
-+#include <asm/atomic.h>
-+#include "pciback.h"
-+
-+static char *pci_devs_to_hide = NULL;
-+module_param_named(hide, pci_devs_to_hide, charp, 0444);
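-+/* Format: a list of (domain:bus:slot.func) or (bus:slot.func) segments in
-+ * hex, e.g. hide=(01:02.0)(0000:03:00.1); parsed in pcistub_init().
-+ */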
-+
-+struct pci_stub_device_id {
-+ struct list_head slot_list;
-+ int domain;
-+ unsigned char bus;
-+ unsigned int devfn;
-+};
-+LIST_HEAD(pci_stub_device_ids);
-+
-+struct pci_stub_device {
-+ struct list_head dev_list;
-+ struct pci_dev *dev;
-+ atomic_t in_use;
-+};
-+/* Access to the pci_stub_devices & seized_devices lists and the
-+ * initialize_devices flag must be protected by pci_stub_devices_lock
-+ */
-+DEFINE_SPINLOCK(pci_stub_devices_lock);
-+LIST_HEAD(pci_stub_devices);
-+
-+/* wait for device_initcall before initializing our devices
-+ * (see pcistub_init_devices_late)
-+ */
-+static int initialize_devices = 0;
-+LIST_HEAD(seized_devices);
-+
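-+/* in_use doubles as a one-slot lock: it starts at 1 (free) and
-+ * atomic_dec_and_test() takes it to 0 on a successful claim; a failed
-+ * claim restores the count and returns NULL.
-+ */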
-+static inline struct pci_dev *get_pci_dev(struct pci_stub_device *psdev)
-+{
-+ if (atomic_dec_and_test(&psdev->in_use))
-+ return psdev->dev;
-+ else {
-+ atomic_inc(&psdev->in_use);
-+ return NULL;
-+ }
-+}
-+
-+struct pci_dev *pcistub_get_pci_dev_by_slot(int domain, int bus,
-+ int slot, int func)
-+{
-+ struct pci_stub_device *psdev;
-+ struct pci_dev *found_dev = NULL;
-+
-+ spin_lock(&pci_stub_devices_lock);
-+
-+ list_for_each_entry(psdev, &pci_stub_devices, dev_list) {
-+ if (psdev->dev != NULL
-+ && domain == pci_domain_nr(psdev->dev->bus)
-+ && bus == psdev->dev->bus->number
-+ && PCI_DEVFN(slot, func) == psdev->dev->devfn) {
-+ found_dev = get_pci_dev(psdev);
-+ break;
-+ }
-+ }
-+
-+ spin_unlock(&pci_stub_devices_lock);
-+ return found_dev;
-+}
-+
-+struct pci_dev *pcistub_get_pci_dev(struct pci_dev *dev)
-+{
-+ struct pci_stub_device *psdev;
-+ struct pci_dev *found_dev = NULL;
-+
-+ spin_lock(&pci_stub_devices_lock);
-+
-+ list_for_each_entry(psdev, &pci_stub_devices, dev_list) {
-+ if (psdev->dev == dev) {
-+ found_dev = get_pci_dev(psdev);
-+ break;
-+ }
-+ }
-+
-+ spin_unlock(&pci_stub_devices_lock);
-+ return found_dev;
-+}
-+
-+void pcistub_put_pci_dev(struct pci_dev *dev)
-+{
-+ struct pci_stub_device *psdev;
-+
-+ spin_lock(&pci_stub_devices_lock);
-+
-+ list_for_each_entry(psdev, &pci_stub_devices, dev_list) {
-+ if (psdev->dev == dev) {
-+ /* Cleanup our device
-+ * (so it's ready for the next domain)
-+ */
-+ pciback_reset_device(psdev->dev);
-+
-+ atomic_inc(&psdev->in_use);
-+ break;
-+ }
-+ }
-+
-+ spin_unlock(&pci_stub_devices_lock);
-+}
-+
-+static int __devinit pcistub_match(struct pci_dev *dev,
-+ struct pci_stub_device_id *pdev_id)
-+{
-+	/* Match the specified device by domain, bus, slot and func, or match
-+	 * any of the device's parent bridges.
-+	 */
-+ for (; dev != NULL; dev = dev->bus->self) {
-+ if (pci_domain_nr(dev->bus) == pdev_id->domain
-+ && dev->bus->number == pdev_id->bus
-+ && dev->devfn == pdev_id->devfn)
-+ return 1;
-+ }
-+
-+ return 0;
-+}
-+
-+static int __devinit pcistub_init_device(struct pci_dev *dev)
-+{
-+ struct pciback_dev_data *dev_data;
-+ int err = 0;
-+
-+ /* The PCI backend is not intended to be a module (or to work with
-+ * removable PCI devices (yet). If it were, pciback_config_free()
-+ * would need to be called somewhere to free the memory allocated
-+ * here and then to call kfree(pci_get_drvdata(psdev->dev)).
-+ */
-+ dev_data = kmalloc(sizeof(*dev_data), GFP_KERNEL);
-+ if (!dev_data) {
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+ pci_set_drvdata(dev, dev_data);
-+
-+ err = pciback_config_init(dev);
-+ if (err)
-+ goto out;
-+
-+ /* HACK: Force device (& ACPI) to determine what IRQ it's on - we
-+ * must do this here because pcibios_enable_device may specify
-+ * the pci device's true irq (and possibly its other resources)
-+ * if they differ from what's in the configuration space.
-+ * This makes the assumption that the device's resources won't
-+ * change after this point (otherwise this code may break!)
-+ */
-+ err = pci_enable_device(dev);
-+ if (err)
-+ goto config_release;
-+
-+ /* Now disable the device (this also ensures some private device
-+ * data is setup before we export)
-+ * This calls pciback_config_reset(dev)
-+ */
-+ pciback_reset_device(dev);
-+
-+ return 0;
-+
-+ config_release:
-+ pciback_config_free(dev);
-+
-+ out:
-+ pci_set_drvdata(dev, NULL);
-+ kfree(dev_data);
-+ return err;
-+}
-+
-+/*
-+ * Because some initialization still happens on
-+ * devices during fs_initcall, we need to defer
-+ * full initialization of our devices until
-+ * device_initcall.
-+ */
-+static int __init pcistub_init_devices_late(void)
-+{
-+ struct pci_stub_device *psdev, *t;
-+ int err = 0;
-+
-+ spin_lock(&pci_stub_devices_lock);
-+
-+ list_for_each_entry_safe(psdev, t, &seized_devices, dev_list) {
-+ list_del(&psdev->dev_list);
-+ err = pcistub_init_device(psdev->dev);
-+ if (err) {
-+ printk(KERN_ERR
-+ "pciback: %s error %d initializing device\n",
-+ pci_name(psdev->dev), err);
-+ kfree(psdev);
-+ continue;
-+ }
-+
-+ list_add_tail(&psdev->dev_list, &pci_stub_devices);
-+ }
-+
-+ initialize_devices = 1;
-+
-+ spin_unlock(&pci_stub_devices_lock);
-+
-+ return 0;
-+}
-+
-+device_initcall(pcistub_init_devices_late);
-+
-+static int __devinit pcistub_seize(struct pci_dev *dev)
-+{
-+ struct pci_stub_device *psdev;
-+ int err = 0;
-+
-+ psdev = kmalloc(sizeof(*psdev), GFP_KERNEL);
-+ if (!psdev)
-+ return -ENOMEM;
-+
-+ psdev->dev = dev;
-+ atomic_set(&psdev->in_use, 1);
-+
-+ spin_lock(&pci_stub_devices_lock);
-+
-+ if (initialize_devices) {
-+ err = pcistub_init_device(psdev->dev);
-+ if (err)
-+ goto out;
-+
-+ list_add(&psdev->dev_list, &pci_stub_devices);
-+ } else
-+ list_add(&psdev->dev_list, &seized_devices);
-+
-+ out:
-+ spin_unlock(&pci_stub_devices_lock);
-+
-+ if (err)
-+ kfree(psdev);
-+
-+ return err;
-+}
-+
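-+/* Our id_table matches every PCI device, so probe() is offered everything;
-+ * only devices that match an entry on the hide= list (directly or via a
-+ * parent bridge) are seized, the rest are declined with -ENODEV.
-+ */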
-+static int __devinit pcistub_probe(struct pci_dev *dev,
-+ const struct pci_device_id *id)
-+{
-+ struct pci_stub_device_id *pdev_id;
-+ struct pci_dev *seized_dev;
-+ int err = 0;
-+
-+ list_for_each_entry(pdev_id, &pci_stub_device_ids, slot_list) {
-+
-+ if (!pcistub_match(dev, pdev_id))
-+ continue;
-+
-+ if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL
-+ && dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
-+ printk(KERN_ERR
-+ "pciback: %s: can't export pci devices that "
-+ "don't have a normal (0) or bridge (1) "
-+ "header type!\n", pci_name(dev));
-+ break;
-+ }
-+
-+ pr_info("pciback: seizing PCI device %s\n", pci_name(dev));
-+ seized_dev = pci_dev_get(dev);
-+
-+ if (seized_dev) {
-+ err = pcistub_seize(seized_dev);
-+ if (err) {
-+ pci_dev_put(dev);
-+ goto out;
-+ }
-+
-+ /* Success! */
-+ goto out;
-+ }
-+ }
-+
-+ /* Didn't find the device */
-+ err = -ENODEV;
-+
-+ out:
-+ return err;
-+}
-+
-+struct pci_device_id pcistub_ids[] = {
-+ {
-+ .vendor = PCI_ANY_ID,
-+ .device = PCI_ANY_ID,
-+ .subvendor = PCI_ANY_ID,
-+ .subdevice = PCI_ANY_ID,
-+ },
-+ {0,},
-+};
-+
-+/*
-+ * Note: There is no MODULE_DEVICE_TABLE entry here because this isn't
-+ * for a normal device. I don't want it to be loaded automatically.
-+ */
-+
-+struct pci_driver pciback_pci_driver = {
-+ .name = "pciback",
-+ .id_table = pcistub_ids,
-+ .probe = pcistub_probe,
-+};
-+
-+static int __init pcistub_init(void)
-+{
-+ int pos = 0;
-+ struct pci_stub_device_id *pci_dev_id;
-+ int err = 0;
-+ int domain, bus, slot, func;
-+ int parsed;
-+
-+ if (pci_devs_to_hide && *pci_devs_to_hide) {
-+ do {
-+ parsed = 0;
-+
-+ err = sscanf(pci_devs_to_hide + pos,
-+ " (%x:%x:%x.%x) %n",
-+ &domain, &bus, &slot, &func, &parsed);
-+ if (err != 4) {
-+ domain = 0;
-+ err = sscanf(pci_devs_to_hide + pos,
-+ " (%x:%x.%x) %n",
-+ &bus, &slot, &func, &parsed);
-+ if (err != 3)
-+ goto parse_error;
-+ }
-+
-+ pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL);
-+ if (!pci_dev_id) {
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+
-+ pci_dev_id->domain = domain;
-+ pci_dev_id->bus = bus;
-+ pci_dev_id->devfn = PCI_DEVFN(slot, func);
-+
-+			pr_debug("pciback: wants to seize %04x:%02x:%02x.%01x\n",
-+				 domain, bus, slot, func);
-+
-+ list_add_tail(&pci_dev_id->slot_list,
-+ &pci_stub_device_ids);
-+
-+ /* if parsed<=0, we've reached the end of the string */
-+ pos += parsed;
-+ } while (parsed > 0 && pci_devs_to_hide[pos]);
-+
-+ /* If we're the first PCI Device Driver to register, we're the
-+ * first one to get offered PCI devices as they become
-+ * available (and thus we can be the first to grab them)
-+ */
-+ pci_register_driver(&pciback_pci_driver);
-+ }
-+
-+ out:
-+ return err;
-+
-+ parse_error:
-+ printk(KERN_ERR "pciback: Error parsing pci_devs_to_hide at \"%s\"\n",
-+ pci_devs_to_hide + pos);
-+ return -EINVAL;
-+}
-+
-+/*
-+ * fs_initcall happens before device_initcall
-+ * so pciback *should* get called first (because we
-+ * want to claim any device before other drivers
-+ * get a chance, by being the first PCI device
-+ * driver to register)
-+ */
-+fs_initcall(pcistub_init);
-diff --git a/drivers/xen/pciback/pciback.h b/drivers/xen/pciback/pciback.h
-new file mode 100644
-index 0000000..a1f408b
---- /dev/null
-+++ b/drivers/xen/pciback/pciback.h
-@@ -0,0 +1,73 @@
-+/*
-+ * PCI Backend Common Data Structures & Function Declarations
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+#ifndef __XEN_PCIBACK_H__
-+#define __XEN_PCIBACK_H__
-+
-+#include <linux/pci.h>
-+#include <linux/interrupt.h>
-+#include <xen/xenbus.h>
-+#include <linux/list.h>
-+#include <linux/spinlock.h>
-+#include <xen/interface/io/pciif.h>
-+
-+struct pci_dev_entry {
-+ struct list_head list;
-+ struct pci_dev *dev;
-+};
-+
-+struct pciback_device {
-+ void *pci_dev_data;
-+ spinlock_t dev_lock;
-+
-+ struct xenbus_device *xdev;
-+
-+ struct xenbus_watch be_watch;
-+ u8 be_watching;
-+
-+ int evtchn_irq;
-+
-+ struct xen_pci_sharedinfo *sh_info;
-+};
-+
-+struct pciback_dev_data {
-+ struct list_head config_fields;
-+};
-+
-+/* Get/Put PCI Devices that are hidden from the PCI Backend Domain */
-+struct pci_dev *pcistub_get_pci_dev_by_slot(int domain, int bus,
-+ int slot, int func);
-+struct pci_dev *pcistub_get_pci_dev(struct pci_dev *dev);
-+void pcistub_put_pci_dev(struct pci_dev *dev);
-+
-+/* Ensure a device is turned off or reset */
-+void pciback_disable_device(struct pci_dev *dev);
-+void pciback_reset_device(struct pci_dev *pdev);
-+
-+/* Access a virtual configuration space for a PCI device */
-+int pciback_config_init(struct pci_dev *dev);
-+void pciback_config_reset(struct pci_dev *dev);
-+void pciback_config_free(struct pci_dev *dev);
-+int pciback_config_read(struct pci_dev *dev, int offset, int size,
-+ u32 * ret_val);
-+int pciback_config_write(struct pci_dev *dev, int offset, int size, u32 value);
-+
-+/* Handle requests for specific devices from the frontend */
-+typedef int (*publish_pci_root_cb) (struct pciback_device * pdev,
-+ unsigned int domain, unsigned int bus);
-+int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev);
-+struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
-+ unsigned int domain, unsigned int bus,
-+ unsigned int devfn);
-+int pciback_init_devices(struct pciback_device *pdev);
-+int pciback_publish_pci_roots(struct pciback_device *pdev,
-+ publish_pci_root_cb cb);
-+void pciback_release_devices(struct pciback_device *pdev);
-+
-+/* Handles events from front-end */
-+irqreturn_t pciback_handle_event(int irq, void *dev_id, struct pt_regs *regs);
-+
-+extern int verbose_request;
-+#endif
-diff --git a/drivers/xen/pciback/pciback_ops.c b/drivers/xen/pciback/pciback_ops.c
-new file mode 100644
-index 0000000..9019608
---- /dev/null
-+++ b/drivers/xen/pciback/pciback_ops.c
-@@ -0,0 +1,84 @@
-+/*
-+ * PCI Backend Operations - respond to PCI requests from Frontend
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+#include <linux/module.h>
-+#include <asm/bitops.h>
-+#include "pciback.h"
-+
-+int verbose_request = 0;
-+module_param(verbose_request, int, 0644);
-+
-+/* For those architectures without a pcibios_disable_device */
-+void __attribute__ ((weak)) pcibios_disable_device(struct pci_dev *dev) { }
-+
-+void pciback_disable_device(struct pci_dev *dev)
-+{
-+ if (dev->is_enabled) {
-+ dev->is_enabled = 0;
-+ pcibios_disable_device(dev);
-+ }
-+}
-+
-+/* Ensure a device is "turned off" and ready to be exported.
-+ * This also sets up the device's private data to keep track of what should
-+ * be in the base address registers (BARs) so that we can keep the
-+ * client from manipulating them directly.
-+ */
-+void pciback_reset_device(struct pci_dev *dev)
-+{
-+ u16 cmd;
-+
-+ /* Disable devices (but not bridges) */
-+ if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
-+ pciback_disable_device(dev);
-+
-+ pci_write_config_word(dev, PCI_COMMAND, 0);
-+
-+ dev->is_enabled = 0;
-+ dev->is_busmaster = 0;
-+ } else {
-+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
-+ if (cmd & (PCI_COMMAND_INVALIDATE)) {
-+ cmd &= ~(PCI_COMMAND_INVALIDATE);
-+ pci_write_config_word(dev, PCI_COMMAND, cmd);
-+
-+ dev->is_busmaster = 0;
-+ }
-+ }
-+
-+ pciback_config_reset(dev);
-+}
-+
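-+/* Event-channel handler: the frontend sets _XEN_PCIF_active in the shared
-+ * page and kicks us; we perform the requested config space op, publish the
-+ * result and clear the bit (after a write barrier) to signal completion.
-+ */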
-+irqreturn_t pciback_handle_event(int irq, void *dev_id, struct pt_regs *regs)
-+{
-+ struct pciback_device *pdev = dev_id;
-+ struct pci_dev *dev;
-+ struct xen_pci_op *op = &pdev->sh_info->op;
-+
-+ if (unlikely(!test_bit(_XEN_PCIF_active,
-+ (unsigned long *)&pdev->sh_info->flags))) {
-+ pr_debug("pciback: interrupt, but no active operation\n");
-+ goto out;
-+ }
-+
-+ dev = pciback_get_pci_dev(pdev, op->domain, op->bus, op->devfn);
-+
-+ if (dev == NULL)
-+ op->err = XEN_PCI_ERR_dev_not_found;
-+ else if (op->cmd == XEN_PCI_OP_conf_read)
-+ op->err = pciback_config_read(dev, op->offset, op->size,
-+ &op->value);
-+ else if (op->cmd == XEN_PCI_OP_conf_write)
-+ op->err = pciback_config_write(dev, op->offset, op->size,
-+ op->value);
-+ else
-+ op->err = XEN_PCI_ERR_not_implemented;
-+
-+ wmb();
-+ clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
-+
-+ out:
-+ return IRQ_HANDLED;
-+}
-diff --git a/drivers/xen/pciback/vpci.c b/drivers/xen/pciback/vpci.c
-new file mode 100644
-index 0000000..17d554d
---- /dev/null
-+++ b/drivers/xen/pciback/vpci.c
-@@ -0,0 +1,163 @@
-+/*
-+ * PCI Backend - Provides a Virtual PCI bus (with real devices)
-+ * to the frontend
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+
-+#include <linux/list.h>
-+#include <linux/slab.h>
-+#include <linux/pci.h>
-+#include "pciback.h"
-+
-+#define PCI_SLOT_MAX 32
-+
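-+/* One list of device entries per virtual slot; all functions of a
-+ * multi-function device share a slot (see pciback_add_pci_dev()).
-+ */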
-+struct vpci_dev_data {
-+ struct list_head dev_list[PCI_SLOT_MAX];
-+};
-+
-+static inline struct list_head *list_first(struct list_head *head)
-+{
-+ return head->next;
-+}
-+
-+struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
-+ unsigned int domain, unsigned int bus,
-+ unsigned int devfn)
-+{
-+ struct pci_dev_entry *dev_entry;
-+ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
-+
-+ if (domain != 0 || bus != 0)
-+ return NULL;
-+
-+ if (PCI_SLOT(devfn) < PCI_SLOT_MAX) {
-+ /* we don't need to lock the list here because once the backend
-+		 * is in operation, it won't have any more devices added
-+ * (or removed).
-+ */
-+ list_for_each_entry(dev_entry,
-+ &vpci_dev->dev_list[PCI_SLOT(devfn)],
-+ list) {
-+ if (PCI_FUNC(dev_entry->dev->devfn) == PCI_FUNC(devfn))
-+ return dev_entry->dev;
-+ }
-+ }
-+ return NULL;
-+}
-+
-+static inline int match_slot(struct pci_dev *l, struct pci_dev *r)
-+{
-+ if (pci_domain_nr(l->bus) == pci_domain_nr(r->bus)
-+ && l->bus == r->bus && PCI_SLOT(l->devfn) == PCI_SLOT(r->devfn))
-+ return 1;
-+
-+ return 0;
-+}
-+
-+/* Must hold pciback_device->dev_lock when calling this */
-+int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
-+{
-+ int err = 0, slot;
-+ struct pci_dev_entry *t, *dev_entry;
-+ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
-+
-+ if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
-+ err = -EFAULT;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Can't export bridges on the virtual PCI bus");
-+ goto out;
-+ }
-+
-+ dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
-+ if (!dev_entry) {
-+ err = -ENOMEM;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error adding entry to virtual PCI bus");
-+ goto out;
-+ }
-+
-+ dev_entry->dev = dev;
-+
-+ /* Keep multi-function devices together on the virtual PCI bus */
-+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
-+ if (!list_empty(&vpci_dev->dev_list[slot])) {
-+ t = list_entry(list_first(&vpci_dev->dev_list[slot]),
-+ struct pci_dev_entry, list);
-+
-+ if (match_slot(dev, t->dev)) {
-+ pr_info("pciback: vpci: %s: "
-+ "assign to virtual slot %d func %d\n",
-+ pci_name(dev), slot,
-+ PCI_FUNC(dev->devfn));
-+ list_add_tail(&dev_entry->list,
-+ &vpci_dev->dev_list[slot]);
-+ goto out;
-+ }
-+ }
-+ }
-+
-+ /* Assign to a new slot on the virtual PCI bus */
-+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
-+ if (list_empty(&vpci_dev->dev_list[slot])) {
-+ printk(KERN_INFO
-+ "pciback: vpci: %s: assign to virtual slot %d\n",
-+ pci_name(dev), slot);
-+ list_add_tail(&dev_entry->list,
-+ &vpci_dev->dev_list[slot]);
-+ goto out;
-+ }
-+ }
-+
-+ err = -ENOMEM;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "No more space on root virtual PCI bus");
-+
-+ out:
-+ return err;
-+}
-+
-+int pciback_init_devices(struct pciback_device *pdev)
-+{
-+ int slot;
-+ struct vpci_dev_data *vpci_dev;
-+
-+ vpci_dev = kmalloc(sizeof(*vpci_dev), GFP_KERNEL);
-+ if (!vpci_dev)
-+ return -ENOMEM;
-+
-+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
-+ INIT_LIST_HEAD(&vpci_dev->dev_list[slot]);
-+ }
-+
-+ pdev->pci_dev_data = vpci_dev;
-+
-+ return 0;
-+}
-+
-+int pciback_publish_pci_roots(struct pciback_device *pdev,
-+ publish_pci_root_cb publish_cb)
-+{
-+ /* The Virtual PCI bus has only one root */
-+ return publish_cb(pdev, 0, 0);
-+}
-+
-+/* Must hold pciback_device->dev_lock when calling this */
-+void pciback_release_devices(struct pciback_device *pdev)
-+{
-+ int slot;
-+ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
-+
-+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
-+ struct pci_dev_entry *e, *tmp;
-+ list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
-+ list) {
-+ list_del(&e->list);
-+ pcistub_put_pci_dev(e->dev);
-+ kfree(e);
-+ }
-+ }
-+
-+ kfree(vpci_dev);
-+ pdev->pci_dev_data = NULL;
-+}
-diff --git a/drivers/xen/pciback/xenbus.c b/drivers/xen/pciback/xenbus.c
-new file mode 100644
-index 0000000..b3dc7fb
---- /dev/null
-+++ b/drivers/xen/pciback/xenbus.c
-@@ -0,0 +1,439 @@
-+/*
-+ * PCI Backend Xenbus Setup - handles setup with frontend and xend
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/list.h>
-+#include <xen/xenbus.h>
-+#include <xen/evtchn.h>
-+#include "pciback.h"
-+
-+#define INVALID_EVTCHN_IRQ (-1)
-+
-+struct pciback_device *alloc_pdev(struct xenbus_device *xdev)
-+{
-+ struct pciback_device *pdev;
-+
-+ pdev = kmalloc(sizeof(struct pciback_device), GFP_KERNEL);
-+ if (pdev == NULL)
-+ goto out;
-+ dev_dbg(&xdev->dev, "allocated pdev @ 0x%p\n", pdev);
-+
-+ pdev->xdev = xdev;
-+ xdev->data = pdev;
-+
-+ spin_lock_init(&pdev->dev_lock);
-+
-+ pdev->sh_info = NULL;
-+ pdev->evtchn_irq = INVALID_EVTCHN_IRQ;
-+ pdev->be_watching = 0;
-+
-+ if (pciback_init_devices(pdev)) {
-+ kfree(pdev);
-+ pdev = NULL;
-+ }
-+ out:
-+ return pdev;
-+}
-+
-+void free_pdev(struct pciback_device *pdev)
-+{
-+ if (pdev->be_watching)
-+ unregister_xenbus_watch(&pdev->be_watch);
-+
-+ /* Ensure the guest can't trigger our handler before removing devices */
-+ if (pdev->evtchn_irq != INVALID_EVTCHN_IRQ)
-+ unbind_from_irqhandler(pdev->evtchn_irq, pdev);
-+
-+ if (pdev->sh_info)
-+ xenbus_unmap_ring_vfree(pdev->xdev, pdev->sh_info);
-+
-+ pciback_release_devices(pdev);
-+
-+ pdev->xdev->data = NULL;
-+ pdev->xdev = NULL;
-+
-+ kfree(pdev);
-+}
-+
-+static int pciback_do_attach(struct pciback_device *pdev, int gnt_ref,
-+ int remote_evtchn)
-+{
-+ int err = 0;
-+ int evtchn;
-+ dev_dbg(&pdev->xdev->dev,
-+ "Attaching to frontend resources - gnt_ref=%d evtchn=%d\n",
-+ gnt_ref, remote_evtchn);
-+
-+	err = xenbus_map_ring_valloc(pdev->xdev, gnt_ref,
-+				     (void **)&pdev->sh_info);
-+ if (err)
-+ goto out;
-+
-+ err = xenbus_bind_evtchn(pdev->xdev, remote_evtchn, &evtchn);
-+ if (err)
-+ goto out;
-+
-+ err = bind_evtchn_to_irqhandler(evtchn, pciback_handle_event,
-+ SA_SAMPLE_RANDOM, "pciback", pdev);
-+ if (err < 0) {
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error binding event channel to IRQ");
-+ goto out;
-+ }
-+ pdev->evtchn_irq = err;
-+ err = 0;
-+
-+ dev_dbg(&pdev->xdev->dev, "Attached!\n");
-+ out:
-+ return err;
-+}
-+
-+static int pciback_attach(struct pciback_device *pdev)
-+{
-+ int err = 0;
-+ int gnt_ref, remote_evtchn;
-+ char *magic = NULL;
-+
-+ spin_lock(&pdev->dev_lock);
-+
-+ /* Make sure we only do this setup once */
-+ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
-+ XenbusStateInitialised)
-+ goto out;
-+
-+ /* Wait for frontend to state that it has published the configuration */
-+ if (xenbus_read_driver_state(pdev->xdev->otherend) !=
-+ XenbusStateInitialised)
-+ goto out;
-+
-+ dev_dbg(&pdev->xdev->dev, "Reading frontend config\n");
-+
-+ err = xenbus_gather(XBT_NULL, pdev->xdev->otherend,
-+ "pci-op-ref", "%u", &gnt_ref,
-+ "event-channel", "%u", &remote_evtchn,
-+ "magic", NULL, &magic, NULL);
-+ if (err) {
-+ /* If configuration didn't get read correctly, wait longer */
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error reading configuration from frontend");
-+ goto out;
-+ }
-+
-+ if (magic == NULL || strcmp(magic, XEN_PCI_MAGIC) != 0) {
-+ xenbus_dev_fatal(pdev->xdev, -EFAULT,
-+ "version mismatch (%s/%s) with pcifront - "
-+ "halting pciback",
-+ magic, XEN_PCI_MAGIC);
-+ goto out;
-+ }
-+
-+ err = pciback_do_attach(pdev, gnt_ref, remote_evtchn);
-+ if (err)
-+ goto out;
-+
-+ dev_dbg(&pdev->xdev->dev, "Connecting...\n");
-+
-+ err = xenbus_switch_state(pdev->xdev, XBT_NULL, XenbusStateConnected);
-+ if (err)
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error switching to connected state!");
-+
-+ dev_dbg(&pdev->xdev->dev, "Connected? %d\n", err);
-+ out:
-+ spin_unlock(&pdev->dev_lock);
-+
-+ if (magic)
-+ kfree(magic);
-+
-+ return err;
-+}
-+
-+static void pciback_frontend_changed(struct xenbus_device *xdev,
-+ XenbusState fe_state)
-+{
-+ struct pciback_device *pdev = xdev->data;
-+
-+ dev_dbg(&xdev->dev, "fe state changed %d\n", fe_state);
-+
-+ switch (fe_state) {
-+ case XenbusStateInitialised:
-+ pciback_attach(pdev);
-+ break;
-+
-+ case XenbusStateClosing:
-+ xenbus_switch_state(xdev, XBT_NULL, XenbusStateClosing);
-+ break;
-+
-+ case XenbusStateClosed:
-+ dev_dbg(&xdev->dev, "frontend is gone! unregister device\n");
-+ device_unregister(&xdev->dev);
-+ break;
-+
-+ default:
-+ break;
-+ }
-+}
-+
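-+/* Publish a (domain, bus) pair under the backend's xenstore node as
-+ * root-<n> = dddd:bb and bump root_num; roots already published are
-+ * skipped.
-+ */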
-+static int pciback_publish_pci_root(struct pciback_device *pdev,
-+ unsigned int domain, unsigned int bus)
-+{
-+ unsigned int d, b;
-+ int i, root_num, len, err;
-+ char str[64];
-+
-+ dev_dbg(&pdev->xdev->dev, "Publishing pci roots\n");
-+
-+ err = xenbus_scanf(XBT_NULL, pdev->xdev->nodename,
-+ "root_num", "%d", &root_num);
-+ if (err == 0 || err == -ENOENT)
-+ root_num = 0;
-+ else if (err < 0)
-+ goto out;
-+
-+ /* Verify that we haven't already published this pci root */
-+ for (i = 0; i < root_num; i++) {
-+ len = snprintf(str, sizeof(str), "root-%d", i);
-+ if (unlikely(len >= (sizeof(str) - 1))) {
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+
-+ err = xenbus_scanf(XBT_NULL, pdev->xdev->nodename,
-+ str, "%x:%x", &d, &b);
-+ if (err < 0)
-+ goto out;
-+ if (err != 2) {
-+ err = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (d == domain && b == bus) {
-+ err = 0;
-+ goto out;
-+ }
-+ }
-+
-+ len = snprintf(str, sizeof(str), "root-%d", root_num);
-+ if (unlikely(len >= (sizeof(str) - 1))) {
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+
-+ dev_dbg(&pdev->xdev->dev, "writing root %d at %04x:%02x\n",
-+ root_num, domain, bus);
-+
-+ err = xenbus_printf(XBT_NULL, pdev->xdev->nodename, str,
-+ "%04x:%02x", domain, bus);
-+ if (err)
-+ goto out;
-+
-+ err = xenbus_printf(XBT_NULL, pdev->xdev->nodename,
-+ "root_num", "%d", (root_num + 1));
-+
-+ out:
-+ return err;
-+}
-+
-+static int pciback_export_device(struct pciback_device *pdev,
-+ int domain, int bus, int slot, int func)
-+{
-+ struct pci_dev *dev;
-+ int err = 0;
-+
-+ dev_dbg(&pdev->xdev->dev, "exporting dom %x bus %x slot %x func %x\n",
-+ domain, bus, slot, func);
-+
-+ dev = pcistub_get_pci_dev_by_slot(domain, bus, slot, func);
-+ if (!dev) {
-+ err = -EINVAL;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Couldn't locate PCI device "
-+ "(%04x:%02x:%02x.%01x)! "
-+ "perhaps already in-use?",
-+ domain, bus, slot, func);
-+ goto out;
-+ }
-+
-+ err = pciback_add_pci_dev(pdev, dev);
-+ if (err)
-+ goto out;
-+
-+ /* TODO: It'd be nice to export a bridge and have all of its children
-+ * get exported with it. This may be best done in xend (which will
-+ * have to calculate resource usage anyway) but we probably want to
-+ * put something in here to ensure that if a bridge gets given to a
-+ * driver domain, that all devices under that bridge are not given
-+ * to other driver domains (as he who controls the bridge can disable
-+ * it and stop the other devices from working).
-+ */
-+ out:
-+ return err;
-+}
-+
-+static int pciback_setup_backend(struct pciback_device *pdev)
-+{
-+ /* Get configuration from xend (if available now) */
-+ int domain, bus, slot, func;
-+ int err = 0;
-+ int i, num_devs;
-+ char dev_str[64];
-+
-+ spin_lock(&pdev->dev_lock);
-+
-+ /* It's possible we could get the call to setup twice, so make sure
-+ * we're not already connected.
-+ */
-+ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
-+ XenbusStateInitWait)
-+ goto out;
-+
-+ dev_dbg(&pdev->xdev->dev, "getting be setup\n");
-+
-+ err = xenbus_scanf(XBT_NULL, pdev->xdev->nodename, "num_devs", "%d",
-+ &num_devs);
-+ if (err != 1) {
-+ if (err >= 0)
-+ err = -EINVAL;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error reading number of devices");
-+ goto out;
-+ }
-+
-+ for (i = 0; i < num_devs; i++) {
-+ int l = snprintf(dev_str, sizeof(dev_str), "dev-%d", i);
-+ if (unlikely(l >= (sizeof(dev_str) - 1))) {
-+ err = -ENOMEM;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "String overflow while reading "
-+ "configuration");
-+ goto out;
-+ }
-+
-+ err = xenbus_scanf(XBT_NULL, pdev->xdev->nodename, dev_str,
-+ "%x:%x:%x.%x", &domain, &bus, &slot, &func);
-+ if (err < 0) {
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error reading device configuration");
-+ goto out;
-+ }
-+ if (err != 4) {
-+ err = -EINVAL;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error parsing pci device "
-+ "configuration");
-+ goto out;
-+ }
-+
-+ err = pciback_export_device(pdev, domain, bus, slot, func);
-+ if (err)
-+ goto out;
-+ }
-+
-+ err = pciback_publish_pci_roots(pdev, pciback_publish_pci_root);
-+ if (err) {
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error while publish PCI root buses "
-+ "for frontend");
-+ goto out;
-+ }
-+
-+ err = xenbus_switch_state(pdev->xdev, XBT_NULL, XenbusStateInitialised);
-+ if (err)
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error switching to initialised state!");
-+
-+ out:
-+ spin_unlock(&pdev->dev_lock);
-+
-+ if (!err)
-+ /* see if pcifront is already configured (if not, we'll wait) */
-+ pciback_attach(pdev);
-+
-+ return err;
-+}
-+
-+static void pciback_be_watch(struct xenbus_watch *watch,
-+ const char **vec, unsigned int len)
-+{
-+ struct pciback_device *pdev =
-+ container_of(watch, struct pciback_device, be_watch);
-+
-+ switch (xenbus_read_driver_state(pdev->xdev->nodename)) {
-+ case XenbusStateInitWait:
-+ pciback_setup_backend(pdev);
-+ break;
-+
-+ default:
-+ break;
-+ }
-+}
-+
-+static int pciback_xenbus_probe(struct xenbus_device *dev,
-+ const struct xenbus_device_id *id)
-+{
-+ int err = 0;
-+ struct pciback_device *pdev = alloc_pdev(dev);
-+
-+ if (pdev == NULL) {
-+ err = -ENOMEM;
-+ xenbus_dev_fatal(dev, err,
-+ "Error allocating pciback_device struct");
-+ goto out;
-+ }
-+
-+ /* wait for xend to configure us */
-+ err = xenbus_switch_state(dev, XBT_NULL, XenbusStateInitWait);
-+ if (err)
-+ goto out;
-+
-+ /* watch the backend node for backend configuration information */
-+ err = xenbus_watch_path(dev, dev->nodename, &pdev->be_watch,
-+ pciback_be_watch);
-+ if (err)
-+ goto out;
-+ pdev->be_watching = 1;
-+
-+ /* We need to force a call to our callback here in case
-+ * xend already configured us!
-+ */
-+ pciback_be_watch(&pdev->be_watch, NULL, 0);
-+
-+ out:
-+ return err;
-+}
-+
-+static int pciback_xenbus_remove(struct xenbus_device *dev)
-+{
-+ struct pciback_device *pdev = dev->data;
-+
-+ if (pdev != NULL)
-+ free_pdev(pdev);
-+
-+ return 0;
-+}
-+
-+static struct xenbus_device_id xenpci_ids[] = {
-+ {"pci"},
-+ {{0}},
-+};
-+
-+static struct xenbus_driver xenbus_pciback_driver = {
-+ .name = "pciback",
-+ .owner = THIS_MODULE,
-+ .ids = xenpci_ids,
-+ .probe = pciback_xenbus_probe,
-+ .remove = pciback_xenbus_remove,
-+ .otherend_changed = pciback_frontend_changed,
-+};
-+
-+static __init int pciback_xenbus_register(void)
-+{
-+ return xenbus_register_backend(&xenbus_pciback_driver);
-+}
-+
-+/* Must only initialize our xenbus driver after the pcistub driver */
-+device_initcall(pciback_xenbus_register);
-diff --git a/drivers/xen/pcifront/Makefile b/drivers/xen/pcifront/Makefile
-new file mode 100644
-index 0000000..621e988
---- /dev/null
-+++ b/drivers/xen/pcifront/Makefile
-@@ -0,0 +1,7 @@
-+obj-y += pcifront.o
-+
-+pcifront-y := pci_op.o xenbus.o pci.o
-+
-+ifeq ($(CONFIG_XEN_PCIDEV_FE_DEBUG),y)
-+EXTRA_CFLAGS += -DDEBUG
-+endif
-diff --git a/drivers/xen/pcifront/pci.c b/drivers/xen/pcifront/pci.c
-new file mode 100644
-index 0000000..d383410
---- /dev/null
-+++ b/drivers/xen/pcifront/pci.c
-@@ -0,0 +1,44 @@
-+/*
-+ * PCI Frontend Operations - ensure only one PCI frontend runs at a time
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/pci.h>
-+#include <linux/spinlock.h>
-+#include "pcifront.h"
-+
-+DEFINE_SPINLOCK(pcifront_dev_lock);
-+static struct pcifront_device *pcifront_dev = NULL;
-+
-+int pcifront_connect(struct pcifront_device *pdev)
-+{
-+ int err = 0;
-+
-+ spin_lock(&pcifront_dev_lock);
-+
-+ if (!pcifront_dev)
-+ dev_info(&pdev->xdev->dev, "Installing PCI frontend\n");
-+ else {
-+ dev_err(&pdev->xdev->dev, "PCI frontend already installed!\n");
-+ err = -EEXIST;
-+ }
-+
-+ spin_unlock(&pcifront_dev_lock);
-+
-+ return err;
-+}
-+
-+void pcifront_disconnect(struct pcifront_device *pdev)
-+{
-+ spin_lock(&pcifront_dev_lock);
-+
-+ if (pdev == pcifront_dev) {
-+ dev_info(&pdev->xdev->dev,
-+ "Disconnecting PCI Frontend Buses\n");
-+ pcifront_dev = NULL;
-+ }
-+
-+ spin_unlock(&pcifront_dev_lock);
-+}
-diff --git a/drivers/xen/pcifront/pci_op.c b/drivers/xen/pcifront/pci_op.c
-new file mode 100644
-index 0000000..9679192
---- /dev/null
-+++ b/drivers/xen/pcifront/pci_op.c
-@@ -0,0 +1,245 @@
-+/*
-+ * PCI Frontend Operations - Communicates with the backend
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+#include <linux/module.h>
-+#include <linux/version.h>
-+#include <linux/init.h>
-+#include <linux/pci.h>
-+#include <linux/spinlock.h>
-+#include <xen/evtchn.h>
-+#include "pcifront.h"
-+
-+static int verbose_request = 0;
-+module_param(verbose_request, int, 0644);
-+
-+static int errno_to_pcibios_err(int errno)
-+{
-+ switch (errno) {
-+ case XEN_PCI_ERR_success:
-+ return PCIBIOS_SUCCESSFUL;
-+
-+ case XEN_PCI_ERR_dev_not_found:
-+ return PCIBIOS_DEVICE_NOT_FOUND;
-+
-+ case XEN_PCI_ERR_invalid_offset:
-+ case XEN_PCI_ERR_op_failed:
-+ return PCIBIOS_BAD_REGISTER_NUMBER;
-+
-+ case XEN_PCI_ERR_not_implemented:
-+ return PCIBIOS_FUNC_NOT_SUPPORTED;
-+
-+ case XEN_PCI_ERR_access_denied:
-+ return PCIBIOS_SET_FAILED;
-+ }
-+ return errno;
-+}
-+
-+static int do_pci_op(struct pcifront_device *pdev, struct xen_pci_op *op)
-+{
-+ int err = 0;
-+ struct xen_pci_op *active_op = &pdev->sh_info->op;
-+ unsigned long irq_flags;
-+
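-+	/* Bounded spin: allow the backend roughly 2^29 polls before we give
-+	 * up and report it unresponsive. */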
-+ unsigned int volatile ttl = (1U << 29);
-+
-+ spin_lock_irqsave(&pdev->sh_info_lock, irq_flags);
-+
-+ memcpy(active_op, op, sizeof(struct xen_pci_op));
-+
-+ /* Go */
-+ wmb();
-+ set_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
-+ notify_remote_via_evtchn(pdev->evtchn);
-+
-+	/* IRQs are disabled for PCI config space reads/writes, which means
-+	 * there is no event channel to notify us that the backend is done;
-+	 * spin while waiting for the answer. */
-+ while (test_bit
-+ (_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags)) {
-+ if (!ttl) {
-+ dev_err(&pdev->xdev->dev,
-+ "pciback not responding!!!\n");
-+ clear_bit(_XEN_PCIF_active,
-+ (unsigned long *)&pdev->sh_info->flags);
-+ err = XEN_PCI_ERR_dev_not_found;
-+ goto out;
-+ }
-+ ttl--;
-+ }
-+
-+ memcpy(op, active_op, sizeof(struct xen_pci_op));
-+
-+ err = op->err;
-+ out:
-+ spin_unlock_irqrestore(&pdev->sh_info_lock, irq_flags);
-+ return err;
-+}
-+
-+/* Access to this function is spinlocked in drivers/pci/access.c */
-+static int pcifront_bus_read(struct pci_bus *bus, unsigned int devfn,
-+ int where, int size, u32 * val)
-+{
-+ int err = 0;
-+ struct xen_pci_op op = {
-+ .cmd = XEN_PCI_OP_conf_read,
-+ .domain = pci_domain_nr(bus),
-+ .bus = bus->number,
-+ .devfn = devfn,
-+ .offset = where,
-+ .size = size,
-+ };
-+ struct pcifront_sd *sd = bus->sysdata;
-+ struct pcifront_device *pdev = sd->pdev;
-+
-+ if (verbose_request)
-+ dev_info(&pdev->xdev->dev,
-+ "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
-+ pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
-+ PCI_FUNC(devfn), where, size);
-+
-+ err = do_pci_op(pdev, &op);
-+
-+ if (likely(!err)) {
-+ if (verbose_request)
-+ dev_info(&pdev->xdev->dev, "read got back value %x\n",
-+ op.value);
-+
-+ *val = op.value;
-+ } else if (err == -ENODEV) {
-+ /* No device here, pretend that it just returned 0 */
-+ err = 0;
-+ *val = 0;
-+ }
-+
-+ return errno_to_pcibios_err(err);
-+}
-+
-+/* Access to this function is spinlocked in drivers/pci/access.c */
-+static int pcifront_bus_write(struct pci_bus *bus, unsigned int devfn,
-+ int where, int size, u32 val)
-+{
-+ struct xen_pci_op op = {
-+ .cmd = XEN_PCI_OP_conf_write,
-+ .domain = pci_domain_nr(bus),
-+ .bus = bus->number,
-+ .devfn = devfn,
-+ .offset = where,
-+ .size = size,
-+ .value = val,
-+ };
-+ struct pcifront_sd *sd = bus->sysdata;
-+ struct pcifront_device *pdev = sd->pdev;
-+
-+ if (verbose_request)
-+ dev_info(&pdev->xdev->dev,
-+ "write dev=%04x:%02x:%02x.%01x - "
-+ "offset %x size %d val %x\n",
-+ pci_domain_nr(bus), bus->number,
-+ PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val);
-+
-+ return errno_to_pcibios_err(do_pci_op(pdev, &op));
-+}
-+
-+struct pci_ops pcifront_bus_ops = {
-+ .read = pcifront_bus_read,
-+ .write = pcifront_bus_write,
-+};
-+
-+/* Claim resources for the PCI frontend as-is, backend won't allow changes */
-+static void pcifront_claim_resource(struct pci_dev *dev, void *data)
-+{
-+ struct pcifront_device *pdev = data;
-+ int i;
-+ struct resource *r;
-+
-+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
-+ r = &dev->resource[i];
-+
-+ if (!r->parent && r->start && r->flags) {
-+ dev_dbg(&pdev->xdev->dev, "claiming resource %s/%d\n",
-+ pci_name(dev), i);
-+ pci_claim_resource(dev, i);
-+ }
-+ }
-+}
-+
-+int pcifront_scan_root(struct pcifront_device *pdev,
-+ unsigned int domain, unsigned int bus)
-+{
-+ struct pci_bus *b;
-+ struct pcifront_sd *sd = NULL;
-+ struct pci_bus_entry *bus_entry = NULL;
-+ int err = 0;
-+
-+#ifndef CONFIG_PCI_DOMAINS
-+ if (domain != 0) {
-+ dev_err(&pdev->xdev->dev,
-+ "PCI Root in non-zero PCI Domain! domain=%d\n", domain);
-+ dev_err(&pdev->xdev->dev,
-+ "Please compile with CONFIG_PCI_DOMAINS\n");
-+ err = -EINVAL;
-+ goto err_out;
-+ }
-+#endif
-+
-+ dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
-+ domain, bus);
-+
-+ bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL);
-+ sd = kmalloc(sizeof(*sd), GFP_KERNEL);
-+ if (!bus_entry || !sd) {
-+ err = -ENOMEM;
-+ goto err_out;
-+ }
-+ sd->domain = domain;
-+ sd->pdev = pdev;
-+
-+ b = pci_scan_bus_parented(&pdev->xdev->dev, bus, &pcifront_bus_ops, sd);
-+ if (!b) {
-+ dev_err(&pdev->xdev->dev, "Error creating PCI Frontend Bus!\n");
-+ err = -ENOMEM;
-+ goto err_out;
-+ }
-+ bus_entry->bus = b;
-+
-+ list_add(&bus_entry->list, &pdev->root_buses);
-+
-+ /* Claim resources before going "live" with our devices */
-+ pci_walk_bus(b, pcifront_claim_resource, pdev);
-+
-+ pci_bus_add_devices(b);
-+
-+ return 0;
-+
-+ err_out:
-+ kfree(bus_entry);
-+ kfree(sd);
-+
-+ return err;
-+}
-+
-+void pcifront_free_roots(struct pcifront_device *pdev)
-+{
-+ struct pci_bus_entry *bus_entry, *t;
-+
-+ list_for_each_entry_safe(bus_entry, t, &pdev->root_buses, list) {
-+ /* TODO: Removing a PCI Bus is untested (as it normally
-+ * just goes away on domain shutdown)
-+ */
-+ list_del(&bus_entry->list);
-+
-+ spin_lock(&pci_bus_lock);
-+ list_del(&bus_entry->bus->node);
-+ spin_unlock(&pci_bus_lock);
-+
-+ kfree(bus_entry->bus->sysdata);
-+
-+ device_unregister(bus_entry->bus->bridge);
-+
-+ /* Do we need to free() the bus itself? */
-+
-+ kfree(bus_entry);
-+ }
-+}
-diff --git a/drivers/xen/pcifront/pcifront.h b/drivers/xen/pcifront/pcifront.h
-new file mode 100644
-index 0000000..70bbf3b
---- /dev/null
-+++ b/drivers/xen/pcifront/pcifront.h
-@@ -0,0 +1,40 @@
-+/*
-+ * PCI Frontend - Common data structures & function declarations
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+#ifndef __XEN_PCIFRONT_H__
-+#define __XEN_PCIFRONT_H__
-+
-+#include <linux/spinlock.h>
-+#include <linux/pci.h>
-+#include <xen/xenbus.h>
-+#include <xen/interface/io/pciif.h>
-+#include <xen/pcifront.h>
-+
-+struct pci_bus_entry {
-+ struct list_head list;
-+ struct pci_bus *bus;
-+};
-+
-+struct pcifront_device {
-+ struct xenbus_device *xdev;
-+ struct list_head root_buses;
-+ spinlock_t dev_lock;
-+
-+ int evtchn;
-+ int gnt_ref;
-+
-+ /* Lock this when doing any operations in sh_info */
-+ spinlock_t sh_info_lock;
-+ struct xen_pci_sharedinfo *sh_info;
-+};
-+
-+int pcifront_connect(struct pcifront_device *pdev);
-+void pcifront_disconnect(struct pcifront_device *pdev);
-+
-+int pcifront_scan_root(struct pcifront_device *pdev,
-+ unsigned int domain, unsigned int bus);
-+void pcifront_free_roots(struct pcifront_device *pdev);
-+
-+#endif /* __XEN_PCIFRONT_H__ */
-diff --git a/drivers/xen/pcifront/xenbus.c b/drivers/xen/pcifront/xenbus.c
-new file mode 100644
-index 0000000..c596ed4
---- /dev/null
-+++ b/drivers/xen/pcifront/xenbus.c
-@@ -0,0 +1,295 @@
-+/*
-+ * PCI Frontend Xenbus Setup - handles setup with backend (imports page/evtchn)
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/mm.h>
-+#include <xen/xenbus.h>
-+#include "pcifront.h"
-+
-+#define INVALID_GRANT_REF (0)
-+#define INVALID_EVTCHN (-1)
-+
-+static struct pcifront_device *alloc_pdev(struct xenbus_device *xdev)
-+{
-+ struct pcifront_device *pdev;
-+
-+ pdev = kmalloc(sizeof(struct pcifront_device), GFP_KERNEL);
-+ if (pdev == NULL)
-+ goto out;
-+
-+ pdev->sh_info =
-+ (struct xen_pci_sharedinfo *)__get_free_page(GFP_KERNEL);
-+ if (pdev->sh_info == NULL) {
-+ kfree(pdev);
-+ pdev = NULL;
-+ goto out;
-+ }
-+ pdev->sh_info->flags = 0;
-+
-+ xdev->data = pdev;
-+ pdev->xdev = xdev;
-+
-+ INIT_LIST_HEAD(&pdev->root_buses);
-+
-+ spin_lock_init(&pdev->dev_lock);
-+ spin_lock_init(&pdev->sh_info_lock);
-+
-+ pdev->evtchn = INVALID_EVTCHN;
-+ pdev->gnt_ref = INVALID_GRANT_REF;
-+
-+ dev_dbg(&xdev->dev, "Allocated pdev @ 0x%p pdev->sh_info @ 0x%p\n",
-+ pdev, pdev->sh_info);
-+ out:
-+ return pdev;
-+}
-+
-+static void free_pdev(struct pcifront_device *pdev)
-+{
-+ dev_dbg(&pdev->xdev->dev, "freeing pdev @ 0x%p\n", pdev);
-+
-+ if (pdev->evtchn != INVALID_EVTCHN)
-+ xenbus_free_evtchn(pdev->xdev, pdev->evtchn);
-+
-+ if (pdev->gnt_ref != INVALID_GRANT_REF)
-+ gnttab_end_foreign_access(pdev->gnt_ref, 0,
-+ (unsigned long)pdev->sh_info);
-+
-+ pdev->xdev->data = NULL;
-+
-+ kfree(pdev);
-+}
-+
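-+/* Grant the backend access to the shared info page, allocate an event
-+ * channel, and publish pci-op-ref, event-channel and magic in a xenbus
-+ * transaction (retried on -EAGAIN) before switching to Initialised.
-+ */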
-+static int pcifront_publish_info(struct pcifront_device *pdev)
-+{
-+ int err = 0;
-+ xenbus_transaction_t trans;
-+
-+ err = xenbus_grant_ring(pdev->xdev, virt_to_mfn(pdev->sh_info));
-+ if (err < 0)
-+ goto out;
-+
-+ pdev->gnt_ref = err;
-+
-+ err = xenbus_alloc_evtchn(pdev->xdev, &pdev->evtchn);
-+ if (err)
-+ goto out;
-+
-+ do_publish:
-+ err = xenbus_transaction_start(&trans);
-+ if (err) {
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error writing configuration for backend "
-+ "(start transaction)");
-+ goto out;
-+ }
-+
-+ err = xenbus_printf(trans, pdev->xdev->nodename,
-+ "pci-op-ref", "%u", pdev->gnt_ref);
-+ if (!err)
-+ err = xenbus_printf(trans, pdev->xdev->nodename,
-+ "event-channel", "%u", pdev->evtchn);
-+ if (!err)
-+ err = xenbus_printf(trans, pdev->xdev->nodename,
-+ "magic", XEN_PCI_MAGIC);
-+ if (!err)
-+ err =
-+ xenbus_switch_state(pdev->xdev, trans,
-+ XenbusStateInitialised);
-+
-+ if (err) {
-+ xenbus_transaction_end(trans, 1);
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error writing configuration for backend");
-+ goto out;
-+ } else {
-+ err = xenbus_transaction_end(trans, 0);
-+ if (err == -EAGAIN)
-+ goto do_publish;
-+ else if (err) {
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error completing transaction "
-+ "for backend");
-+ goto out;
-+ }
-+ }
-+
-+ dev_dbg(&pdev->xdev->dev, "publishing successful!\n");
-+
-+ out:
-+ return err;
-+}
-+
-+static int pcifront_try_connect(struct pcifront_device *pdev)
-+{
-+ int err = -EFAULT;
-+ int i, num_roots, len;
-+ char str[64];
-+ unsigned int domain, bus;
-+
-+ spin_lock(&pdev->dev_lock);
-+
-+ /* Only connect once */
-+ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
-+ XenbusStateInitialised)
-+ goto out;
-+
-+ err = pcifront_connect(pdev);
-+ if (err) {
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error connecting PCI Frontend");
-+ goto out;
-+ }
-+
-+ err = xenbus_scanf(XBT_NULL, pdev->xdev->otherend,
-+ "root_num", "%d", &num_roots);
-+ if (err == -ENOENT) {
-+ xenbus_dev_error(pdev->xdev, err,
-+ "No PCI Roots found, trying 0000:00");
-+ err = pcifront_scan_root(pdev, 0, 0);
-+ num_roots = 0;
-+ } else if (err != 1) {
-+ if (err == 0)
-+ err = -EINVAL;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error reading number of PCI roots");
-+ goto out;
-+ }
-+
-+ for (i = 0; i < num_roots; i++) {
-+ len = snprintf(str, sizeof(str), "root-%d", i);
-+ if (unlikely(len >= (sizeof(str) - 1))) {
-+ err = -ENOMEM;
-+ goto out;
-+ }
-+
-+ err = xenbus_scanf(XBT_NULL, pdev->xdev->otherend, str,
-+ "%x:%x", &domain, &bus);
-+ if (err != 2) {
-+ if (err >= 0)
-+ err = -EINVAL;
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error reading PCI root %d", i);
-+ goto out;
-+ }
-+
-+ err = pcifront_scan_root(pdev, domain, bus);
-+ if (err) {
-+ xenbus_dev_fatal(pdev->xdev, err,
-+ "Error scanning PCI root %04x:%02x",
-+ domain, bus);
-+ goto out;
-+ }
-+ }
-+
-+ err = xenbus_switch_state(pdev->xdev, XBT_NULL, XenbusStateConnected);
-+ if (err)
-+ goto out;
-+
-+ out:
-+ spin_unlock(&pdev->dev_lock);
-+ return err;
-+}
-+
-+static int pcifront_try_disconnect(struct pcifront_device *pdev)
-+{
-+ int err = 0;
-+ XenbusState prev_state;
-+
-+ spin_lock(&pdev->dev_lock);
-+
-+ prev_state = xenbus_read_driver_state(pdev->xdev->nodename);
-+
-+ if (prev_state < XenbusStateClosing)
-+ err = xenbus_switch_state(pdev->xdev, XBT_NULL,
-+ XenbusStateClosing);
-+
-+ if (!err && prev_state == XenbusStateConnected)
-+ pcifront_disconnect(pdev);
-+
-+ spin_unlock(&pdev->dev_lock);
-+
-+ return err;
-+}
-+
-+static void pcifront_backend_changed(struct xenbus_device *xdev,
-+ XenbusState be_state)
-+{
-+ struct pcifront_device *pdev = xdev->data;
-+
-+ switch (be_state) {
-+ case XenbusStateClosing:
-+ dev_warn(&xdev->dev, "backend going away!\n");
-+ pcifront_try_disconnect(pdev);
-+ break;
-+
-+ case XenbusStateClosed:
-+ dev_warn(&xdev->dev, "backend went away!\n");
-+ pcifront_try_disconnect(pdev);
-+
-+ device_unregister(&pdev->xdev->dev);
-+ break;
-+
-+ case XenbusStateConnected:
-+ pcifront_try_connect(pdev);
-+ break;
-+
-+ default:
-+ break;
-+ }
-+}
-+
-+static int pcifront_xenbus_probe(struct xenbus_device *xdev,
-+ const struct xenbus_device_id *id)
-+{
-+ int err = 0;
-+ struct pcifront_device *pdev = alloc_pdev(xdev);
-+
-+ if (pdev == NULL) {
-+ err = -ENOMEM;
-+ xenbus_dev_fatal(xdev, err,
-+ "Error allocating pcifront_device struct");
-+ goto out;
-+ }
-+
-+ err = pcifront_publish_info(pdev);
-+
-+ out:
-+ return err;
-+}
-+
-+static int pcifront_xenbus_remove(struct xenbus_device *xdev)
-+{
-+ if (xdev->data)
-+ free_pdev(xdev->data);
-+
-+ return 0;
-+}
-+
-+static struct xenbus_device_id xenpci_ids[] = {
-+ {"pci"},
-+ {{0}},
-+};
-+
-+static struct xenbus_driver xenbus_pcifront_driver = {
-+ .name = "pcifront",
-+ .owner = THIS_MODULE,
-+ .ids = xenpci_ids,
-+ .probe = pcifront_xenbus_probe,
-+ .remove = pcifront_xenbus_remove,
-+ .otherend_changed = pcifront_backend_changed,
-+};
-+
-+static int __init pcifront_init(void)
-+{
-+ int err = 0;
-+
-+ err = xenbus_register_frontend(&xenbus_pcifront_driver);
-+
-+ return err;
-+}
-+
-+/* Initialize after the Xen PCI Frontend Stub is initialized */
-+subsys_initcall(pcifront_init);
-diff --git a/drivers/xen/privcmd/Makefile b/drivers/xen/privcmd/Makefile
-new file mode 100644
-index 0000000..e218695
---- /dev/null
-+++ b/drivers/xen/privcmd/Makefile
-@@ -0,0 +1,2 @@
-+
-+obj-y := privcmd.o
-diff --git a/drivers/xen/privcmd/privcmd.c b/drivers/xen/privcmd/privcmd.c
-new file mode 100644
-index 0000000..52364f9
---- /dev/null
-+++ b/drivers/xen/privcmd/privcmd.c
-@@ -0,0 +1,302 @@
-+/******************************************************************************
-+ * privcmd.c
-+ *
-+ * Interface to privileged domain-0 commands.
-+ *
-+ * Copyright (c) 2002-2004, K A Fraser, B Dragovic
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+#include <linux/string.h>
-+#include <linux/errno.h>
-+#include <linux/mm.h>
-+#include <linux/mman.h>
-+#include <linux/swap.h>
-+#include <linux/smp_lock.h>
-+#include <linux/highmem.h>
-+#include <linux/pagemap.h>
-+#include <linux/seq_file.h>
-+#include <linux/kthread.h>
-+#include <asm/hypervisor.h>
-+
-+#include <asm/pgalloc.h>
-+#include <asm/pgtable.h>
-+#include <asm/uaccess.h>
-+#include <asm/tlb.h>
-+#include <asm/hypervisor.h>
-+#include <xen/public/privcmd.h>
-+#include <xen/interface/xen.h>
-+#include <xen/interface/dom0_ops.h>
-+#include <xen/xen_proc.h>
-+
-+static struct proc_dir_entry *privcmd_intf;
-+static struct proc_dir_entry *capabilities_intf;
-+
-+#define NR_HYPERCALLS 32
-+static DECLARE_BITMAP(hypercall_permission_map, NR_HYPERCALLS);
-+
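-+/*
-+ * IOCTL_PRIVCMD_HYPERCALL forwards a hypercall from userspace, but
-+ * only if its number is below NR_HYPERCALLS and set in
-+ * hypercall_permission_map (populated in privcmd_init() below).  On
-+ * x86 the call is dispatched through hypercall_page, whose entries
-+ * are 32 bytes apart - hence the shift-by-5 in the inline assembly.
-+ *
-+ * Illustrative sketch only (not part of this file): a dom0 tool
-+ * would drive this roughly as follows:
-+ *
-+ *	privcmd_hypercall_t call = { .op = __HYPERVISOR_xen_version };
-+ *	int fd = open("/proc/xen/privcmd", O_RDWR);
-+ *	int rc = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
-+ */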
-+static int privcmd_ioctl(struct inode *inode, struct file *file,
-+ unsigned int cmd, unsigned long data)
-+{
-+ int ret = -ENOSYS;
-+ void __user *udata = (void __user *) data;
-+
-+ switch (cmd) {
-+ case IOCTL_PRIVCMD_HYPERCALL: {
-+ privcmd_hypercall_t hypercall;
-+
-+ if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
-+ return -EFAULT;
-+
-+ /* Check hypercall number for validity. */
-+ if (hypercall.op >= NR_HYPERCALLS)
-+ return -EINVAL;
-+ if (!test_bit(hypercall.op, hypercall_permission_map))
-+ return -EINVAL;
-+
-+#if defined(__i386__)
-+ __asm__ __volatile__ (
-+ "pushl %%ebx; pushl %%ecx; pushl %%edx; "
-+ "pushl %%esi; pushl %%edi; "
-+ "movl 4(%%eax),%%ebx ;"
-+ "movl 8(%%eax),%%ecx ;"
-+ "movl 12(%%eax),%%edx ;"
-+ "movl 16(%%eax),%%esi ;"
-+ "movl 20(%%eax),%%edi ;"
-+ "movl (%%eax),%%eax ;"
-+ "shll $5,%%eax ;"
-+ "addl $hypercall_page,%%eax ;"
-+ "call *%%eax ;"
-+ "popl %%edi; popl %%esi; popl %%edx; "
-+ "popl %%ecx; popl %%ebx"
-+ : "=a" (ret) : "0" (&hypercall) : "memory" );
-+#elif defined (__x86_64__)
-+ {
-+ long ign1, ign2, ign3;
-+ __asm__ __volatile__ (
-+ "movq %8,%%r10; movq %9,%%r8;"
-+ "shlq $5,%%rax ;"
-+ "addq $hypercall_page,%%rax ;"
-+ "call *%%rax"
-+ : "=a" (ret), "=D" (ign1),
-+ "=S" (ign2), "=d" (ign3)
-+ : "0" ((unsigned long)hypercall.op),
-+ "1" ((unsigned long)hypercall.arg[0]),
-+ "2" ((unsigned long)hypercall.arg[1]),
-+ "3" ((unsigned long)hypercall.arg[2]),
-+ "g" ((unsigned long)hypercall.arg[3]),
-+ "g" ((unsigned long)hypercall.arg[4])
-+ : "r8", "r10", "memory" );
-+ }
-+#elif defined (__ia64__)
-+ __asm__ __volatile__ (
-+ ";; mov r14=%2; mov r15=%3; "
-+ "mov r16=%4; mov r17=%5; mov r18=%6;"
-+ "mov r2=%1; break 0x1000;; mov %0=r8 ;;"
-+ : "=r" (ret)
-+ : "r" (hypercall.op),
-+ "r" (hypercall.arg[0]),
-+ "r" (hypercall.arg[1]),
-+ "r" (hypercall.arg[2]),
-+ "r" (hypercall.arg[3]),
-+ "r" (hypercall.arg[4])
-+ : "r14","r15","r16","r17","r18","r2","r8","memory");
-+#endif
-+ }
-+ break;
-+
-+#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
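-+	/*
-+	 * IOCTL_PRIVCMD_MMAP maps foreign machine frames into a
-+	 * pre-existing VMA of the calling process.  Entries are copied
-+	 * in from userspace in chunks of PRIVCMD_MMAP_SZ, and each one
-+	 * is validated against the VMA bounds before being mapped via
-+	 * direct_remap_pfn_range().
-+	 */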
-+ case IOCTL_PRIVCMD_MMAP: {
-+#define PRIVCMD_MMAP_SZ 32
-+ privcmd_mmap_t mmapcmd;
-+ privcmd_mmap_entry_t msg[PRIVCMD_MMAP_SZ];
-+ privcmd_mmap_entry_t __user *p;
-+ int i, rc;
-+
-+ if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
-+ return -EFAULT;
-+
-+ p = mmapcmd.entry;
-+
-+ for (i = 0; i < mmapcmd.num;
-+ i += PRIVCMD_MMAP_SZ, p += PRIVCMD_MMAP_SZ) {
-+ int j, n = ((mmapcmd.num-i)>PRIVCMD_MMAP_SZ)?
-+ PRIVCMD_MMAP_SZ:(mmapcmd.num-i);
-+
-+ if (copy_from_user(&msg, p,
-+ n*sizeof(privcmd_mmap_entry_t)))
-+ return -EFAULT;
-+
-+ for (j = 0; j < n; j++) {
-+ struct vm_area_struct *vma =
-+ find_vma( current->mm, msg[j].va );
-+
-+ if (!vma)
-+ return -EINVAL;
-+
-+ if (msg[j].va > PAGE_OFFSET)
-+ return -EINVAL;
-+
-+ if ((msg[j].va + (msg[j].npages << PAGE_SHIFT))
-+ > vma->vm_end )
-+ return -EINVAL;
-+
-+ if ((rc = direct_remap_pfn_range(
-+ vma,
-+ msg[j].va&PAGE_MASK,
-+ msg[j].mfn,
-+ msg[j].npages<<PAGE_SHIFT,
-+ vma->vm_page_prot,
-+ mmapcmd.dom)) < 0)
-+ return rc;
-+ }
-+ }
-+ ret = 0;
-+ }
-+ break;
-+
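-+	/*
-+	 * IOCTL_PRIVCMD_MMAPBATCH is the page-at-a-time variant: for
-+	 * each mfn in the user-supplied array a PTE is installed with
-+	 * HYPERVISOR_mmu_update().  A frame that fails to map is
-+	 * reported back in place by or-ing 0xF0000000 into its array
-+	 * slot rather than aborting the whole batch.
-+	 */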
-+ case IOCTL_PRIVCMD_MMAPBATCH: {
-+ mmu_update_t u;
-+ privcmd_mmapbatch_t m;
-+ struct vm_area_struct *vma = NULL;
-+ unsigned long __user *p;
-+ unsigned long addr, mfn;
-+ uint64_t ptep;
-+ int i;
-+
-+ if (copy_from_user(&m, udata, sizeof(m))) {
-+ ret = -EFAULT;
-+ goto batch_err;
-+ }
-+
-+ if (m.dom == DOMID_SELF) {
-+ ret = -EINVAL;
-+ goto batch_err;
-+ }
-+
-+ vma = find_vma(current->mm, m.addr);
-+ if (!vma) {
-+ ret = -EINVAL;
-+ goto batch_err;
-+ }
-+
-+ if (m.addr > PAGE_OFFSET) {
-+ ret = -EFAULT;
-+ goto batch_err;
-+ }
-+
-+ if ((m.addr + (m.num<<PAGE_SHIFT)) > vma->vm_end) {
-+ ret = -EFAULT;
-+ goto batch_err;
-+ }
-+
-+ p = m.arr;
-+ addr = m.addr;
-+ for (i = 0; i < m.num; i++, addr += PAGE_SIZE, p++) {
-+ if (get_user(mfn, p))
-+ return -EFAULT;
-+#ifdef __ia64__
-+ ret = remap_pfn_range(vma,
-+ addr&PAGE_MASK,
-+ mfn,
-+ 1<<PAGE_SHIFT,
-+ vma->vm_page_prot);
-+ if (ret < 0)
-+ goto batch_err;
-+#else
-+
-+ ret = create_lookup_pte_addr(vma->vm_mm, addr, &ptep);
-+ if (ret)
-+ goto batch_err;
-+
-+ u.val = pte_val_ma(pfn_pte_ma(mfn, vma->vm_page_prot));
-+ u.ptr = ptep;
-+
-+ if (HYPERVISOR_mmu_update(&u, 1, NULL, m.dom) < 0)
-+ put_user(0xF0000000 | mfn, p);
-+#endif
-+ }
-+
-+ ret = 0;
-+ break;
-+
-+ batch_err:
-+		printk(KERN_WARNING "batch_err ret=%d vma=%p addr=%lx "
-+ "num=%d arr=%p %lx-%lx\n",
-+ ret, vma, m.addr, m.num, m.arr,
-+ vma ? vma->vm_start : 0, vma ? vma->vm_end : 0);
-+ break;
-+ }
-+ break;
-+#endif
-+
-+ default:
-+ ret = -EINVAL;
-+ break;
-+ }
-+
-+ return ret;
-+}
-+
-+static int privcmd_mmap(struct file * file, struct vm_area_struct * vma)
-+{
-+ /* DONTCOPY is essential for Xen as copy_page_range is broken. */
-+ vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
-+
-+ return 0;
-+}
-+
-+static struct file_operations privcmd_file_ops = {
-+ .ioctl = privcmd_ioctl,
-+ .mmap = privcmd_mmap,
-+};
-+
-+static int capabilities_read(char *page, char **start, off_t off,
-+ int count, int *eof, void *data)
-+{
-+ int len = 0;
-+ *page = 0;
-+
-+ if (xen_start_info->flags & SIF_INITDOMAIN)
-+ len = sprintf( page, "control_d\n" );
-+
-+ *eof = 1;
-+ return len;
-+}
-+
-+static int __init privcmd_init(void)
-+{
-+ /* Set of hypercalls that privileged applications may execute. */
-+ set_bit(__HYPERVISOR_acm_op, hypercall_permission_map);
-+ set_bit(__HYPERVISOR_dom0_op, hypercall_permission_map);
-+ set_bit(__HYPERVISOR_event_channel_op, hypercall_permission_map);
-+ set_bit(__HYPERVISOR_memory_op, hypercall_permission_map);
-+ set_bit(__HYPERVISOR_mmu_update, hypercall_permission_map);
-+ set_bit(__HYPERVISOR_mmuext_op, hypercall_permission_map);
-+ set_bit(__HYPERVISOR_xen_version, hypercall_permission_map);
-+
-+ privcmd_intf = create_xen_proc_entry("privcmd", 0400);
-+ if (privcmd_intf != NULL)
-+ privcmd_intf->proc_fops = &privcmd_file_ops;
-+
-+ capabilities_intf = create_xen_proc_entry("capabilities", 0400 );
-+ if (capabilities_intf != NULL)
-+ capabilities_intf->read_proc = capabilities_read;
-+
-+ return 0;
-+}
-+
-+__initcall(privcmd_init);
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/tpmback/Makefile b/drivers/xen/tpmback/Makefile
-new file mode 100644
-index 0000000..d5865c4
---- /dev/null
-+++ b/drivers/xen/tpmback/Makefile
-@@ -0,0 +1,4 @@
-+
-+obj-$(CONFIG_XEN_TPMDEV_BACKEND) += tpmbk.o
-+
-+tpmbk-y += tpmback.o interface.o xenbus.o
-diff --git a/drivers/xen/tpmback/common.h b/drivers/xen/tpmback/common.h
-new file mode 100644
-index 0000000..44a3d4c
---- /dev/null
-+++ b/drivers/xen/tpmback/common.h
-@@ -0,0 +1,91 @@
-+/******************************************************************************
-+ * drivers/xen/tpmback/common.h
-+ */
-+
-+#ifndef __NETIF__BACKEND__COMMON_H__
-+#define __NETIF__BACKEND__COMMON_H__
-+
-+#include <linux/config.h>
-+#include <linux/version.h>
-+#include <linux/module.h>
-+#include <linux/interrupt.h>
-+#include <linux/slab.h>
-+#include <xen/evtchn.h>
-+#include <xen/driver_util.h>
-+#include <xen/interface/grant_table.h>
-+#include <xen/interface/io/tpmif.h>
-+#include <asm/io.h>
-+#include <asm/pgalloc.h>
-+
-+#define DPRINTK(_f, _a...) pr_debug("(file=%s, line=%d) " _f, \
-+ __FILE__ , __LINE__ , ## _a )
-+
-+typedef struct tpmif_st {
-+ struct list_head tpmif_list;
-+ /* Unique identifier for this interface. */
-+ domid_t domid;
-+ unsigned int handle;
-+
-+ /* Physical parameters of the comms window. */
-+ unsigned int evtchn;
-+ unsigned int irq;
-+
-+ /* The shared rings and indexes. */
-+ tpmif_tx_interface_t *tx;
-+ struct vm_struct *tx_area;
-+
-+ /* Miscellaneous private stuff. */
-+ enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
-+ int active;
-+
-+ struct tpmif_st *hash_next;
-+ struct list_head list; /* scheduling list */
-+ atomic_t refcnt;
-+
-+ long int tpm_instance;
-+ unsigned long mmap_vstart;
-+
-+ struct work_struct work;
-+
-+ grant_handle_t shmem_handle;
-+ grant_ref_t shmem_ref;
-+} tpmif_t;
-+
-+void tpmif_disconnect_complete(tpmif_t * tpmif);
-+tpmif_t *tpmif_find(domid_t domid, long int instance);
-+void tpmif_interface_init(void);
-+void tpmif_interface_exit(void);
-+void tpmif_schedule_work(tpmif_t * tpmif);
-+void tpmif_deschedule_work(tpmif_t * tpmif);
-+void tpmif_xenbus_init(void);
-+void tpmif_xenbus_exit(void);
-+int tpmif_map(tpmif_t *tpmif, unsigned long shared_page, unsigned int evtchn);
-+irqreturn_t tpmif_be_int(int irq, void *dev_id, struct pt_regs *regs);
-+int tpmif_vtpm_open(tpmif_t *tpmif, domid_t domain, u32 instance);
-+int tpmif_vtpm_close(u32 instance);
-+
-+int vtpm_release_packets(tpmif_t * tpmif, int send_msgs);
-+
-+#define tpmif_get(_b) (atomic_inc(&(_b)->refcnt))
-+#define tpmif_put(_b) \
-+ do { \
-+ if ( atomic_dec_and_test(&(_b)->refcnt) ) \
-+ tpmif_disconnect_complete(_b); \
-+ } while (0)
-+
-+
-+extern int num_frontends;
-+
-+#define MMAP_VADDR(t,_req) ((t)->mmap_vstart + ((_req) * PAGE_SIZE))
-+
-+#endif /* __TPMIF__BACKEND__COMMON_H__ */
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/tpmback/interface.c b/drivers/xen/tpmback/interface.c
-new file mode 100644
-index 0000000..93ab2b7
---- /dev/null
-+++ b/drivers/xen/tpmback/interface.c
-@@ -0,0 +1,194 @@
-+/*****************************************************************************
-+ * drivers/xen/tpmback/interface.c
-+ *
-+ * Virtual TPM interface management.
-+ *
-+ * Copyright (c) 2005, IBM Corporation
-+ *
-+ * Author: Stefan Berger, stefanb@us.ibm.com
-+ *
-+ * This code has been derived from drivers/xen/netback/interface.c
-+ * Copyright (c) 2004, Keir Fraser
-+ */
-+
-+#include "common.h"
-+#include <xen/balloon.h>
-+
-+static kmem_cache_t *tpmif_cachep;
-+int num_frontends = 0;
-+
-+LIST_HEAD(tpmif_list);
-+
-+static tpmif_t *alloc_tpmif(domid_t domid, long int instance)
-+{
-+ struct page *page;
-+ tpmif_t *tpmif;
-+
-+ tpmif = kmem_cache_alloc(tpmif_cachep, GFP_KERNEL);
-+ if (!tpmif)
-+ return ERR_PTR(-ENOMEM);
-+
-+ memset(tpmif, 0, sizeof (*tpmif));
-+ tpmif->domid = domid;
-+ tpmif->status = DISCONNECTED;
-+ tpmif->tpm_instance = instance;
-+ atomic_set(&tpmif->refcnt, 1);
-+
-+ page = balloon_alloc_empty_page_range(TPMIF_TX_RING_SIZE);
-+ BUG_ON(page == NULL);
-+ tpmif->mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
-+
-+ list_add(&tpmif->tpmif_list, &tpmif_list);
-+ num_frontends++;
-+
-+ return tpmif;
-+}
-+
-+static void free_tpmif(tpmif_t * tpmif)
-+{
-+ num_frontends--;
-+ list_del(&tpmif->tpmif_list);
-+ kmem_cache_free(tpmif_cachep, tpmif);
-+}
-+
-+tpmif_t *tpmif_find(domid_t domid, long int instance)
-+{
-+ tpmif_t *tpmif;
-+
-+ list_for_each_entry(tpmif, &tpmif_list, tpmif_list) {
-+ if (tpmif->tpm_instance == instance) {
-+ if (tpmif->domid == domid) {
-+ tpmif_get(tpmif);
-+ return tpmif;
-+ } else {
-+ return ERR_PTR(-EEXIST);
-+ }
-+ }
-+ }
-+
-+ return alloc_tpmif(domid, instance);
-+}
-+
-+static int map_frontend_page(tpmif_t *tpmif, unsigned long shared_page)
-+{
-+ int ret;
-+ struct gnttab_map_grant_ref op = {
-+ .host_addr = (unsigned long)tpmif->tx_area->addr,
-+ .flags = GNTMAP_host_map,
-+ .ref = shared_page,
-+ .dom = tpmif->domid,
-+ };
-+
-+ lock_vm_area(tpmif->tx_area);
-+ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
-+ unlock_vm_area(tpmif->tx_area);
-+ BUG_ON(ret);
-+
-+ if (op.status) {
-+ DPRINTK(" Grant table operation failure !\n");
-+ return op.status;
-+ }
-+
-+ tpmif->shmem_ref = shared_page;
-+ tpmif->shmem_handle = op.handle;
-+
-+ return 0;
-+}
-+
-+static void unmap_frontend_page(tpmif_t *tpmif)
-+{
-+ struct gnttab_unmap_grant_ref op;
-+ int ret;
-+
-+ op.host_addr = (unsigned long)tpmif->tx_area->addr;
-+ op.handle = tpmif->shmem_handle;
-+ op.dev_bus_addr = 0;
-+
-+ lock_vm_area(tpmif->tx_area);
-+ ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
-+ unlock_vm_area(tpmif->tx_area);
-+ BUG_ON(ret);
-+}
-+
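-+/*
-+ * Map the frontend's shared ring page (a grant reference) into our
-+ * address space and bind an interdomain event channel to the port the
-+ * frontend advertised; the resulting local port is then hooked up to
-+ * tpmif_be_int() as an IRQ handler.
-+ */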
-+int tpmif_map(tpmif_t *tpmif, unsigned long shared_page, unsigned int evtchn)
-+{
-+ int err;
-+ evtchn_op_t op = {
-+ .cmd = EVTCHNOP_bind_interdomain,
-+ .u.bind_interdomain.remote_dom = tpmif->domid,
-+ .u.bind_interdomain.remote_port = evtchn,
-+ };
-+
-+ if (tpmif->irq) {
-+ return 0;
-+ }
-+
-+ if ((tpmif->tx_area = alloc_vm_area(PAGE_SIZE)) == NULL)
-+ return -ENOMEM;
-+
-+ err = map_frontend_page(tpmif, shared_page);
-+ if (err) {
-+ free_vm_area(tpmif->tx_area);
-+ return err;
-+ }
-+
-+ err = HYPERVISOR_event_channel_op(&op);
-+ if (err) {
-+ unmap_frontend_page(tpmif);
-+ free_vm_area(tpmif->tx_area);
-+ return err;
-+ }
-+
-+ tpmif->evtchn = op.u.bind_interdomain.local_port;
-+
-+ tpmif->tx = (tpmif_tx_interface_t *)tpmif->tx_area->addr;
-+
-+ tpmif->irq = bind_evtchn_to_irqhandler(
-+ tpmif->evtchn, tpmif_be_int, 0, "tpmif-backend", tpmif);
-+ tpmif->shmem_ref = shared_page;
-+ tpmif->active = 1;
-+
-+ return 0;
-+}
-+
-+static void __tpmif_disconnect_complete(void *arg)
-+{
-+ tpmif_t *tpmif = (tpmif_t *) arg;
-+
-+ if (tpmif->irq)
-+ unbind_from_irqhandler(tpmif->irq, tpmif);
-+
-+ if (tpmif->tx) {
-+ unmap_frontend_page(tpmif);
-+ free_vm_area(tpmif->tx_area);
-+ }
-+
-+ free_tpmif(tpmif);
-+}
-+
-+void tpmif_disconnect_complete(tpmif_t * tpmif)
-+{
-+ INIT_WORK(&tpmif->work, __tpmif_disconnect_complete, (void *)tpmif);
-+ schedule_work(&tpmif->work);
-+}
-+
-+void __init tpmif_interface_init(void)
-+{
-+ tpmif_cachep = kmem_cache_create("tpmif_cache", sizeof (tpmif_t),
-+ 0, 0, NULL, NULL);
-+}
-+
-+void tpmif_interface_exit(void)
-+{
-+ kmem_cache_destroy(tpmif_cachep);
-+}
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/tpmback/tpmback.c b/drivers/xen/tpmback/tpmback.c
-new file mode 100644
-index 0000000..1fda74e
---- /dev/null
-+++ b/drivers/xen/tpmback/tpmback.c
-@@ -0,0 +1,1060 @@
-+/******************************************************************************
-+ * drivers/xen/tpmback/tpmback.c
-+ *
-+ * Copyright (c) 2005, IBM Corporation
-+ *
-+ * Author: Stefan Berger, stefanb@us.ibm.com
-+ * Grant table support: Mahadevan Gomathisankaran
-+ *
-+ * This code has been derived from drivers/xen/netback/netback.c
-+ * Copyright (c) 2002-2004, K A Fraser
-+ *
-+ */
-+
-+#include "common.h"
-+#include <xen/evtchn.h>
-+
-+#include <linux/types.h>
-+#include <linux/list.h>
-+#include <linux/miscdevice.h>
-+#include <linux/poll.h>
-+#include <asm/uaccess.h>
-+#include <xen/xenbus.h>
-+#include <xen/interface/grant_table.h>
-+
-+/* local data structures */
-+struct data_exchange {
-+ struct list_head pending_pak;
-+ struct list_head current_pak;
-+ unsigned int copied_so_far;
-+ u8 has_opener;
-+ rwlock_t pak_lock; // protects all of the previous fields
-+ wait_queue_head_t wait_queue;
-+};
-+
-+struct vtpm_resp_hdr {
-+ uint32_t instance_no;
-+ uint16_t tag_no;
-+ uint32_t len_no;
-+ uint32_t ordinal_no;
-+} __attribute__ ((packed));
-+
-+struct packet {
-+ struct list_head next;
-+ unsigned int data_len;
-+ u8 *data_buffer;
-+ tpmif_t *tpmif;
-+ u32 tpm_instance;
-+ u8 req_tag;
-+ u32 last_read;
-+ u8 flags;
-+ struct timer_list processing_timer;
-+};
-+
-+enum {
-+ PACKET_FLAG_DISCARD_RESPONSE = 1,
-+ PACKET_FLAG_CHECK_RESPONSESTATUS = 2,
-+};
-+
-+/* local variables */
-+static struct data_exchange dataex;
-+
-+/* local function prototypes */
-+static int _packet_write(struct packet *pak,
-+ const char *data, size_t size, int userbuffer);
-+static void processing_timeout(unsigned long ptr);
-+static int packet_read_shmem(struct packet *pak,
-+ tpmif_t * tpmif,
-+ u32 offset,
-+ char *buffer, int isuserbuffer, u32 left);
-+static int vtpm_queue_packet(struct packet *pak);
-+
-+#define MIN(x,y) ((x) < (y) ? (x) : (y))	/* parenthesized for safe use in expressions */
-+
-+/***************************************************************
-+ Buffer copying for user and kernel space buffers.
-+***************************************************************/
-+static inline int copy_from_buffer(void *to,
-+ const void *from, unsigned long size,
-+ int isuserbuffer)
-+{
-+ if (isuserbuffer) {
-+ if (copy_from_user(to, (void __user *)from, size))
-+ return -EFAULT;
-+ } else {
-+ memcpy(to, from, size);
-+ }
-+ return 0;
-+}
-+
-+static inline int copy_to_buffer(void *to,
-+ const void *from, unsigned long size,
-+ int isuserbuffer)
-+{
-+ if (isuserbuffer) {
-+ if (copy_to_user((void __user *)to, from, size))
-+ return -EFAULT;
-+ } else {
-+ memcpy(to, from, size);
-+ }
-+ return 0;
-+}
-+
-+/***************************************************************
-+ Packet-related functions
-+***************************************************************/
-+
-+static struct packet *packet_find_instance(struct list_head *head,
-+ u32 tpm_instance)
-+{
-+ struct packet *pak;
-+ struct list_head *p;
-+
-+ /*
-+ * traverse the list of packets and return the first
-+ * one with the given instance number
-+ */
-+ list_for_each(p, head) {
-+ pak = list_entry(p, struct packet, next);
-+
-+ if (pak->tpm_instance == tpm_instance) {
-+ return pak;
-+ }
-+ }
-+ return NULL;
-+}
-+
-+static struct packet *packet_find_packet(struct list_head *head, void *packet)
-+{
-+ struct packet *pak;
-+ struct list_head *p;
-+
-+ /*
-+	 * traverse the list of packets and return the first
-+	 * one that matches the given packet pointer
-+ */
-+ list_for_each(p, head) {
-+ pak = list_entry(p, struct packet, next);
-+
-+ if (pak == packet) {
-+ return pak;
-+ }
-+ }
-+ return NULL;
-+}
-+
-+static struct packet *packet_alloc(tpmif_t * tpmif,
-+ u32 size, u8 req_tag, u8 flags)
-+{
-+ struct packet *pak = NULL;
-+ pak = kzalloc(sizeof (struct packet), GFP_KERNEL);
-+ if (NULL != pak) {
-+ if (tpmif) {
-+ pak->tpmif = tpmif;
-+ pak->tpm_instance = tpmif->tpm_instance;
-+ }
-+ pak->data_len = size;
-+ pak->req_tag = req_tag;
-+ pak->last_read = 0;
-+ pak->flags = flags;
-+
-+ /*
-+ * cannot do tpmif_get(tpmif); bad things happen
-+ * on the last tpmif_put()
-+ */
-+ init_timer(&pak->processing_timer);
-+ pak->processing_timer.function = processing_timeout;
-+ pak->processing_timer.data = (unsigned long)pak;
-+ }
-+ return pak;
-+}
-+
-+static inline void packet_reset(struct packet *pak)
-+{
-+ pak->last_read = 0;
-+}
-+
-+static void packet_free(struct packet *pak)
-+{
-+	BUG_ON(timer_pending(&pak->processing_timer));
-+ kfree(pak->data_buffer);
-+ /*
-+ * cannot do tpmif_put(pak->tpmif); bad things happen
-+ * on the last tpmif_put()
-+ */
-+ kfree(pak);
-+}
-+
-+static int packet_set(struct packet *pak,
-+ const unsigned char *buffer, u32 size)
-+{
-+ int rc = 0;
-+ unsigned char *buf = kmalloc(size, GFP_KERNEL);
-+
-+ if (buf) {
-+ pak->data_buffer = buf;
-+ memcpy(buf, buffer, size);
-+ pak->data_len = size;
-+ } else {
-+ rc = -ENOMEM;
-+ }
-+ return rc;
-+}
-+
-+/*
-+ * Write data to the shared memory and send it to the FE.
-+ */
-+static int packet_write(struct packet *pak,
-+ const char *data, size_t size, int isuserbuffer)
-+{
-+ int rc = 0;
-+
-+ if ((pak->flags & PACKET_FLAG_CHECK_RESPONSESTATUS)) {
-+#ifdef CONFIG_XEN_TPMDEV_CLOSE_IF_VTPM_FAILS
-+ u32 res;
-+
-+ if (copy_from_buffer(&res,
-+ &data[2 + 4], sizeof (res),
-+ isuserbuffer)) {
-+ return -EFAULT;
-+ }
-+
-+ if (res != 0) {
-+ /*
-+ * Close down this device. Should have the
-+ * FE notified about closure.
-+ */
-+ if (!pak->tpmif) {
-+ return -EFAULT;
-+ }
-+ pak->tpmif->status = DISCONNECTING;
-+ }
-+#endif
-+ }
-+
-+ if (0 != (pak->flags & PACKET_FLAG_DISCARD_RESPONSE)) {
-+		/* Don't send a response to this packet. Just acknowledge it. */
-+ rc = size;
-+ } else {
-+ rc = _packet_write(pak, data, size, isuserbuffer);
-+ }
-+
-+ return rc;
-+}
-+
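-+/*
-+ * The response is copied page by page: each ring slot's grant
-+ * reference is mapped, up to PAGE_SIZE bytes are copied into it, and
-+ * the grant is unmapped again before the frontend is notified via its
-+ * event-channel IRQ.
-+ */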
-+static int _packet_write(struct packet *pak,
-+			 const char *data, size_t size, int isuserbuffer)
-+{
-+ /*
-+ * Write into the shared memory pages directly
-+ * and send it to the front end.
-+ */
-+ tpmif_t *tpmif = pak->tpmif;
-+ grant_handle_t handle;
-+ int rc = 0;
-+ unsigned int i = 0;
-+ unsigned int offset = 0;
-+
-+ if (tpmif == NULL) {
-+ return -EFAULT;
-+ }
-+
-+ if (tpmif->status == DISCONNECTED) {
-+ return size;
-+ }
-+
-+ while (offset < size && i < TPMIF_TX_RING_SIZE) {
-+ unsigned int tocopy;
-+ struct gnttab_map_grant_ref map_op;
-+ struct gnttab_unmap_grant_ref unmap_op;
-+ tpmif_tx_request_t *tx;
-+
-+ tx = &tpmif->tx->ring[i].req;
-+
-+ if (0 == tx->addr) {
-+ DPRINTK("ERROR: Buffer for outgoing packet NULL?! i=%d\n", i);
-+ return 0;
-+ }
-+
-+ map_op.host_addr = MMAP_VADDR(tpmif, i);
-+ map_op.flags = GNTMAP_host_map;
-+ map_op.ref = tx->ref;
-+ map_op.dom = tpmif->domid;
-+
-+ if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
-+ &map_op, 1))) {
-+ BUG();
-+ }
-+
-+ handle = map_op.handle;
-+
-+ if (map_op.status) {
-+ DPRINTK(" Grant table operation failure !\n");
-+ return 0;
-+ }
-+ set_phys_to_machine(__pa(MMAP_VADDR(tpmif, i)) >> PAGE_SHIFT,
-+ FOREIGN_FRAME(map_op.
-+ dev_bus_addr >> PAGE_SHIFT));
-+
-+ tocopy = MIN(size - offset, PAGE_SIZE);
-+
-+ if (copy_from_buffer((void *)(MMAP_VADDR(tpmif, i) |
-+ (tx->addr & ~PAGE_MASK)),
-+ &data[offset], tocopy, isuserbuffer)) {
-+ tpmif_put(tpmif);
-+ return -EFAULT;
-+ }
-+ tx->size = tocopy;
-+
-+ unmap_op.host_addr = MMAP_VADDR(tpmif, i);
-+ unmap_op.handle = handle;
-+ unmap_op.dev_bus_addr = 0;
-+
-+ if (unlikely
-+ (HYPERVISOR_grant_table_op
-+ (GNTTABOP_unmap_grant_ref, &unmap_op, 1))) {
-+ BUG();
-+ }
-+
-+ offset += tocopy;
-+ i++;
-+ }
-+
-+ rc = offset;
-+ DPRINTK("Notifying frontend via irq %d\n", tpmif->irq);
-+ notify_remote_via_irq(tpmif->irq);
-+
-+ return rc;
-+}
-+
-+/*
-+ * Read data from the shared memory and copy it directly into the
-+ * provided buffer. Advance the last_read indicator which tells
-+ * how many bytes have already been read.
-+ */
-+static int packet_read(struct packet *pak, size_t numbytes,
-+ char *buffer, size_t buffersize, int isuserbuffer)
-+{
-+ tpmif_t *tpmif = pak->tpmif;
-+
-+ /*
-+ * Read 'numbytes' of data from the buffer. The first 4
-+ * bytes are the instance number in network byte order,
-+ * after that come the data from the shared memory buffer.
-+ */
-+ u32 to_copy;
-+ u32 offset = 0;
-+ u32 room_left = buffersize;
-+
-+ if (pak->last_read < 4) {
-+ /*
-+ * copy the instance number into the buffer
-+ */
-+ u32 instance_no = htonl(pak->tpm_instance);
-+ u32 last_read = pak->last_read;
-+
-+ to_copy = MIN(4 - last_read, numbytes);
-+
-+ if (copy_to_buffer(&buffer[0],
-+ &(((u8 *) & instance_no)[last_read]),
-+ to_copy, isuserbuffer)) {
-+ return -EFAULT;
-+ }
-+
-+ pak->last_read += to_copy;
-+ offset += to_copy;
-+ room_left -= to_copy;
-+ }
-+
-+ /*
-+ * If the packet has a data buffer appended, read from it...
-+ */
-+
-+ if (room_left > 0) {
-+ if (pak->data_buffer) {
-+ u32 to_copy = MIN(pak->data_len - offset, room_left);
-+ u32 last_read = pak->last_read - 4;
-+
-+ if (copy_to_buffer(&buffer[offset],
-+ &pak->data_buffer[last_read],
-+ to_copy, isuserbuffer)) {
-+ return -EFAULT;
-+ }
-+ pak->last_read += to_copy;
-+ offset += to_copy;
-+ } else {
-+ offset = packet_read_shmem(pak,
-+ tpmif,
-+ offset,
-+ buffer,
-+ isuserbuffer, room_left);
-+ }
-+ }
-+ return offset;
-+}
-+
-+static int packet_read_shmem(struct packet *pak,
-+ tpmif_t * tpmif,
-+ u32 offset, char *buffer, int isuserbuffer,
-+ u32 room_left)
-+{
-+ u32 last_read = pak->last_read - 4;
-+ u32 i = (last_read / PAGE_SIZE);
-+ u32 pg_offset = last_read & (PAGE_SIZE - 1);
-+ u32 to_copy;
-+ grant_handle_t handle;
-+
-+ tpmif_tx_request_t *tx;
-+
-+ tx = &tpmif->tx->ring[0].req;
-+ /*
-+	 * Start copying data at the page with index 'i'
-+	 * and within that page at offset 'pg_offset'.
-+ * Copy a maximum of 'room_left' bytes.
-+ */
-+ to_copy = MIN(PAGE_SIZE - pg_offset, room_left);
-+ while (to_copy > 0) {
-+ void *src;
-+ struct gnttab_map_grant_ref map_op;
-+ struct gnttab_unmap_grant_ref unmap_op;
-+
-+ tx = &tpmif->tx->ring[i].req;
-+
-+ map_op.host_addr = MMAP_VADDR(tpmif, i);
-+ map_op.flags = GNTMAP_host_map;
-+ map_op.ref = tx->ref;
-+ map_op.dom = tpmif->domid;
-+
-+ if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
-+ &map_op, 1))) {
-+ BUG();
-+ }
-+
-+ if (map_op.status) {
-+ DPRINTK(" Grant table operation failure !\n");
-+ return -EFAULT;
-+ }
-+
-+ handle = map_op.handle;
-+
-+ if (to_copy > tx->size) {
-+ /*
-+ * User requests more than what's available
-+ */
-+ to_copy = MIN(tx->size, to_copy);
-+ }
-+
-+ DPRINTK("Copying from mapped memory at %08lx\n",
-+ (unsigned long)(MMAP_VADDR(tpmif, i) |
-+ (tx->addr & ~PAGE_MASK)));
-+
-+ src = (void *)(MMAP_VADDR(tpmif, i) |
-+ ((tx->addr & ~PAGE_MASK) + pg_offset));
-+ if (copy_to_buffer(&buffer[offset],
-+ src, to_copy, isuserbuffer)) {
-+ return -EFAULT;
-+ }
-+
-+ DPRINTK("Data from TPM-FE of domain %d are %d %d %d %d\n",
-+ tpmif->domid, buffer[offset], buffer[offset + 1],
-+ buffer[offset + 2], buffer[offset + 3]);
-+
-+ unmap_op.host_addr = MMAP_VADDR(tpmif, i);
-+ unmap_op.handle = handle;
-+ unmap_op.dev_bus_addr = 0;
-+
-+ if (unlikely
-+ (HYPERVISOR_grant_table_op
-+ (GNTTABOP_unmap_grant_ref, &unmap_op, 1))) {
-+ BUG();
-+ }
-+
-+ offset += to_copy;
-+ pg_offset = 0;
-+ last_read += to_copy;
-+ room_left -= to_copy;
-+
-+ to_copy = MIN(PAGE_SIZE, room_left);
-+ i++;
-+ } /* while (to_copy > 0) */
-+ /*
-+ * Adjust the last_read pointer
-+ */
-+ pak->last_read = last_read + 4;
-+ return offset;
-+}
-+
-+/* ============================================================
-+ * The file layer for reading data from this device
-+ * ============================================================
-+ */
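-+/*
-+ * Requests travel from the front-end to the vTPM manager in userspace:
-+ * vtpm_receive() queues them on dataex.pending_pak, vtpm_op_read()
-+ * hands the bytes (prefixed with the 4-byte instance number) to the
-+ * reader and moves fully-read packets to dataex.current_pak, and
-+ * vtpm_op_write() matches the response back to its packet by instance
-+ * number.
-+ */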
-+static int vtpm_op_open(struct inode *inode, struct file *f)
-+{
-+ int rc = 0;
-+ unsigned long flags;
-+
-+ write_lock_irqsave(&dataex.pak_lock, flags);
-+ if (dataex.has_opener == 0) {
-+ dataex.has_opener = 1;
-+ } else {
-+ rc = -EPERM;
-+ }
-+ write_unlock_irqrestore(&dataex.pak_lock, flags);
-+ return rc;
-+}
-+
-+static ssize_t vtpm_op_read(struct file *file,
-+ char __user * data, size_t size, loff_t * offset)
-+{
-+ int ret_size = -ENODATA;
-+ struct packet *pak = NULL;
-+ unsigned long flags;
-+
-+ write_lock_irqsave(&dataex.pak_lock, flags);
-+
-+ if (list_empty(&dataex.pending_pak)) {
-+ write_unlock_irqrestore(&dataex.pak_lock, flags);
-+ wait_event_interruptible(dataex.wait_queue,
-+ !list_empty(&dataex.pending_pak));
-+ write_lock_irqsave(&dataex.pak_lock, flags);
-+ }
-+
-+ if (!list_empty(&dataex.pending_pak)) {
-+ unsigned int left;
-+ pak = list_entry(dataex.pending_pak.next, struct packet, next);
-+
-+ left = pak->data_len - dataex.copied_so_far;
-+
-+ DPRINTK("size given by app: %d, available: %d\n", size, left);
-+
-+ ret_size = MIN(size, left);
-+
-+ ret_size = packet_read(pak, ret_size, data, size, 1);
-+ if (ret_size < 0) {
-+ ret_size = -EFAULT;
-+ } else {
-+ DPRINTK("Copied %d bytes to user buffer\n", ret_size);
-+
-+ dataex.copied_so_far += ret_size;
-+ if (dataex.copied_so_far >= pak->data_len + 4) {
-+ DPRINTK("All data from this packet given to app.\n");
-+ /* All data given to app */
-+
-+ del_singleshot_timer_sync(&pak->
-+ processing_timer);
-+ list_del(&pak->next);
-+ list_add_tail(&pak->next, &dataex.current_pak);
-+ /*
-+				 * The more frontends that are handled at the same time,
-+ * the more time we give the TPM to process the request.
-+ */
-+ mod_timer(&pak->processing_timer,
-+ jiffies + (num_frontends * 60 * HZ));
-+ dataex.copied_so_far = 0;
-+ }
-+ }
-+ }
-+ write_unlock_irqrestore(&dataex.pak_lock, flags);
-+
-+ DPRINTK("Returning result from read to app: %d\n", ret_size);
-+
-+ return ret_size;
-+}
-+
-+/*
-+ * Write operation - only works after a previous read operation!
-+ */
-+static ssize_t vtpm_op_write(struct file *file,
-+ const char __user * data, size_t size,
-+ loff_t * offset)
-+{
-+ struct packet *pak;
-+ int rc = 0;
-+ unsigned int off = 4;
-+ unsigned long flags;
-+ struct vtpm_resp_hdr vrh;
-+
-+ /*
-+ * Minimum required packet size is:
-+ * 4 bytes for instance number
-+ * 2 bytes for tag
-+ * 4 bytes for paramSize
-+ * 4 bytes for the ordinal
-+ * sum: 14 bytes
-+ */
-+ if (size < sizeof (vrh))
-+ return -EFAULT;
-+
-+ if (copy_from_user(&vrh, data, sizeof (vrh)))
-+ return -EFAULT;
-+
-+ /* malformed packet? */
-+ if ((off + ntohl(vrh.len_no)) != size)
-+ return -EFAULT;
-+
-+ write_lock_irqsave(&dataex.pak_lock, flags);
-+ pak = packet_find_instance(&dataex.current_pak,
-+ ntohl(vrh.instance_no));
-+
-+ if (pak == NULL) {
-+ write_unlock_irqrestore(&dataex.pak_lock, flags);
-+ printk(KERN_ALERT "No associated packet! (inst=%d)\n",
-+ ntohl(vrh.instance_no));
-+ return -EFAULT;
-+ }
-+
-+ del_singleshot_timer_sync(&pak->processing_timer);
-+ list_del(&pak->next);
-+
-+ write_unlock_irqrestore(&dataex.pak_lock, flags);
-+
-+ /*
-+	 * The first 'off' bytes hold the instance number - skip them.
-+ */
-+ size -= off;
-+
-+ rc = packet_write(pak, &data[off], size, 1);
-+
-+ if (rc > 0) {
-+ /* I neglected the first 4 bytes */
-+ rc += off;
-+ }
-+ packet_free(pak);
-+ return rc;
-+}
-+
-+static int vtpm_op_release(struct inode *inode, struct file *file)
-+{
-+ unsigned long flags;
-+
-+ vtpm_release_packets(NULL, 1);
-+ write_lock_irqsave(&dataex.pak_lock, flags);
-+ dataex.has_opener = 0;
-+ write_unlock_irqrestore(&dataex.pak_lock, flags);
-+ return 0;
-+}
-+
-+static unsigned int vtpm_op_poll(struct file *file,
-+ struct poll_table_struct *pts)
-+{
-+ unsigned int flags = POLLOUT | POLLWRNORM;
-+
-+ poll_wait(file, &dataex.wait_queue, pts);
-+ if (!list_empty(&dataex.pending_pak)) {
-+ flags |= POLLIN | POLLRDNORM;
-+ }
-+ return flags;
-+}
-+
-+static struct file_operations vtpm_ops = {
-+ .owner = THIS_MODULE,
-+ .llseek = no_llseek,
-+ .open = vtpm_op_open,
-+ .read = vtpm_op_read,
-+ .write = vtpm_op_write,
-+ .release = vtpm_op_release,
-+ .poll = vtpm_op_poll,
-+};
-+
-+static struct miscdevice vtpms_miscdevice = {
-+ .minor = 225,
-+ .name = "vtpm",
-+ .fops = &vtpm_ops,
-+};
-+
-+/***************************************************************
-+ Virtual TPM functions and data structures
-+***************************************************************/
-+
-+static u8 create_cmd[] = {
-+	1, 193,			/* 0: TPM_TAG_RQU_COMMAND */
-+ 0, 0, 0, 19, /* 2: length */
-+ 0, 0, 0, 0x1, /* 6: VTPM_ORD_OPEN */
-+ 0, /* 10: VTPM type */
-+ 0, 0, 0, 0, /* 11: domain id */
-+ 0, 0, 0, 0 /* 15: instance id */
-+};
-+
-+int tpmif_vtpm_open(tpmif_t * tpmif, domid_t domid, u32 instance)
-+{
-+ int rc = 0;
-+ struct packet *pak;
-+
-+ pak = packet_alloc(tpmif,
-+ sizeof (create_cmd),
-+ create_cmd[1],
-+ PACKET_FLAG_DISCARD_RESPONSE |
-+ PACKET_FLAG_CHECK_RESPONSESTATUS);
-+ if (pak) {
-+ u8 buf[sizeof (create_cmd)];
-+ u32 domid_no = htonl((u32) domid);
-+ u32 instance_no = htonl(instance);
-+
-+ memcpy(buf, create_cmd, sizeof (create_cmd));
-+
-+ memcpy(&buf[11], &domid_no, sizeof (u32));
-+ memcpy(&buf[15], &instance_no, sizeof (u32));
-+
-+ /* copy the buffer into the packet */
-+ rc = packet_set(pak, buf, sizeof (buf));
-+
-+ if (rc == 0) {
-+ pak->tpm_instance = 0;
-+ rc = vtpm_queue_packet(pak);
-+ }
-+ if (rc < 0) {
-+ /* could not be queued or built */
-+ packet_free(pak);
-+ }
-+ } else {
-+ rc = -ENOMEM;
-+ }
-+ return rc;
-+}
-+
-+static u8 destroy_cmd[] = {
-+	1, 193,			/* 0: TPM_TAG_RQU_COMMAND */
-+ 0, 0, 0, 14, /* 2: length */
-+ 0, 0, 0, 0x2, /* 6: VTPM_ORD_CLOSE */
-+ 0, 0, 0, 0 /* 10: instance id */
-+};
-+
-+int tpmif_vtpm_close(u32 instid)
-+{
-+ int rc = 0;
-+ struct packet *pak;
-+
-+ pak = packet_alloc(NULL,
-+ sizeof (destroy_cmd),
-+ destroy_cmd[1], PACKET_FLAG_DISCARD_RESPONSE);
-+ if (pak) {
-+ u8 buf[sizeof (destroy_cmd)];
-+ u32 instid_no = htonl(instid);
-+
-+ memcpy(buf, destroy_cmd, sizeof (destroy_cmd));
-+ memcpy(&buf[10], &instid_no, sizeof (u32));
-+
-+ /* copy the buffer into the packet */
-+ rc = packet_set(pak, buf, sizeof (buf));
-+
-+ if (rc == 0) {
-+ pak->tpm_instance = 0;
-+ rc = vtpm_queue_packet(pak);
-+ }
-+ if (rc < 0) {
-+ /* could not be queued or built */
-+ packet_free(pak);
-+ }
-+ } else {
-+ rc = -ENOMEM;
-+ }
-+ return rc;
-+}
-+
-+/***************************************************************
-+ Utility functions
-+***************************************************************/
-+
-+static int tpm_send_fail_message(struct packet *pak, u8 req_tag)
-+{
-+ int rc;
-+ static const unsigned char tpm_error_message_fail[] = {
-+ 0x00, 0x00,
-+ 0x00, 0x00, 0x00, 0x0a,
-+ 0x00, 0x00, 0x00, 0x09 /* TPM_FAIL */
-+ };
-+ unsigned char buffer[sizeof (tpm_error_message_fail)];
-+
-+ memcpy(buffer, tpm_error_message_fail,
-+ sizeof (tpm_error_message_fail));
-+ /*
-+ * Insert the right response tag depending on the given tag
-+	 * Insert the right response tag depending on the given request
-+	 * tag; every response tag is the request tag plus 3.
-+ buffer[1] = req_tag + 3;
-+
-+ /*
-+ * Write the data to shared memory and notify the front-end
-+ */
-+ rc = packet_write(pak, buffer, sizeof (buffer), 0);
-+
-+ return rc;
-+}
-+
-+static void _vtpm_release_packets(struct list_head *head,
-+ tpmif_t * tpmif, int send_msgs)
-+{
-+ struct packet *pak;
-+ struct list_head *pos,
-+ *tmp;
-+
-+ list_for_each_safe(pos, tmp, head) {
-+ pak = list_entry(pos, struct packet, next);
-+
-+ if (tpmif == NULL || pak->tpmif == tpmif) {
-+ int can_send = 0;
-+
-+ del_singleshot_timer_sync(&pak->processing_timer);
-+ list_del(&pak->next);
-+
-+ if (pak->tpmif && pak->tpmif->status == CONNECTED) {
-+ can_send = 1;
-+ }
-+
-+ if (send_msgs && can_send) {
-+ tpm_send_fail_message(pak, pak->req_tag);
-+ }
-+ packet_free(pak);
-+ }
-+ }
-+}
-+
-+int vtpm_release_packets(tpmif_t * tpmif, int send_msgs)
-+{
-+ unsigned long flags;
-+
-+ write_lock_irqsave(&dataex.pak_lock, flags);
-+
-+ _vtpm_release_packets(&dataex.pending_pak, tpmif, send_msgs);
-+ _vtpm_release_packets(&dataex.current_pak, tpmif, send_msgs);
-+
-+ write_unlock_irqrestore(&dataex.pak_lock, flags);
-+ return 0;
-+}
-+
-+static int vtpm_queue_packet(struct packet *pak)
-+{
-+ int rc = 0;
-+
-+ if (dataex.has_opener) {
-+ unsigned long flags;
-+
-+ write_lock_irqsave(&dataex.pak_lock, flags);
-+ list_add_tail(&pak->next, &dataex.pending_pak);
-+ /* give the TPM some time to pick up the request */
-+ mod_timer(&pak->processing_timer, jiffies + (30 * HZ));
-+ write_unlock_irqrestore(&dataex.pak_lock, flags);
-+
-+ wake_up_interruptible(&dataex.wait_queue);
-+ } else {
-+ rc = -EFAULT;
-+ }
-+ return rc;
-+}
-+
-+static int vtpm_receive(tpmif_t * tpmif, u32 size)
-+{
-+ int rc = 0;
-+ unsigned char buffer[10];
-+ __be32 *native_size;
-+ struct packet *pak = packet_alloc(tpmif, size, 0, 0);
-+
-+ if (!pak)
-+ return -ENOMEM;
-+ /*
-+ * Read 10 bytes from the received buffer to test its
-+ * content for validity.
-+ */
-+ if (sizeof (buffer) != packet_read(pak,
-+ sizeof (buffer), buffer,
-+ sizeof (buffer), 0)) {
-+ goto failexit;
-+ }
-+ /*
-+ * Reset the packet read pointer so we can read all its
-+ * contents again.
-+ */
-+ packet_reset(pak);
-+
-+ native_size = (__force __be32 *) (&buffer[4 + 2]);
-+ /*
-+ * Verify that the size of the packet is correct
-+ * as indicated and that there's actually someone reading packets.
-+ * The minimum size of the packet is '10' for tag, size indicator
-+ * and ordinal.
-+ */
-+ if (size < 10 ||
-+ be32_to_cpu(*native_size) != size ||
-+ 0 == dataex.has_opener || tpmif->status != CONNECTED) {
-+ rc = -EINVAL;
-+ goto failexit;
-+ } else {
-+ rc = vtpm_queue_packet(pak);
-+ if (rc < 0)
-+ goto failexit;
-+ }
-+ return 0;
-+
-+ failexit:
-+ if (pak) {
-+ tpm_send_fail_message(pak, buffer[4 + 1]);
-+ packet_free(pak);
-+ }
-+ return rc;
-+}
-+
-+/*
-+ * Timeout function that gets invoked when a packet has not been processed
-+ * during the timeout period.
-+ * The packet must be on a list when this function is invoked. This
-+ * also means that once it is taken off a list, the timer must be
-+ * destroyed as well.
-+ */
-+static void processing_timeout(unsigned long ptr)
-+{
-+ struct packet *pak = (struct packet *)ptr;
-+ unsigned long flags;
-+
-+ write_lock_irqsave(&dataex.pak_lock, flags);
-+ /*
-+	 * Check whether the packet is still on one
-+	 * of the two lists.
-+ */
-+ if (pak == packet_find_packet(&dataex.pending_pak, pak) ||
-+ pak == packet_find_packet(&dataex.current_pak, pak)) {
-+ list_del(&pak->next);
-+ if ((pak->flags & PACKET_FLAG_DISCARD_RESPONSE) == 0) {
-+ tpm_send_fail_message(pak, pak->req_tag);
-+ }
-+ packet_free(pak);
-+ }
-+
-+ write_unlock_irqrestore(&dataex.pak_lock, flags);
-+}
-+
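-+/*
-+ * Ring activity is handled by a tasklet: tpmif_be_int() puts the
-+ * interface on tpm_schedule_list, and tpm_tx_action() later drains the
-+ * list, passing the request in ring slot 0 up via vtpm_receive().
-+ */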
-+static void tpm_tx_action(unsigned long unused);
-+static DECLARE_TASKLET(tpm_tx_tasklet, tpm_tx_action, 0);
-+
-+static struct list_head tpm_schedule_list;
-+static spinlock_t tpm_schedule_list_lock;
-+
-+static inline void maybe_schedule_tx_action(void)
-+{
-+ smp_mb();
-+ tasklet_schedule(&tpm_tx_tasklet);
-+}
-+
-+static inline int __on_tpm_schedule_list(tpmif_t * tpmif)
-+{
-+ return tpmif->list.next != NULL;
-+}
-+
-+static void remove_from_tpm_schedule_list(tpmif_t * tpmif)
-+{
-+ spin_lock_irq(&tpm_schedule_list_lock);
-+ if (likely(__on_tpm_schedule_list(tpmif))) {
-+ list_del(&tpmif->list);
-+ tpmif->list.next = NULL;
-+ tpmif_put(tpmif);
-+ }
-+ spin_unlock_irq(&tpm_schedule_list_lock);
-+}
-+
-+static void add_to_tpm_schedule_list_tail(tpmif_t * tpmif)
-+{
-+ if (__on_tpm_schedule_list(tpmif))
-+ return;
-+
-+ spin_lock_irq(&tpm_schedule_list_lock);
-+ if (!__on_tpm_schedule_list(tpmif) && tpmif->active) {
-+ list_add_tail(&tpmif->list, &tpm_schedule_list);
-+ tpmif_get(tpmif);
-+ }
-+ spin_unlock_irq(&tpm_schedule_list_lock);
-+}
-+
-+void tpmif_schedule_work(tpmif_t * tpmif)
-+{
-+ add_to_tpm_schedule_list_tail(tpmif);
-+ maybe_schedule_tx_action();
-+}
-+
-+void tpmif_deschedule_work(tpmif_t * tpmif)
-+{
-+ remove_from_tpm_schedule_list(tpmif);
-+}
-+
-+static void tpm_tx_action(unsigned long unused)
-+{
-+ struct list_head *ent;
-+ tpmif_t *tpmif;
-+ tpmif_tx_request_t *tx;
-+
-+ DPRINTK("%s: Getting data from front-end(s)!\n", __FUNCTION__);
-+
-+ while (!list_empty(&tpm_schedule_list)) {
-+ /* Get a tpmif from the list with work to do. */
-+ ent = tpm_schedule_list.next;
-+ tpmif = list_entry(ent, tpmif_t, list);
-+ tpmif_get(tpmif);
-+ remove_from_tpm_schedule_list(tpmif);
-+
-+ tx = &tpmif->tx->ring[0].req;
-+
-+ /* pass it up */
-+ vtpm_receive(tpmif, tx->size);
-+
-+ tpmif_put(tpmif);
-+ }
-+}
-+
-+irqreturn_t tpmif_be_int(int irq, void *dev_id, struct pt_regs *regs)
-+{
-+ tpmif_t *tpmif = (tpmif_t *) dev_id;
-+
-+ add_to_tpm_schedule_list_tail(tpmif);
-+ maybe_schedule_tx_action();
-+ return IRQ_HANDLED;
-+}
-+
-+static int __init tpmback_init(void)
-+{
-+ int rc;
-+
-+ if ((rc = misc_register(&vtpms_miscdevice)) != 0) {
-+ printk(KERN_ALERT
-+ "Could not register misc device for TPM BE.\n");
-+ return rc;
-+ }
-+
-+ INIT_LIST_HEAD(&dataex.pending_pak);
-+ INIT_LIST_HEAD(&dataex.current_pak);
-+ dataex.has_opener = 0;
-+ rwlock_init(&dataex.pak_lock);
-+ init_waitqueue_head(&dataex.wait_queue);
-+
-+ spin_lock_init(&tpm_schedule_list_lock);
-+ INIT_LIST_HEAD(&tpm_schedule_list);
-+
-+ tpmif_interface_init();
-+ tpmif_xenbus_init();
-+
-+ printk(KERN_ALERT "Successfully initialized TPM backend driver.\n");
-+
-+ return 0;
-+}
-+
-+module_init(tpmback_init);
-+
-+static void __exit tpmback_exit(void)
-+{
-+ tpmif_xenbus_exit();
-+ tpmif_interface_exit();
-+ misc_deregister(&vtpms_miscdevice);
-+}
-+
-+module_exit(tpmback_exit);
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/tpmback/xenbus.c b/drivers/xen/tpmback/xenbus.c
-new file mode 100644
-index 0000000..9a2be8d
---- /dev/null
-+++ b/drivers/xen/tpmback/xenbus.c
-@@ -0,0 +1,333 @@
-+/* Xenbus code for tpmif backend
-+ Copyright (C) 2005 IBM Corporation
-+ Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
-+
-+ This program is free software; you can redistribute it and/or modify
-+ it under the terms of the GNU General Public License as published by
-+ the Free Software Foundation; either version 2 of the License, or
-+ (at your option) any later version.
-+
-+ This program is distributed in the hope that it will be useful,
-+ but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ GNU General Public License for more details.
-+
-+ You should have received a copy of the GNU General Public License
-+ along with this program; if not, write to the Free Software
-+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+*/
-+#include <stdarg.h>
-+#include <linux/module.h>
-+#include <xen/xenbus.h>
-+#include "common.h"
-+
-+struct backend_info
-+{
-+ struct xenbus_device *dev;
-+
-+ /* our communications channel */
-+ tpmif_t *tpmif;
-+
-+ long int frontend_id;
-+ long int instance; // instance of TPM
-+	u8 is_instance_set;	// whether instance number has been set
-+
-+ /* watch front end for changes */
-+ struct xenbus_watch backend_watch;
-+ XenbusState frontend_state;
-+};
-+
-+static void maybe_connect(struct backend_info *be);
-+static void connect(struct backend_info *be);
-+static int connect_ring(struct backend_info *be);
-+static void backend_changed(struct xenbus_watch *watch,
-+ const char **vec, unsigned int len);
-+static void frontend_changed(struct xenbus_device *dev,
-+ XenbusState frontend_state);
-+
-+static int tpmback_remove(struct xenbus_device *dev)
-+{
-+ struct backend_info *be = dev->data;
-+
-+ if (be->backend_watch.node) {
-+ unregister_xenbus_watch(&be->backend_watch);
-+ kfree(be->backend_watch.node);
-+ be->backend_watch.node = NULL;
-+ }
-+ if (be->tpmif) {
-+ tpmif_put(be->tpmif);
-+ be->tpmif = NULL;
-+ }
-+ kfree(be);
-+ dev->data = NULL;
-+ return 0;
-+}
-+
-+static int tpmback_probe(struct xenbus_device *dev,
-+ const struct xenbus_device_id *id)
-+{
-+ int err;
-+ struct backend_info *be = kmalloc(sizeof(struct backend_info),
-+ GFP_KERNEL);
-+
-+ if (!be) {
-+ xenbus_dev_fatal(dev, -ENOMEM,
-+ "allocating backend structure");
-+ return -ENOMEM;
-+ }
-+
-+ memset(be, 0, sizeof(*be));
-+
-+ be->is_instance_set = 0;
-+ be->dev = dev;
-+ dev->data = be;
-+
-+ err = xenbus_watch_path2(dev, dev->nodename,
-+ "instance", &be->backend_watch,
-+ backend_changed);
-+ if (err) {
-+ goto fail;
-+ }
-+
-+ err = xenbus_switch_state(dev, XBT_NULL, XenbusStateInitWait);
-+ if (err) {
-+ goto fail;
-+ }
-+ return 0;
-+fail:
-+ tpmback_remove(dev);
-+ return err;
-+}
-+
-+
-+static void backend_changed(struct xenbus_watch *watch,
-+ const char **vec, unsigned int len)
-+{
-+ int err;
-+ long instance;
-+ struct backend_info *be
-+ = container_of(watch, struct backend_info, backend_watch);
-+ struct xenbus_device *dev = be->dev;
-+
-+ err = xenbus_scanf(XBT_NULL, dev->nodename,
-+ "instance","%li", &instance);
-+ if (XENBUS_EXIST_ERR(err)) {
-+ return;
-+ }
-+
-+ if (err != 1) {
-+ xenbus_dev_fatal(dev, err, "reading instance");
-+ return;
-+ }
-+
-+ if (be->is_instance_set != 0 && be->instance != instance) {
-+ printk(KERN_WARNING
-+ "tpmback: changing instance (from %ld to %ld) "
-+ "not allowed.\n",
-+ be->instance, instance);
-+ return;
-+ }
-+
-+ if (be->is_instance_set == 0) {
-+ be->tpmif = tpmif_find(dev->otherend_id,
-+ instance);
-+ if (IS_ERR(be->tpmif)) {
-+ err = PTR_ERR(be->tpmif);
-+ be->tpmif = NULL;
-+			xenbus_dev_fatal(dev, err, "creating tpm interface");
-+ return;
-+ }
-+ be->instance = instance;
-+ be->is_instance_set = 1;
-+
-+		/*
-+		 * There's an unfortunate problem:
-+		 * Sometimes after a suspend/resume the frontend's
-+		 * switch to XenbusStateInitialised happens *before*
-+		 * we get to this point.  In that case connect_ring()
-+		 * must have failed (be->tpmif was still NULL), so
-+		 * call it again indirectly via frontend_changed().
-+		 */
-+ if (be->frontend_state == XenbusStateInitialised) {
-+ frontend_changed(dev, be->frontend_state);
-+ }
-+ }
-+}
-+
-+
-+static void frontend_changed(struct xenbus_device *dev,
-+ XenbusState frontend_state)
-+{
-+ struct backend_info *be = dev->data;
-+ int err;
-+
-+ be->frontend_state = frontend_state;
-+
-+ switch (frontend_state) {
-+ case XenbusStateInitialising:
-+ case XenbusStateConnected:
-+ break;
-+
-+ case XenbusStateInitialised:
-+ err = connect_ring(be);
-+ if (err) {
-+ return;
-+ }
-+ maybe_connect(be);
-+ break;
-+
-+ case XenbusStateClosing:
-+ xenbus_switch_state(dev, XBT_NULL, XenbusStateClosing);
-+ break;
-+
-+ case XenbusStateClosed:
-+ /*
-+ * Notify the vTPM manager about the front-end
-+ * having left.
-+ */
-+ tpmif_vtpm_close(be->instance);
-+ device_unregister(&be->dev->dev);
-+ break;
-+
-+ case XenbusStateUnknown:
-+ case XenbusStateInitWait:
-+ default:
-+ xenbus_dev_fatal(dev, -EINVAL,
-+ "saw state %d at frontend",
-+ frontend_state);
-+ break;
-+ }
-+}
-+
-+
-+
-+static void maybe_connect(struct backend_info *be)
-+{
-+ int err;
-+
-+ if (be->tpmif == NULL || be->tpmif->status == CONNECTED)
-+ return;
-+
-+ connect(be);
-+
-+ /*
-+ * Notify the vTPM manager about a new front-end.
-+ */
-+ err = tpmif_vtpm_open(be->tpmif,
-+ be->frontend_id,
-+ be->instance);
-+ if (err) {
-+ xenbus_dev_error(be->dev, err,
-+ "queueing vtpm open packet");
-+ /*
-+ * Should close down this device and notify FE
-+ * about closure.
-+ */
-+ return;
-+ }
-+}
-+
-+
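-+/*
-+ * Write the 'ready' flag and switch to XenbusStateConnected inside one
-+ * xenbus transaction; if the transaction ends with -EAGAIN the whole
-+ * sequence is retried from 'again'.
-+ */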
-+static void connect(struct backend_info *be)
-+{
-+ xenbus_transaction_t xbt;
-+ int err;
-+ struct xenbus_device *dev = be->dev;
-+ unsigned long ready = 1;
-+
-+again:
-+ err = xenbus_transaction_start(&xbt);
-+ if (err) {
-+ xenbus_dev_fatal(be->dev, err, "starting transaction");
-+ return;
-+ }
-+
-+ err = xenbus_printf(xbt, be->dev->nodename,
-+ "ready", "%lu", ready);
-+ if (err) {
-+ xenbus_dev_fatal(be->dev, err, "writing 'ready'");
-+ goto abort;
-+ }
-+
-+ err = xenbus_switch_state(dev, xbt, XenbusStateConnected);
-+ if (err)
-+ goto abort;
-+
-+ be->tpmif->status = CONNECTED;
-+
-+ err = xenbus_transaction_end(xbt, 0);
-+ if (err == -EAGAIN)
-+ goto again;
-+ if (err) {
-+ xenbus_dev_fatal(be->dev, err, "end of transaction");
-+ }
-+ return;
-+abort:
-+ xenbus_transaction_end(xbt, 1);
-+}
-+
-+
-+static int connect_ring(struct backend_info *be)
-+{
-+ struct xenbus_device *dev = be->dev;
-+ unsigned long ring_ref;
-+ unsigned int evtchn;
-+ int err;
-+
-+ err = xenbus_gather(XBT_NULL, dev->otherend,
-+ "ring-ref", "%lu", &ring_ref,
-+ "event-channel", "%u", &evtchn, NULL);
-+ if (err) {
-+ xenbus_dev_error(dev, err,
-+ "reading %s/ring-ref and event-channel",
-+ dev->otherend);
-+ return err;
-+ }
-+ if (be->tpmif != NULL) {
-+ err = tpmif_map(be->tpmif, ring_ref, evtchn);
-+ if (err) {
-+ xenbus_dev_error(dev, err,
-+ "mapping shared-frame %lu port %u",
-+ ring_ref, evtchn);
-+ return err;
-+ }
-+ }
-+ return 0;
-+}
-+
-+
-+static struct xenbus_device_id tpmback_ids[] = {
-+ { "vtpm" },
-+ { "" }
-+};
-+
-+
-+static struct xenbus_driver tpmback = {
-+ .name = "vtpm",
-+ .owner = THIS_MODULE,
-+ .ids = tpmback_ids,
-+ .probe = tpmback_probe,
-+ .remove = tpmback_remove,
-+ .otherend_changed = frontend_changed,
-+};
-+
-+
-+void tpmif_xenbus_init(void)
-+{
-+ xenbus_register_backend(&tpmback);
-+}
-+
-+void tpmif_xenbus_exit(void)
-+{
-+ xenbus_unregister_driver(&tpmback);
-+}
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/tpmfront/Makefile b/drivers/xen/tpmfront/Makefile
-new file mode 100644
-index 0000000..d43666f
---- /dev/null
-+++ b/drivers/xen/tpmfront/Makefile
-@@ -0,0 +1,2 @@
-+
-+obj-$(CONFIG_XEN_TPMDEV_FRONTEND) += tpmfront.o
-diff --git a/drivers/xen/tpmfront/tpmfront.c b/drivers/xen/tpmfront/tpmfront.c
-new file mode 100644
-index 0000000..62e3d56
---- /dev/null
-+++ b/drivers/xen/tpmfront/tpmfront.c
-@@ -0,0 +1,728 @@
-+/*
-+ * Copyright (c) 2005, IBM Corporation
-+ *
-+ * Author: Stefan Berger, stefanb@us.ibm.com
-+ * Grant table support: Mahadevan Gomathisankaran
-+ *
-+ * This code has been derived from drivers/xen/netfront/netfront.c
-+ *
-+ * Copyright (c) 2002-2004, K A Fraser
-+ *
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/module.h>
-+#include <linux/version.h>
-+#include <linux/kernel.h>
-+#include <linux/slab.h>
-+#include <linux/errno.h>
-+#include <linux/interrupt.h>
-+#include <linux/init.h>
-+#include <xen/tpmfe.h>
-+#include <linux/err.h>
-+
-+#include <asm/semaphore.h>
-+#include <asm/io.h>
-+#include <xen/evtchn.h>
-+#include <xen/interface/grant_table.h>
-+#include <xen/interface/io/tpmif.h>
-+#include <asm/uaccess.h>
-+#include <xen/xenbus.h>
-+#include <xen/interface/grant_table.h>
-+
-+#include "tpmfront.h"
-+
-+#undef DEBUG
-+
-+/* locally visible variables */
-+static grant_ref_t gref_head;
-+static struct tpm_private *my_priv;
-+
-+/* local function prototypes */
-+static irqreturn_t tpmif_int(int irq,
-+ void *tpm_priv,
-+ struct pt_regs *ptregs);
-+static void tpmif_rx_action(unsigned long unused);
-+static void tpmif_connect(struct tpm_private *tp, domid_t domid);
-+static DECLARE_TASKLET(tpmif_rx_tasklet, tpmif_rx_action, 0);
-+static int tpm_allocate_buffers(struct tpm_private *tp);
-+static void tpmif_set_connected_state(struct tpm_private *tp,
-+ u8 newstate);
-+static int tpm_xmit(struct tpm_private *tp,
-+ const u8 * buf, size_t count, int userbuffer,
-+ void *remember);
-+
-+#define DPRINTK(fmt, args...) \
-+ pr_debug("xen_tpm_fr (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
-+#define IPRINTK(fmt, args...) \
-+ printk(KERN_INFO "xen_tpm_fr: " fmt, ##args)
-+#define WPRINTK(fmt, args...) \
-+ printk(KERN_WARNING "xen_tpm_fr: " fmt, ##args)
-+
-+
-+static inline int
-+tx_buffer_copy(struct tx_buffer *txb, const u8 * src, int len,
-+ int isuserbuffer)
-+{
-+ int copied = len;
-+
-+ if (len > txb->size) {
-+ copied = txb->size;
-+ }
-+ if (isuserbuffer) {
-+ if (copy_from_user(txb->data, src, copied))
-+ return -EFAULT;
-+ } else {
-+ memcpy(txb->data, src, copied);
-+ }
-+ txb->len = len;
-+ return copied;
-+}
-+
-+static inline struct tx_buffer *tx_buffer_alloc(void)
-+{
-+ struct tx_buffer *txb = kzalloc(sizeof (struct tx_buffer),
-+ GFP_KERNEL);
-+
-+ if (txb) {
-+ txb->len = 0;
-+ txb->size = PAGE_SIZE;
-+ txb->data = (unsigned char *)__get_free_page(GFP_KERNEL);
-+ if (txb->data == NULL) {
-+ kfree(txb);
-+ txb = NULL;
-+ }
-+ }
-+ return txb;
-+}
-+
-+
-+/**************************************************************
-+ Utility function for the tpm_private structure
-+**************************************************************/
-+static inline void tpm_private_init(struct tpm_private *tp)
-+{
-+ spin_lock_init(&tp->tx_lock);
-+ init_waitqueue_head(&tp->wait_q);
-+}
-+
-+static struct tpm_private *tpm_private_get(void)
-+{
-+ if (!my_priv) {
-+ my_priv = kzalloc(sizeof(struct tpm_private), GFP_KERNEL);
-+ if (my_priv) {
-+ tpm_private_init(my_priv);
-+ }
-+ }
-+ return my_priv;
-+}
-+
-+static inline void tpm_private_free(void)
-+{
-+ kfree(my_priv);
-+ my_priv = NULL;
-+}
-+
-+/**************************************************************
-+
-+ The interface to let the tpm plugin register its callback
-+ function and send data to another partition using this module
-+
-+**************************************************************/
-+
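-+/*
-+ * Rough usage sketch (illustrative only, not part of this file): the
-+ * character-device TPM driver registers itself once and then pushes
-+ * commands through this module:
-+ *
-+ *	struct tpmfe_device dev = { .receive = my_receive_fn };
-+ *	tpm_fe_register_receiver(&dev);
-+ *	tpm_fe_send(dev.tpm_private, buf, len, ptr);
-+ */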
-+static DECLARE_MUTEX(upperlayer_lock);
-+static DECLARE_MUTEX(suspend_lock);
-+static struct tpmfe_device *upperlayer_tpmfe;
-+
-+/*
-+ * Send data via this module by calling this function
-+ */
-+int tpm_fe_send(struct tpm_private *tp, const u8 * buf, size_t count, void *ptr)
-+{
-+ int sent;
-+
-+ down(&suspend_lock);
-+ sent = tpm_xmit(tp, buf, count, 0, ptr);
-+ up(&suspend_lock);
-+
-+ return sent;
-+}
-+EXPORT_SYMBOL(tpm_fe_send);
-+
-+/*
-+ * Register a callback for receiving data from this module
-+ */
-+int tpm_fe_register_receiver(struct tpmfe_device *tpmfe_dev)
-+{
-+ int rc = 0;
-+
-+ down(&upperlayer_lock);
-+ if (NULL == upperlayer_tpmfe) {
-+ upperlayer_tpmfe = tpmfe_dev;
-+ tpmfe_dev->max_tx_size = TPMIF_TX_RING_SIZE * PAGE_SIZE;
-+ tpmfe_dev->tpm_private = tpm_private_get();
-+ if (!tpmfe_dev->tpm_private) {
-+ rc = -ENOMEM;
-+ }
-+ } else {
-+ rc = -EBUSY;
-+ }
-+ up(&upperlayer_lock);
-+ return rc;
-+}
-+EXPORT_SYMBOL(tpm_fe_register_receiver);
-+
-+/*
-+ * Unregister the callback for receiving data from this module
-+ */
-+void tpm_fe_unregister_receiver(void)
-+{
-+ down(&upperlayer_lock);
-+ upperlayer_tpmfe = NULL;
-+ up(&upperlayer_lock);
-+}
-+EXPORT_SYMBOL(tpm_fe_unregister_receiver);
-+
-+/*
-+ * Call this function to send data to the upper layer's
-+ * registered receiver function.
-+ */
-+static int tpm_fe_send_upperlayer(const u8 * buf, size_t count,
-+ const void *ptr)
-+{
-+ int rc = 0;
-+
-+ down(&upperlayer_lock);
-+
-+ if (upperlayer_tpmfe && upperlayer_tpmfe->receive)
-+ rc = upperlayer_tpmfe->receive(buf, count, ptr);
-+
-+ up(&upperlayer_lock);
-+ return rc;
-+}
-+
-+/**************************************************************
-+ XENBUS support code
-+**************************************************************/
-+
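-+/*
-+ * Allocate the shared transmit ring, grant the backend access to it,
-+ * and bind an event channel for notifications.  Note that a failed
-+ * event-channel allocation returns without freeing the ring page or
-+ * ending the grant; cleanup is only fully paired in destroy_tpmring().
-+ */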
-+static int setup_tpmring(struct xenbus_device *dev,
-+ struct tpm_private *tp)
-+{
-+ tpmif_tx_interface_t *sring;
-+ int err;
-+
-+ sring = (void *)__get_free_page(GFP_KERNEL);
-+ if (!sring) {
-+ xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
-+ return -ENOMEM;
-+ }
-+ tp->tx = sring;
-+
-+ tpm_allocate_buffers(tp);
-+
-+ err = xenbus_grant_ring(dev, virt_to_mfn(tp->tx));
-+ if (err < 0) {
-+ free_page((unsigned long)sring);
-+ tp->tx = NULL;
-+ xenbus_dev_fatal(dev, err, "allocating grant reference");
-+ goto fail;
-+ }
-+ tp->ring_ref = err;
-+
-+ err = xenbus_alloc_evtchn(dev, &tp->evtchn);
-+ if (err)
-+ goto fail;
-+
-+ tpmif_connect(tp, dev->otherend_id);
-+
-+ return 0;
-+fail:
-+ return err;
-+}
-+
-+
-+static void destroy_tpmring(struct tpm_private *tp)
-+{
-+ tpmif_set_connected_state(tp, 0);
-+ if (tp->tx != NULL) {
-+ gnttab_end_foreign_access(tp->ring_ref, 0,
-+ (unsigned long)tp->tx);
-+ tp->tx = NULL;
-+ }
-+
-+ if (tp->irq)
-+ unbind_from_irqhandler(tp->irq, NULL);
-+ tp->evtchn = tp->irq = 0;
-+}
-+
-+
-+static int talk_to_backend(struct xenbus_device *dev,
-+ struct tpm_private *tp)
-+{
-+ const char *message = NULL;
-+ int err;
-+ xenbus_transaction_t xbt;
-+
-+ err = setup_tpmring(dev, tp);
-+ if (err) {
-+ xenbus_dev_fatal(dev, err, "setting up ring");
-+ goto out;
-+ }
-+
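-+	/*
-+	 * All xenstore writes below are grouped into one transaction; if
-+	 * another writer raced with us, xenbus_transaction_end() returns
-+	 * -EAGAIN and the whole transaction is simply retried.
-+	 */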
-+again:
-+ err = xenbus_transaction_start(&xbt);
-+ if (err) {
-+ xenbus_dev_fatal(dev, err, "starting transaction");
-+ goto destroy_tpmring;
-+ }
-+
-+ err = xenbus_printf(xbt, dev->nodename,
-+			    "ring-ref", "%u", tp->ring_ref);
-+ if (err) {
-+ message = "writing ring-ref";
-+ goto abort_transaction;
-+ }
-+
-+ err = xenbus_printf(xbt, dev->nodename,
-+ "event-channel", "%u", tp->evtchn);
-+ if (err) {
-+ message = "writing event-channel";
-+ goto abort_transaction;
-+ }
-+
-+ err = xenbus_switch_state(dev, xbt, XenbusStateInitialised);
-+ if (err) {
-+ goto abort_transaction;
-+ }
-+
-+ err = xenbus_transaction_end(xbt, 0);
-+ if (err == -EAGAIN)
-+ goto again;
-+ if (err) {
-+ xenbus_dev_fatal(dev, err, "completing transaction");
-+ goto destroy_tpmring;
-+ }
-+ return 0;
-+
-+abort_transaction:
-+ xenbus_transaction_end(xbt, 1);
-+ if (message)
-+ xenbus_dev_error(dev, err, "%s", message);
-+destroy_tpmring:
-+ destroy_tpmring(tp);
-+out:
-+ return err;
-+}
-+
-+/**
-+ * Callback received when the backend's state changes.
-+ */
-+static void backend_changed(struct xenbus_device *dev,
-+ XenbusState backend_state)
-+{
-+ struct tpm_private *tp = dev->data;
-+ DPRINTK("\n");
-+
-+ switch (backend_state) {
-+ case XenbusStateInitialising:
-+ case XenbusStateInitWait:
-+ case XenbusStateInitialised:
-+ case XenbusStateUnknown:
-+ break;
-+
-+ case XenbusStateConnected:
-+ tpmif_set_connected_state(tp, 1);
-+ break;
-+
-+ case XenbusStateClosing:
-+ tpmif_set_connected_state(tp, 0);
-+ break;
-+
-+ case XenbusStateClosed:
-+ if (tp->is_suspended == 0) {
-+ device_unregister(&dev->dev);
-+ }
-+ break;
-+ }
-+}
-+
-+
-+static int tpmfront_probe(struct xenbus_device *dev,
-+ const struct xenbus_device_id *id)
-+{
-+ int err;
-+ int handle;
-+	struct tpm_private *tp = tpm_private_get();
-+
-+	if (!tp)
-+		return -ENOMEM;
-+
-+ err = xenbus_scanf(XBT_NULL, dev->nodename,
-+ "handle", "%i", &handle);
-+ if (XENBUS_EXIST_ERR(err))
-+ return err;
-+
-+ if (err < 0) {
-+		xenbus_dev_fatal(dev, err, "reading handle");
-+ return err;
-+ }
-+
-+ tp->dev = dev;
-+ dev->data = tp;
-+
-+ err = talk_to_backend(dev, tp);
-+ if (err) {
-+ tpm_private_free();
-+ dev->data = NULL;
-+ return err;
-+ }
-+ return 0;
-+}
-+
-+
-+static int tpmfront_remove(struct xenbus_device *dev)
-+{
-+ struct tpm_private *tp = dev->data;
-+ destroy_tpmring(tp);
-+ return 0;
-+}
-+
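-+/*
-+ * Before suspending, block new senders via suspend_lock and poll a
-+ * bounded number of times for an outstanding request to complete; if
-+ * it never does, the busy flag is force-cleared so the save can
-+ * proceed anyway.
-+ */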
-+static int
-+tpmfront_suspend(struct xenbus_device *dev)
-+{
-+ struct tpm_private *tp = dev->data;
-+ u32 ctr;
-+
-+ /* lock, so no app can send */
-+ down(&suspend_lock);
-+ tp->is_suspended = 1;
-+
-+ for (ctr = 0; atomic_read(&tp->tx_busy) && ctr <= 25; ctr++) {
-+ if ((ctr % 10) == 0)
-+ printk("TPM-FE [INFO]: Waiting for outstanding request.\n");
-+ /*
-+ * Wait for a request to be responded to.
-+ */
-+ interruptible_sleep_on_timeout(&tp->wait_q, 100);
-+ }
-+
-+ if (atomic_read(&tp->tx_busy)) {
-+ /*
-+ * A temporary work-around.
-+ */
-+		printk("TPM-FE [WARNING]: Resetting busy flag.\n");
-+ atomic_set(&tp->tx_busy, 0);
-+ }
-+
-+ return 0;
-+}
-+
-+static int
-+tpmfront_resume(struct xenbus_device *dev)
-+{
-+ struct tpm_private *tp = dev->data;
-+ return talk_to_backend(dev, tp);
-+}
-+
-+static void
-+tpmif_connect(struct tpm_private *tp, domid_t domid)
-+{
-+ int err;
-+
-+ tp->backend_id = domid;
-+
-+ err = bind_evtchn_to_irqhandler(tp->evtchn,
-+ tpmif_int, SA_SAMPLE_RANDOM, "tpmif",
-+ tp);
-+ if (err <= 0) {
-+ WPRINTK("bind_evtchn_to_irqhandler failed (err=%d)\n", err);
-+ return;
-+ }
-+
-+ tp->irq = err;
-+}
-+
-+static struct xenbus_device_id tpmfront_ids[] = {
-+ { "vtpm" },
-+ { "" }
-+};
-+
-+static struct xenbus_driver tpmfront = {
-+ .name = "vtpm",
-+ .owner = THIS_MODULE,
-+ .ids = tpmfront_ids,
-+ .probe = tpmfront_probe,
-+ .remove = tpmfront_remove,
-+ .resume = tpmfront_resume,
-+ .otherend_changed = backend_changed,
-+ .suspend = tpmfront_suspend,
-+};
-+
-+static void __init init_tpm_xenbus(void)
-+{
-+ xenbus_register_frontend(&tpmfront);
-+}
-+
-+static void __exit exit_tpm_xenbus(void)
-+{
-+ xenbus_unregister_driver(&tpmfront);
-+}
-+
-+
-+static int
-+tpm_allocate_buffers(struct tpm_private *tp)
-+{
-+ unsigned int i;
-+
-+ for (i = 0; i < TPMIF_TX_RING_SIZE; i++)
-+ tp->tx_buffers[i] = tx_buffer_alloc();
-+ return 1;
-+}
-+
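-+/*
-+ * Tasklet run once the backend has signalled a response: the reply is
-+ * reassembled from the shared tx buffers (its total length is taken
-+ * from the first ring slot), handed to the upper layer, and the grant
-+ * references claimed in tpm_xmit() are released again.
-+ */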
-+static void
-+tpmif_rx_action(unsigned long priv)
-+{
-+ struct tpm_private *tp = (struct tpm_private *)priv;
-+
-+ int i = 0;
-+ unsigned int received;
-+ unsigned int offset = 0;
-+ u8 *buffer;
-+ tpmif_tx_request_t *tx;
-+ tx = &tp->tx->ring[i].req;
-+
-+ received = tx->size;
-+
-+	/* Tasklets run in softirq context; the allocation must not sleep. */
-+	buffer = kmalloc(received, GFP_ATOMIC);
-+ if (NULL == buffer) {
-+ goto exit;
-+ }
-+
-+ for (i = 0; i < TPMIF_TX_RING_SIZE && offset < received; i++) {
-+ struct tx_buffer *txb = tp->tx_buffers[i];
-+ tpmif_tx_request_t *tx;
-+ unsigned int tocopy;
-+
-+ tx = &tp->tx->ring[i].req;
-+ tocopy = tx->size;
-+ if (tocopy > PAGE_SIZE) {
-+ tocopy = PAGE_SIZE;
-+ }
-+
-+ memcpy(&buffer[offset], txb->data, tocopy);
-+
-+ gnttab_release_grant_reference(&gref_head, tx->ref);
-+
-+ offset += tocopy;
-+ }
-+
-+ tpm_fe_send_upperlayer(buffer, received, tp->tx_remember);
-+ kfree(buffer);
-+
-+exit:
-+ atomic_set(&tp->tx_busy, 0);
-+ wake_up_interruptible(&tp->wait_q);
-+}
-+
-+
-+static irqreturn_t
-+tpmif_int(int irq, void *tpm_priv, struct pt_regs *ptregs)
-+{
-+ struct tpm_private *tp = tpm_priv;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&tp->tx_lock, flags);
-+ tpmif_rx_tasklet.data = (unsigned long)tp;
-+ tasklet_schedule(&tpmif_rx_tasklet);
-+ spin_unlock_irqrestore(&tp->tx_lock, flags);
-+
-+ return IRQ_HANDLED;
-+}
-+
-+
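-+/*
-+ * Queue a request in the shared ring.  Only one request may be in
-+ * flight at a time (tx_busy); each page-sized chunk of the payload is
-+ * copied into a pre-allocated tx buffer and granted to the backend
-+ * before the event channel is kicked.
-+ */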
-+static int
-+tpm_xmit(struct tpm_private *tp,
-+ const u8 * buf, size_t count, int isuserbuffer,
-+ void *remember)
-+{
-+ tpmif_tx_request_t *tx;
-+ TPMIF_RING_IDX i;
-+ unsigned int offset = 0;
-+
-+ spin_lock_irq(&tp->tx_lock);
-+
-+ if (unlikely(atomic_read(&tp->tx_busy))) {
-+ printk("tpm_xmit: There's an outstanding request/response "
-+ "on the way!\n");
-+ spin_unlock_irq(&tp->tx_lock);
-+ return -EBUSY;
-+ }
-+
-+ if (tp->is_connected != 1) {
-+ spin_unlock_irq(&tp->tx_lock);
-+ return -EIO;
-+ }
-+
-+ for (i = 0; count > 0 && i < TPMIF_TX_RING_SIZE; i++) {
-+ struct tx_buffer *txb = tp->tx_buffers[i];
-+ int copied;
-+
-+ if (NULL == txb) {
-+			DPRINTK("txb (i=%d) is NULL. buffers initialized?\n"
-+ "Not transmitting anything!\n", i);
-+ spin_unlock_irq(&tp->tx_lock);
-+ return -EFAULT;
-+ }
-+ copied = tx_buffer_copy(txb, &buf[offset], count,
-+ isuserbuffer);
-+ if (copied < 0) {
-+ /* An error occurred */
-+ spin_unlock_irq(&tp->tx_lock);
-+ return copied;
-+ }
-+ count -= copied;
-+ offset += copied;
-+
-+ tx = &tp->tx->ring[i].req;
-+
-+ tx->addr = virt_to_machine(txb->data);
-+ tx->size = txb->len;
-+
-+		DPRINTK("First 4 characters sent by TPM-FE are "
-+			"0x%02x 0x%02x 0x%02x 0x%02x\n",
-+			txb->data[0], txb->data[1],
-+			txb->data[2], txb->data[3]);
-+
-+ /* get the granttable reference for this page */
-+ tx->ref = gnttab_claim_grant_reference(&gref_head);
-+
-+ if (-ENOSPC == tx->ref) {
-+ spin_unlock_irq(&tp->tx_lock);
-+			DPRINTK("Grant table claim reference failed in "
-+				"func:%s line:%d file:%s\n",
-+				__FUNCTION__, __LINE__, __FILE__);
-+ return -ENOSPC;
-+ }
-+ gnttab_grant_foreign_access_ref( tx->ref,
-+ tp->backend_id,
-+ (tx->addr >> PAGE_SHIFT),
-+ 0 /*RW*/);
-+ wmb();
-+ }
-+
-+ atomic_set(&tp->tx_busy, 1);
-+ tp->tx_remember = remember;
-+ mb();
-+
-+ DPRINTK("Notifying backend via event channel %d\n",
-+ tp->evtchn);
-+
-+ notify_remote_via_irq(tp->irq);
-+
-+ spin_unlock_irq(&tp->tx_lock);
-+ return offset;
-+}
-+
-+
-+static void tpmif_notify_upperlayer(struct tpm_private *tp)
-+{
-+ /*
-+ * Notify upper layer about the state of the connection
-+ * to the BE.
-+ */
-+ down(&upperlayer_lock);
-+
-+ if (upperlayer_tpmfe != NULL) {
-+ if (tp->is_connected) {
-+ upperlayer_tpmfe->status(TPMFE_STATUS_CONNECTED);
-+ } else {
-+ upperlayer_tpmfe->status(0);
-+ }
-+ }
-+ up(&upperlayer_lock);
-+}
-+
-+
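-+/*
-+ * Track the connection state.  The suspend_lock taken in
-+ * tpmfront_suspend() is released here once the device reconnects, so
-+ * senders blocked in tpm_fe_send() resume exactly when the backend
-+ * becomes reachable again.
-+ */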
-+static void tpmif_set_connected_state(struct tpm_private *tp, u8 is_connected)
-+{
-+ /*
-+ * Don't notify upper layer if we are in suspend mode and
-+	 * should disconnect - the assumption is that we will resume.
-+ * The semaphore keeps apps from sending.
-+ */
-+ if (is_connected == 0 && tp->is_suspended == 1) {
-+ return;
-+ }
-+
-+ /*
-+ * Unlock the semaphore if we are connected again
-+ * after being suspended - now resuming.
-+ * This also removes the suspend state.
-+ */
-+ if (is_connected == 1 && tp->is_suspended == 1) {
-+ tp->is_suspended = 0;
-+ /* unlock, so apps can resume sending */
-+ up(&suspend_lock);
-+ }
-+
-+ if (is_connected != tp->is_connected) {
-+ tp->is_connected = is_connected;
-+ tpmif_notify_upperlayer(tp);
-+ }
-+}
-+
-+
-+/* =================================================================
-+ * Initialization function.
-+ * =================================================================
-+ */
-+
-+static int __init
-+tpmif_init(void)
-+{
-+ IPRINTK("Initialising the vTPM driver.\n");
-+	if (gnttab_alloc_grant_references(TPMIF_TX_RING_SIZE,
-+					  &gref_head) < 0) {
-+		return -EFAULT;
-+	}
-+
-+ init_tpm_xenbus();
-+
-+ return 0;
-+}
-+
-+module_init(tpmif_init);
-+
-+static void __exit
-+tpmif_exit(void)
-+{
-+ exit_tpm_xenbus();
-+ gnttab_free_grant_references(gref_head);
-+}
-+
-+module_exit(tpmif_exit);
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/tpmfront/tpmfront.h b/drivers/xen/tpmfront/tpmfront.h
-new file mode 100644
-index 0000000..66568ba
---- /dev/null
-+++ b/drivers/xen/tpmfront/tpmfront.h
-@@ -0,0 +1,40 @@
-+#ifndef TPM_FRONT_H
-+#define TPM_FRONT_H
-+
-+struct tpm_private {
-+ tpmif_tx_interface_t *tx;
-+ unsigned int evtchn;
-+ unsigned int irq;
-+ u8 is_connected;
-+ u8 is_suspended;
-+
-+ spinlock_t tx_lock;
-+
-+ struct tx_buffer *tx_buffers[TPMIF_TX_RING_SIZE];
-+
-+ atomic_t tx_busy;
-+ void *tx_remember;
-+ domid_t backend_id;
-+ wait_queue_head_t wait_q;
-+
-+ struct xenbus_device *dev;
-+ int ring_ref;
-+};
-+
-+struct tx_buffer {
-+	unsigned int size;	/* available space in data */
-+	unsigned int len;	/* used space in data */
-+	unsigned char *data;	/* pointer to a page */
-+};
-+
-+#endif
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/util.c b/drivers/xen/util.c
-new file mode 100644
-index 0000000..7f76a39
---- /dev/null
-+++ b/drivers/xen/util.c
-@@ -0,0 +1,80 @@
-+#include <linux/config.h>
-+#include <linux/mm.h>
-+#include <linux/module.h>
-+#include <linux/slab.h>
-+#include <linux/vmalloc.h>
-+#include <asm/uaccess.h>
-+#include <xen/driver_util.h>
-+
-+static int f(pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
-+{
-+ /* generic_page_range() does all the hard work. */
-+ return 0;
-+}
-+
-+struct vm_struct *alloc_vm_area(unsigned long size)
-+{
-+ struct vm_struct *area;
-+
-+ area = get_vm_area(size, VM_IOREMAP);
-+ if (area == NULL)
-+ return NULL;
-+
-+ /*
-+ * This ensures that page tables are constructed for this region
-+ * of kernel virtual address space and mapped into init_mm.
-+ */
-+ if (generic_page_range(&init_mm, (unsigned long)area->addr,
-+ area->size, f, NULL)) {
-+ free_vm_area(area);
-+ return NULL;
-+ }
-+
-+ return area;
-+}
-+EXPORT_SYMBOL_GPL(alloc_vm_area);
-+
-+void free_vm_area(struct vm_struct *area)
-+{
-+ struct vm_struct *ret;
-+ ret = remove_vm_area(area->addr);
-+ BUG_ON(ret != area);
-+ kfree(area);
-+}
-+EXPORT_SYMBOL_GPL(free_vm_area);
-+
-+void lock_vm_area(struct vm_struct *area)
-+{
-+ unsigned long i;
-+ char c;
-+
-+ /*
-+ * Prevent context switch to a lazy mm that doesn't have this area
-+ * mapped into its page tables.
-+ */
-+ preempt_disable();
-+
-+ /*
-+ * Ensure that the page tables are mapped into the current mm. The
-+ * page-fault path will copy the page directory pointers from init_mm.
-+ */
-+ for (i = 0; i < area->size; i += PAGE_SIZE)
-+ (void)__get_user(c, (char __user *)area->addr + i);
-+}
-+EXPORT_SYMBOL_GPL(lock_vm_area);
-+
-+void unlock_vm_area(struct vm_struct *area)
-+{
-+ preempt_enable();
-+}
-+EXPORT_SYMBOL_GPL(unlock_vm_area);
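-+
-+/*
-+ * lock_vm_area()/unlock_vm_area() bracket grant-table hypercalls on an
-+ * area returned by alloc_vm_area() (see xenbus_map_ring_valloc()):
-+ * touching every page first faults the mappings into the current mm,
-+ * and preemption stays disabled so a lazy-mm switch cannot lose them.
-+ */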
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/xenbus/Makefile b/drivers/xen/xenbus/Makefile
-new file mode 100644
-index 0000000..4ef620c
---- /dev/null
-+++ b/drivers/xen/xenbus/Makefile
-@@ -0,0 +1,8 @@
-+obj-y += xenbus.o
-+
-+xenbus-objs =
-+xenbus-objs += xenbus_client.o
-+xenbus-objs += xenbus_comms.o
-+xenbus-objs += xenbus_xs.o
-+xenbus-objs += xenbus_probe.o
-+xenbus-objs += xenbus_dev.o
-diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
-new file mode 100644
-index 0000000..fea04d0
---- /dev/null
-+++ b/drivers/xen/xenbus/xenbus_client.c
-@@ -0,0 +1,405 @@
-+/******************************************************************************
-+ * Client-facing interface for the Xenbus driver. In other words, the
-+ * interface between the Xenbus and the device-specific code, be it the
-+ * frontend or the backend of that driver.
-+ *
-+ * Copyright (C) 2005 XenSource Ltd
-+ *
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <xen/evtchn.h>
-+#include <xen/gnttab.h>
-+#include <xen/xenbus.h>
-+#include <xen/driver_util.h>
-+
-+/* xenbus_probe.c */
-+extern char *kasprintf(const char *fmt, ...);
-+
-+#define DPRINTK(fmt, args...) \
-+ pr_debug("xenbus_client (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args)
-+
-+int xenbus_watch_path(struct xenbus_device *dev, const char *path,
-+ struct xenbus_watch *watch,
-+ void (*callback)(struct xenbus_watch *,
-+ const char **, unsigned int))
-+{
-+ int err;
-+
-+ watch->node = path;
-+ watch->callback = callback;
-+
-+ err = register_xenbus_watch(watch);
-+
-+ if (err) {
-+ watch->node = NULL;
-+ watch->callback = NULL;
-+ xenbus_dev_fatal(dev, err, "adding watch on %s", path);
-+ }
-+
-+ return err;
-+}
-+EXPORT_SYMBOL(xenbus_watch_path);
-+
-+
-+int xenbus_watch_path2(struct xenbus_device *dev, const char *path,
-+ const char *path2, struct xenbus_watch *watch,
-+ void (*callback)(struct xenbus_watch *,
-+ const char **, unsigned int))
-+{
-+ int err;
-+ char *state = kasprintf("%s/%s", path, path2);
-+ if (!state) {
-+ xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
-+ return -ENOMEM;
-+ }
-+ err = xenbus_watch_path(dev, state, watch, callback);
-+
-+ if (err)
-+ kfree(state);
-+ return err;
-+}
-+EXPORT_SYMBOL(xenbus_watch_path2);
-+
-+
-+int xenbus_switch_state(struct xenbus_device *dev,
-+ xenbus_transaction_t xbt,
-+ XenbusState state)
-+{
-+ /* We check whether the state is currently set to the given value, and
-+ if not, then the state is set. We don't want to unconditionally
-+ write the given state, because we don't want to fire watches
-+ unnecessarily. Furthermore, if the node has gone, we don't write
-+ to it, as the device will be tearing down, and we don't want to
-+ resurrect that directory.
-+ */
-+
-+ int current_state;
-+ int err;
-+
-+ if (state == dev->state)
-+ return 0;
-+
-+ err = xenbus_scanf(xbt, dev->nodename, "state", "%d",
-+ &current_state);
-+ if (err != 1)
-+ return 0;
-+
-+ err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
-+ if (err) {
-+ if (state != XenbusStateClosing) /* Avoid looping */
-+ xenbus_dev_fatal(dev, err, "writing new state");
-+ return err;
-+ }
-+
-+ dev->state = state;
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(xenbus_switch_state);
-+
-+
-+/**
-+ * Return the path to the error node for the given device, or NULL on failure.
-+ * If the value returned is non-NULL, it is the caller's responsibility to kfree it.
-+ */
-+static char *error_path(struct xenbus_device *dev)
-+{
-+ return kasprintf("error/%s", dev->nodename);
-+}
-+
-+
-+void _dev_error(struct xenbus_device *dev, int err, const char *fmt,
-+ va_list ap)
-+{
-+ int ret;
-+ unsigned int len;
-+ char *printf_buffer = NULL, *path_buffer = NULL;
-+
-+#define PRINTF_BUFFER_SIZE 4096
-+ printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
-+ if (printf_buffer == NULL)
-+ goto fail;
-+
-+ len = sprintf(printf_buffer, "%i ", -err);
-+ ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);
-+
-+ BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1);
-+
-+ dev_err(&dev->dev, "%s\n", printf_buffer);
-+
-+ path_buffer = error_path(dev);
-+
-+ if (path_buffer == NULL) {
-+ printk("xenbus: failed to write error node for %s (%s)\n",
-+ dev->nodename, printf_buffer);
-+ goto fail;
-+ }
-+
-+ if (xenbus_write(XBT_NULL, path_buffer, "error", printf_buffer) != 0) {
-+ printk("xenbus: failed to write error node for %s (%s)\n",
-+ dev->nodename, printf_buffer);
-+ goto fail;
-+ }
-+
-+fail:
-+	kfree(printf_buffer);	/* kfree() accepts NULL */
-+	kfree(path_buffer);
-+}
-+
-+
-+void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt,
-+ ...)
-+{
-+ va_list ap;
-+
-+ va_start(ap, fmt);
-+ _dev_error(dev, err, fmt, ap);
-+ va_end(ap);
-+}
-+EXPORT_SYMBOL(xenbus_dev_error);
-+
-+
-+void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt,
-+ ...)
-+{
-+ va_list ap;
-+
-+ va_start(ap, fmt);
-+ _dev_error(dev, err, fmt, ap);
-+ va_end(ap);
-+
-+ xenbus_switch_state(dev, XBT_NULL, XenbusStateClosing);
-+}
-+EXPORT_SYMBOL(xenbus_dev_fatal);
-+
-+
-+int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
-+{
-+ int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
-+ if (err < 0)
-+ xenbus_dev_fatal(dev, err, "granting access to ring page");
-+ return err;
-+}
-+EXPORT_SYMBOL(xenbus_grant_ring);
-+
-+
-+int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
-+{
-+ evtchn_op_t op = {
-+ .cmd = EVTCHNOP_alloc_unbound,
-+ .u.alloc_unbound.dom = DOMID_SELF,
-+ .u.alloc_unbound.remote_dom = dev->otherend_id
-+ };
-+ int err = HYPERVISOR_event_channel_op(&op);
-+ if (err)
-+ xenbus_dev_fatal(dev, err, "allocating event channel");
-+ else
-+ *port = op.u.alloc_unbound.port;
-+ return err;
-+}
-+EXPORT_SYMBOL(xenbus_alloc_evtchn);
-+
-+
-+int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port)
-+{
-+ evtchn_op_t op = {
-+ .cmd = EVTCHNOP_bind_interdomain,
-+ .u.bind_interdomain.remote_dom = dev->otherend_id,
-+ .u.bind_interdomain.remote_port = remote_port,
-+ };
-+ int err = HYPERVISOR_event_channel_op(&op);
-+ if (err)
-+ xenbus_dev_fatal(dev, err,
-+ "binding to event channel %d from domain %d",
-+ remote_port, dev->otherend_id);
-+ else
-+ *port = op.u.bind_interdomain.local_port;
-+ return err;
-+}
-+EXPORT_SYMBOL(xenbus_bind_evtchn);
-+
-+
-+int xenbus_free_evtchn(struct xenbus_device *dev, int port)
-+{
-+ evtchn_op_t op = {
-+ .cmd = EVTCHNOP_close,
-+ .u.close.port = port,
-+ };
-+ int err = HYPERVISOR_event_channel_op(&op);
-+ if (err)
-+ xenbus_dev_error(dev, err, "freeing event channel %d", port);
-+ return err;
-+}
-+
-+
-+/* Based on Rusty Russell's skeleton driver's map_page */
-+int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
-+{
-+ struct gnttab_map_grant_ref op = {
-+ .flags = GNTMAP_host_map,
-+ .ref = gnt_ref,
-+ .dom = dev->otherend_id,
-+ };
-+ struct vm_struct *area;
-+
-+ *vaddr = NULL;
-+
-+ area = alloc_vm_area(PAGE_SIZE);
-+ if (!area)
-+ return -ENOMEM;
-+
-+ op.host_addr = (unsigned long)area->addr;
-+
-+ lock_vm_area(area);
-+ BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1));
-+ unlock_vm_area(area);
-+
-+ if (op.status != GNTST_okay) {
-+ free_vm_area(area);
-+ xenbus_dev_fatal(dev, op.status,
-+ "mapping in shared page %d from domain %d",
-+ gnt_ref, dev->otherend_id);
-+ return op.status;
-+ }
-+
-+ /* Stuff the handle in an unused field */
-+ area->phys_addr = (unsigned long)op.handle;
-+
-+ *vaddr = area->addr;
-+ return 0;
-+}
-+EXPORT_SYMBOL(xenbus_map_ring_valloc);
-+
-+
-+int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
-+ grant_handle_t *handle, void *vaddr)
-+{
-+ struct gnttab_map_grant_ref op = {
-+ .host_addr = (unsigned long)vaddr,
-+ .flags = GNTMAP_host_map,
-+ .ref = gnt_ref,
-+ .dom = dev->otherend_id,
-+ };
-+
-+ BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1));
-+
-+ if (op.status != GNTST_okay) {
-+ xenbus_dev_fatal(dev, op.status,
-+ "mapping in shared page %d from domain %d",
-+ gnt_ref, dev->otherend_id);
-+ } else
-+ *handle = op.handle;
-+
-+ return op.status;
-+}
-+EXPORT_SYMBOL(xenbus_map_ring);
-+
-+
-+/* Based on Rusty Russell's skeleton driver's unmap_page */
-+int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
-+{
-+ struct vm_struct *area;
-+ struct gnttab_unmap_grant_ref op = {
-+ .host_addr = (unsigned long)vaddr,
-+ };
-+
-+ /* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr)
-+ * method so that we don't have to muck with vmalloc internals here.
-+ * We could force the user to hang on to their struct vm_struct from
-+ * xenbus_map_ring_valloc, but these 6 lines considerably simplify
-+ * this API.
-+ */
-+ read_lock(&vmlist_lock);
-+ for (area = vmlist; area != NULL; area = area->next) {
-+ if (area->addr == vaddr)
-+ break;
-+ }
-+ read_unlock(&vmlist_lock);
-+
-+ if (!area) {
-+ xenbus_dev_error(dev, -ENOENT,
-+ "can't find mapped virtual address %p", vaddr);
-+ return GNTST_bad_virt_addr;
-+ }
-+
-+ op.handle = (grant_handle_t)area->phys_addr;
-+
-+ lock_vm_area(area);
-+ BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
-+ unlock_vm_area(area);
-+
-+ if (op.status == GNTST_okay)
-+ free_vm_area(area);
-+ else
-+ xenbus_dev_error(dev, op.status,
-+ "unmapping page at handle %d error %d",
-+ (int16_t)area->phys_addr, op.status);
-+
-+ return op.status;
-+}
-+EXPORT_SYMBOL(xenbus_unmap_ring_vfree);
-+
-+
-+int xenbus_unmap_ring(struct xenbus_device *dev,
-+ grant_handle_t handle, void *vaddr)
-+{
-+ struct gnttab_unmap_grant_ref op = {
-+ .host_addr = (unsigned long)vaddr,
-+ .handle = handle,
-+ };
-+
-+ BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
-+
-+ if (op.status != GNTST_okay)
-+ xenbus_dev_error(dev, op.status,
-+ "unmapping page at handle %d error %d",
-+ handle, op.status);
-+
-+ return op.status;
-+}
-+EXPORT_SYMBOL(xenbus_unmap_ring);
-+
-+
-+XenbusState xenbus_read_driver_state(const char *path)
-+{
-+ XenbusState result;
-+ int err = xenbus_gather(XBT_NULL, path, "state", "%d", &result, NULL);
-+ if (err)
-+ result = XenbusStateClosed;
-+
-+ return result;
-+}
-+EXPORT_SYMBOL(xenbus_read_driver_state);
-+
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
-new file mode 100644
-index 0000000..6d06b4d
---- /dev/null
-+++ b/drivers/xen/xenbus/xenbus_comms.c
-@@ -0,0 +1,211 @@
-+/******************************************************************************
-+ * xenbus_comms.c
-+ *
-+ * Low-level code to talk to the Xen Store: ring buffer and event channel.
-+ *
-+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
-+ *
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <asm/hypervisor.h>
-+#include <xen/evtchn.h>
-+#include <linux/wait.h>
-+#include <linux/interrupt.h>
-+#include <linux/sched.h>
-+#include <linux/err.h>
-+#include <xen/xenbus.h>
-+#include "xenbus_comms.h"
-+
-+static int xenbus_irq;
-+
-+extern void xenbus_probe(void *);
-+extern int xenstored_ready;
-+static DECLARE_WORK(probe_work, xenbus_probe, NULL);
-+
-+DECLARE_WAIT_QUEUE_HEAD(xb_waitq);
-+
-+static inline struct xenstore_domain_interface *xenstore_domain_interface(void)
-+{
-+ return mfn_to_virt(xen_start_info->store_mfn);
-+}
-+
-+static irqreturn_t wake_waiting(int irq, void *unused, struct pt_regs *regs)
-+{
-+ if (unlikely(xenstored_ready == 0)) {
-+ xenstored_ready = 1;
-+ schedule_work(&probe_work);
-+ }
-+
-+ wake_up(&xb_waitq);
-+ return IRQ_HANDLED;
-+}
-+
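-+/*
-+ * The producer/consumer indexes are free-running 32-bit counters; only
-+ * MASK_XENSTORE_IDX(idx) is used to address the ring, so unsigned
-+ * wrap-around is harmless: e.g. with cons = 0xfffffff0 and prod = 0x10,
-+ * prod - cons is still the 0x20 bytes that are in flight.
-+ */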
-+static int check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod)
-+{
-+ return ((prod - cons) <= XENSTORE_RING_SIZE);
-+}
-+
-+static void *get_output_chunk(XENSTORE_RING_IDX cons,
-+ XENSTORE_RING_IDX prod,
-+ char *buf, uint32_t *len)
-+{
-+ *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod);
-+ if ((XENSTORE_RING_SIZE - (prod - cons)) < *len)
-+ *len = XENSTORE_RING_SIZE - (prod - cons);
-+ return buf + MASK_XENSTORE_IDX(prod);
-+}
-+
-+static const void *get_input_chunk(XENSTORE_RING_IDX cons,
-+ XENSTORE_RING_IDX prod,
-+ const char *buf, uint32_t *len)
-+{
-+ *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons);
-+ if ((prod - cons) < *len)
-+ *len = prod - cons;
-+ return buf + MASK_XENSTORE_IDX(cons);
-+}
-+
-+int xb_write(const void *data, unsigned len)
-+{
-+ struct xenstore_domain_interface *intf = xenstore_domain_interface();
-+ XENSTORE_RING_IDX cons, prod;
-+ int rc;
-+
-+ while (len != 0) {
-+ void *dst;
-+ unsigned int avail;
-+
-+ rc = wait_event_interruptible(
-+ xb_waitq,
-+ (intf->req_prod - intf->req_cons) !=
-+ XENSTORE_RING_SIZE);
-+ if (rc < 0)
-+ return rc;
-+
-+ /* Read indexes, then verify. */
-+ cons = intf->req_cons;
-+ prod = intf->req_prod;
-+ mb();
-+ if (!check_indexes(cons, prod))
-+ return -EIO;
-+
-+ dst = get_output_chunk(cons, prod, intf->req, &avail);
-+ if (avail == 0)
-+ continue;
-+ if (avail > len)
-+ avail = len;
-+
-+ memcpy(dst, data, avail);
-+ data += avail;
-+ len -= avail;
-+
-+ /* Other side must not see new header until data is there. */
-+ wmb();
-+ intf->req_prod += avail;
-+
-+ /* This implies mb() before other side sees interrupt. */
-+ notify_remote_via_evtchn(xen_start_info->store_evtchn);
-+ }
-+
-+ return 0;
-+}
-+
-+int xb_read(void *data, unsigned len)
-+{
-+ struct xenstore_domain_interface *intf = xenstore_domain_interface();
-+ XENSTORE_RING_IDX cons, prod;
-+ int rc;
-+
-+ while (len != 0) {
-+ unsigned int avail;
-+ const char *src;
-+
-+ rc = wait_event_interruptible(
-+ xb_waitq,
-+ intf->rsp_cons != intf->rsp_prod);
-+ if (rc < 0)
-+ return rc;
-+
-+ /* Read indexes, then verify. */
-+ cons = intf->rsp_cons;
-+ prod = intf->rsp_prod;
-+ mb();
-+ if (!check_indexes(cons, prod))
-+ return -EIO;
-+
-+ src = get_input_chunk(cons, prod, intf->rsp, &avail);
-+ if (avail == 0)
-+ continue;
-+ if (avail > len)
-+ avail = len;
-+
-+ /* We must read header before we read data. */
-+ rmb();
-+
-+ memcpy(data, src, avail);
-+ data += avail;
-+ len -= avail;
-+
-+ /* Other side must not see free space until we've copied out */
-+ mb();
-+ intf->rsp_cons += avail;
-+
-+ pr_debug("Finished read of %i bytes (%i to go)\n", avail, len);
-+
-+ /* Implies mb(): they will see new header. */
-+ notify_remote_via_evtchn(xen_start_info->store_evtchn);
-+ }
-+
-+ return 0;
-+}
-+
-+/* Set up interrupt handler off store event channel. */
-+int xb_init_comms(void)
-+{
-+ int err;
-+
-+ if (xenbus_irq)
-+ unbind_from_irqhandler(xenbus_irq, &xb_waitq);
-+
-+ err = bind_evtchn_to_irqhandler(
-+ xen_start_info->store_evtchn, wake_waiting,
-+ 0, "xenbus", &xb_waitq);
-+ if (err <= 0) {
-+ printk(KERN_ERR "XENBUS request irq failed %i\n", err);
-+ return err;
-+ }
-+
-+ xenbus_irq = err;
-+
-+ return 0;
-+}
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/xenbus/xenbus_comms.h b/drivers/xen/xenbus/xenbus_comms.h
-new file mode 100644
-index 0000000..59ca0d3
---- /dev/null
-+++ b/drivers/xen/xenbus/xenbus_comms.h
-@@ -0,0 +1,50 @@
-+/*
-+ * Private include for xenbus communications.
-+ *
-+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
-+ *
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef _XENBUS_COMMS_H
-+#define _XENBUS_COMMS_H
-+
-+int xs_init(void);
-+int xb_init_comms(void);
-+
-+/* Low level routines. */
-+int xb_write(const void *data, unsigned len);
-+int xb_read(void *data, unsigned len);
-+int xs_input_avail(void);
-+extern wait_queue_head_t xb_waitq;
-+
-+#endif /* _XENBUS_COMMS_H */
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/xenbus/xenbus_dev.c b/drivers/xen/xenbus/xenbus_dev.c
-new file mode 100644
-index 0000000..cdab7e8
---- /dev/null
-+++ b/drivers/xen/xenbus/xenbus_dev.c
-@@ -0,0 +1,238 @@
-+/*
-+ * xenbus_dev.c
-+ *
-+ * Driver giving user-space access to the kernel's xenbus connection
-+ * to xenstore.
-+ *
-+ * Copyright (c) 2005, Christian Limpach
-+ * Copyright (c) 2005, Rusty Russell, IBM Corporation
-+ *
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/uio.h>
-+#include <linux/notifier.h>
-+#include <linux/wait.h>
-+#include <linux/fs.h>
-+
-+#include "xenbus_comms.h"
-+
-+#include <asm/uaccess.h>
-+#include <asm/hypervisor.h>
-+#include <xen/xenbus.h>
-+#include <xen/xen_proc.h>
-+
-+struct xenbus_dev_transaction {
-+ struct list_head list;
-+ xenbus_transaction_t handle;
-+};
-+
-+struct xenbus_dev_data {
-+ /* In-progress transaction. */
-+ struct list_head transactions;
-+
-+ /* Partial request. */
-+ unsigned int len;
-+ union {
-+ struct xsd_sockmsg msg;
-+ char buffer[PAGE_SIZE];
-+ } u;
-+
-+ /* Response queue. */
-+#define MASK_READ_IDX(idx) ((idx)&(PAGE_SIZE-1))
-+ char read_buffer[PAGE_SIZE];
-+ unsigned int read_cons, read_prod;
-+ wait_queue_head_t read_waitq;
-+};
-+
-+static struct proc_dir_entry *xenbus_dev_intf;
-+
-+static ssize_t xenbus_dev_read(struct file *filp,
-+ char __user *ubuf,
-+ size_t len, loff_t *ppos)
-+{
-+ struct xenbus_dev_data *u = filp->private_data;
-+ int i;
-+
-+ if (wait_event_interruptible(u->read_waitq,
-+ u->read_prod != u->read_cons))
-+ return -EINTR;
-+
-+ for (i = 0; i < len; i++) {
-+ if (u->read_cons == u->read_prod)
-+ break;
-+ put_user(u->read_buffer[MASK_READ_IDX(u->read_cons)], ubuf+i);
-+ u->read_cons++;
-+ }
-+
-+ return i;
-+}
-+
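-+/*
-+ * Append a reply to the fixed one-page read buffer.  read_cons and
-+ * read_prod are free-running and masked by MASK_READ_IDX; the BUG_ON
-+ * fires if more than a page of replies accumulates before user space
-+ * reads them out.
-+ */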
-+static void queue_reply(struct xenbus_dev_data *u,
-+ char *data, unsigned int len)
-+{
-+ int i;
-+
-+ for (i = 0; i < len; i++, u->read_prod++)
-+ u->read_buffer[MASK_READ_IDX(u->read_prod)] = data[i];
-+
-+ BUG_ON((u->read_prod - u->read_cons) > sizeof(u->read_buffer));
-+
-+ wake_up(&u->read_waitq);
-+}
-+
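-+/*
-+ * Writes may arrive in pieces: data accumulates in u->u.buffer until a
-+ * complete struct xsd_sockmsg (header plus msg.len bytes of payload)
-+ * is present, and only then is the request forwarded to xenstored.
-+ */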
-+static ssize_t xenbus_dev_write(struct file *filp,
-+ const char __user *ubuf,
-+ size_t len, loff_t *ppos)
-+{
-+ struct xenbus_dev_data *u = filp->private_data;
-+ struct xenbus_dev_transaction *trans = NULL;
-+ void *reply;
-+
-+ if ((len + u->len) > sizeof(u->u.buffer))
-+ return -EINVAL;
-+
-+ if (copy_from_user(u->u.buffer + u->len, ubuf, len) != 0)
-+ return -EFAULT;
-+
-+ u->len += len;
-+ if (u->len < (sizeof(u->u.msg) + u->u.msg.len))
-+ return len;
-+
-+ switch (u->u.msg.type) {
-+ case XS_TRANSACTION_START:
-+ case XS_TRANSACTION_END:
-+ case XS_DIRECTORY:
-+ case XS_READ:
-+ case XS_GET_PERMS:
-+ case XS_RELEASE:
-+ case XS_GET_DOMAIN_PATH:
-+ case XS_WRITE:
-+ case XS_MKDIR:
-+ case XS_RM:
-+ case XS_SET_PERMS:
-+ if (u->u.msg.type == XS_TRANSACTION_START) {
-+ trans = kmalloc(sizeof(*trans), GFP_KERNEL);
-+ if (!trans)
-+ return -ENOMEM;
-+ }
-+
-+ reply = xenbus_dev_request_and_reply(&u->u.msg);
-+ if (IS_ERR(reply)) {
-+ kfree(trans);
-+ return PTR_ERR(reply);
-+ }
-+
-+ if (u->u.msg.type == XS_TRANSACTION_START) {
-+ trans->handle = simple_strtoul(reply, NULL, 0);
-+ list_add(&trans->list, &u->transactions);
-+ } else if (u->u.msg.type == XS_TRANSACTION_END) {
-+ list_for_each_entry(trans, &u->transactions, list)
-+ if (trans->handle == u->u.msg.tx_id)
-+ break;
-+ BUG_ON(&trans->list == &u->transactions);
-+ list_del(&trans->list);
-+ kfree(trans);
-+ }
-+ queue_reply(u, (char *)&u->u.msg, sizeof(u->u.msg));
-+ queue_reply(u, (char *)reply, u->u.msg.len);
-+ kfree(reply);
-+ break;
-+
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ u->len = 0;
-+ return len;
-+}
-+
-+static int xenbus_dev_open(struct inode *inode, struct file *filp)
-+{
-+ struct xenbus_dev_data *u;
-+
-+ if (xen_start_info->store_evtchn == 0)
-+ return -ENOENT;
-+
-+ nonseekable_open(inode, filp);
-+
-+	u = kzalloc(sizeof(*u), GFP_KERNEL);
-+	if (u == NULL)
-+		return -ENOMEM;
-+
-+ INIT_LIST_HEAD(&u->transactions);
-+ init_waitqueue_head(&u->read_waitq);
-+
-+ filp->private_data = u;
-+
-+ return 0;
-+}
-+
-+static int xenbus_dev_release(struct inode *inode, struct file *filp)
-+{
-+ struct xenbus_dev_data *u = filp->private_data;
-+ struct xenbus_dev_transaction *trans, *tmp;
-+
-+ list_for_each_entry_safe(trans, tmp, &u->transactions, list) {
-+ xenbus_transaction_end(trans->handle, 1);
-+ list_del(&trans->list);
-+ kfree(trans);
-+ }
-+
-+ kfree(u);
-+
-+ return 0;
-+}
-+
-+static struct file_operations xenbus_dev_file_ops = {
-+ .read = xenbus_dev_read,
-+ .write = xenbus_dev_write,
-+ .open = xenbus_dev_open,
-+ .release = xenbus_dev_release,
-+};
-+
-+static int __init
-+xenbus_dev_init(void)
-+{
-+ xenbus_dev_intf = create_xen_proc_entry("xenbus", 0400);
-+ if (xenbus_dev_intf)
-+ xenbus_dev_intf->proc_fops = &xenbus_dev_file_ops;
-+
-+ return 0;
-+}
-+
-+__initcall(xenbus_dev_init);
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
-new file mode 100644
-index 0000000..d43f9fb
---- /dev/null
-+++ b/drivers/xen/xenbus/xenbus_probe.c
-@@ -0,0 +1,1081 @@
-+/******************************************************************************
-+ * Talks to Xen Store to figure out what devices we have.
-+ *
-+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
-+ * Copyright (C) 2005 Mike Wray, Hewlett-Packard
-+ * Copyright (C) 2005 XenSource Ltd
-+ *
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#define DPRINTK(fmt, args...) \
-+ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args)
-+
-+#include <linux/kernel.h>
-+#include <linux/err.h>
-+#include <linux/string.h>
-+#include <linux/ctype.h>
-+#include <linux/fcntl.h>
-+#include <linux/mm.h>
-+#include <linux/notifier.h>
-+#include <linux/kthread.h>
-+
-+#include <asm/io.h>
-+#include <asm/page.h>
-+#include <asm/pgtable.h>
-+#include <asm/hypervisor.h>
-+#include <xen/xenbus.h>
-+#include <xen/xen_proc.h>
-+#include <xen/evtchn.h>
-+
-+#include "xenbus_comms.h"
-+
-+extern struct semaphore xenwatch_mutex;
-+
-+#define streq(a, b) (strcmp((a), (b)) == 0)
-+
-+static struct notifier_block *xenstore_chain;
-+
-+/* If something in array of ids matches this device, return it. */
-+static const struct xenbus_device_id *
-+match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev)
-+{
-+ for (; !streq(arr->devicetype, ""); arr++) {
-+ if (streq(arr->devicetype, dev->devicetype))
-+ return arr;
-+ }
-+ return NULL;
-+}
-+
-+static int xenbus_match(struct device *_dev, struct device_driver *_drv)
-+{
-+ struct xenbus_driver *drv = to_xenbus_driver(_drv);
-+
-+ if (!drv->ids)
-+ return 0;
-+
-+ return match_device(drv->ids, to_xenbus_device(_dev)) != NULL;
-+}
-+
-+struct xen_bus_type
-+{
-+ char *root;
-+ unsigned int levels;
-+ int (*get_bus_id)(char bus_id[BUS_ID_SIZE], const char *nodename);
-+ int (*probe)(const char *type, const char *dir);
-+ struct bus_type bus;
-+ struct device dev;
-+};
-+
-+
-+/* device/<type>/<id> => <type>-<id> */
-+static int frontend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename)
-+{
-+ nodename = strchr(nodename, '/');
-+ if (!nodename || strlen(nodename + 1) >= BUS_ID_SIZE) {
-+ printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename);
-+ return -EINVAL;
-+ }
-+
-+ strlcpy(bus_id, nodename + 1, BUS_ID_SIZE);
-+ if (!strchr(bus_id, '/')) {
-+ printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id);
-+ return -EINVAL;
-+ }
-+ *strchr(bus_id, '/') = '-';
-+ return 0;
-+}
-+
-+
-+static int read_otherend_details(struct xenbus_device *xendev,
-+ char *id_node, char *path_node)
-+{
-+ int err = xenbus_gather(XBT_NULL, xendev->nodename,
-+ id_node, "%i", &xendev->otherend_id,
-+ path_node, NULL, &xendev->otherend,
-+ NULL);
-+ if (err) {
-+ xenbus_dev_fatal(xendev, err,
-+ "reading other end details from %s",
-+ xendev->nodename);
-+ return err;
-+ }
-+ if (strlen(xendev->otherend) == 0 ||
-+ !xenbus_exists(XBT_NULL, xendev->otherend, "")) {
-+ xenbus_dev_fatal(xendev, -ENOENT, "missing other end from %s",
-+ xendev->nodename);
-+ kfree(xendev->otherend);
-+ xendev->otherend = NULL;
-+ return -ENOENT;
-+ }
-+
-+ return 0;
-+}
-+
-+
-+static int read_backend_details(struct xenbus_device *xendev)
-+{
-+ return read_otherend_details(xendev, "backend-id", "backend");
-+}
-+
-+
-+static int read_frontend_details(struct xenbus_device *xendev)
-+{
-+ return read_otherend_details(xendev, "frontend-id", "frontend");
-+}
-+
-+
-+static void free_otherend_details(struct xenbus_device *dev)
-+{
-+ kfree(dev->otherend);
-+ dev->otherend = NULL;
-+}
-+
-+
-+static void free_otherend_watch(struct xenbus_device *dev)
-+{
-+ if (dev->otherend_watch.node) {
-+ unregister_xenbus_watch(&dev->otherend_watch);
-+ kfree(dev->otherend_watch.node);
-+ dev->otherend_watch.node = NULL;
-+ }
-+}
-+
-+
-+/* Bus type for frontend drivers. */
-+static int xenbus_probe_frontend(const char *type, const char *name);
-+static struct xen_bus_type xenbus_frontend = {
-+ .root = "device",
-+ .levels = 2, /* device/type/<id> */
-+ .get_bus_id = frontend_bus_id,
-+ .probe = xenbus_probe_frontend,
-+ .bus = {
-+ .name = "xen",
-+ .match = xenbus_match,
-+ },
-+ .dev = {
-+ .bus_id = "xen",
-+ },
-+};
-+
-+/* backend/<type>/<fe-uuid>/<id> => <type>-<fe-domid>-<id> */
-+static int backend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename)
-+{
-+ int domid, err;
-+ const char *devid, *type, *frontend;
-+ unsigned int typelen;
-+
-+ type = strchr(nodename, '/');
-+ if (!type)
-+ return -EINVAL;
-+ type++;
-+ typelen = strcspn(type, "/");
-+ if (!typelen || type[typelen] != '/')
-+ return -EINVAL;
-+
-+ devid = strrchr(nodename, '/') + 1;
-+
-+ err = xenbus_gather(XBT_NULL, nodename, "frontend-id", "%i", &domid,
-+ "frontend", NULL, &frontend,
-+ NULL);
-+ if (err)
-+ return err;
-+ if (strlen(frontend) == 0)
-+ err = -ERANGE;
-+ if (!err && !xenbus_exists(XBT_NULL, frontend, ""))
-+ err = -ENOENT;
-+
-+ kfree(frontend);
-+
-+ if (err)
-+ return err;
-+
-+ if (snprintf(bus_id, BUS_ID_SIZE,
-+ "%.*s-%i-%s", typelen, type, domid, devid) >= BUS_ID_SIZE)
-+ return -ENOSPC;
-+ return 0;
-+}
-+
-+static int xenbus_uevent_backend(struct device *dev, char **envp,
-+ int num_envp, char *buffer, int buffer_size);
-+static int xenbus_probe_backend(const char *type, const char *domid);
-+static struct xen_bus_type xenbus_backend = {
-+ .root = "backend",
-+ .levels = 3, /* backend/type/<frontend>/<id> */
-+ .get_bus_id = backend_bus_id,
-+ .probe = xenbus_probe_backend,
-+ .bus = {
-+ .name = "xen-backend",
-+ .match = xenbus_match,
-+ .uevent = xenbus_uevent_backend,
-+ },
-+ .dev = {
-+ .bus_id = "xen-backend",
-+ },
-+};
-+
-+static int xenbus_uevent_backend(struct device *dev, char **envp,
-+ int num_envp, char *buffer, int buffer_size)
-+{
-+ struct xenbus_device *xdev;
-+ struct xenbus_driver *drv;
-+ int i = 0;
-+ int length = 0;
-+
-+ DPRINTK("");
-+
-+ if (dev == NULL)
-+ return -ENODEV;
-+
-+ xdev = to_xenbus_device(dev);
-+ if (xdev == NULL)
-+ return -ENODEV;
-+
-+ /* stuff we want to pass to /sbin/hotplug */
-+ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
-+ "XENBUS_TYPE=%s", xdev->devicetype);
-+
-+ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
-+ "XENBUS_PATH=%s", xdev->nodename);
-+
-+ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
-+ "XENBUS_BASE_PATH=%s", xenbus_backend.root);
-+
-+ /* terminate, set to next free slot, shrink available space */
-+ envp[i] = NULL;
-+ envp = &envp[i];
-+ num_envp -= i;
-+ buffer = &buffer[length];
-+ buffer_size -= length;
-+
-+ if (dev->driver) {
-+ drv = to_xenbus_driver(dev->driver);
-+ if (drv && drv->uevent)
-+ return drv->uevent(xdev, envp, num_envp, buffer,
-+ buffer_size);
-+ }
-+
-+ return 0;
-+}
-+
-+static void otherend_changed(struct xenbus_watch *watch,
-+ const char **vec, unsigned int len)
-+{
-+ struct xenbus_device *dev =
-+ container_of(watch, struct xenbus_device, otherend_watch);
-+ struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
-+ XenbusState state;
-+
-+ /* Protect us against watches firing on old details when the otherend
-+ details change, say immediately after a resume. */
-+ if (!dev->otherend ||
-+ strncmp(dev->otherend, vec[XS_WATCH_PATH],
-+ strlen(dev->otherend))) {
-+ DPRINTK("Ignoring watch at %s", vec[XS_WATCH_PATH]);
-+ return;
-+ }
-+
-+ state = xenbus_read_driver_state(dev->otherend);
-+
-+ DPRINTK("state is %d, %s, %s",
-+ state, dev->otherend_watch.node, vec[XS_WATCH_PATH]);
-+ if (drv->otherend_changed)
-+ drv->otherend_changed(dev, state);
-+}
-+
-+
-+static int talk_to_otherend(struct xenbus_device *dev)
-+{
-+ struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
-+
-+ free_otherend_watch(dev);
-+ free_otherend_details(dev);
-+
-+ return drv->read_otherend_details(dev);
-+}
-+
-+
-+static int watch_otherend(struct xenbus_device *dev)
-+{
-+ return xenbus_watch_path2(dev, dev->otherend, "state",
-+ &dev->otherend_watch, otherend_changed);
-+}
-+
-+
-+static int xenbus_dev_probe(struct device *_dev)
-+{
-+ struct xenbus_device *dev = to_xenbus_device(_dev);
-+ struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
-+ const struct xenbus_device_id *id;
-+ int err;
-+
-+ DPRINTK("");
-+
-+ if (!drv->probe) {
-+ err = -ENODEV;
-+ goto fail;
-+ }
-+
-+ id = match_device(drv->ids, dev);
-+ if (!id) {
-+ err = -ENODEV;
-+ goto fail;
-+ }
-+
-+ err = talk_to_otherend(dev);
-+ if (err) {
-+ printk(KERN_WARNING
-+ "xenbus_probe: talk_to_otherend on %s failed.\n",
-+ dev->nodename);
-+ return err;
-+ }
-+
-+ err = drv->probe(dev, id);
-+ if (err)
-+ goto fail;
-+
-+ err = watch_otherend(dev);
-+ if (err) {
-+ printk(KERN_WARNING
-+ "xenbus_probe: watch_otherend on %s failed.\n",
-+ dev->nodename);
-+ return err;
-+ }
-+
-+ return 0;
-+fail:
-+ xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename);
-+ xenbus_switch_state(dev, XBT_NULL, XenbusStateClosed);
-+ return -ENODEV;
-+}
-+
-+static int xenbus_dev_remove(struct device *_dev)
-+{
-+ struct xenbus_device *dev = to_xenbus_device(_dev);
-+ struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
-+
-+ DPRINTK("");
-+
-+ free_otherend_watch(dev);
-+ free_otherend_details(dev);
-+
-+ if (drv->remove)
-+ drv->remove(dev);
-+
-+ xenbus_switch_state(dev, XBT_NULL, XenbusStateClosed);
-+ return 0;
-+}
-+
-+static int xenbus_register_driver_common(struct xenbus_driver *drv,
-+ struct xen_bus_type *bus)
-+{
-+ int ret;
-+
-+ drv->driver.name = drv->name;
-+ drv->driver.bus = &bus->bus;
-+ drv->driver.owner = drv->owner;
-+ drv->driver.probe = xenbus_dev_probe;
-+ drv->driver.remove = xenbus_dev_remove;
-+
-+ down(&xenwatch_mutex);
-+ ret = driver_register(&drv->driver);
-+ up(&xenwatch_mutex);
-+ return ret;
-+}
-+
-+int xenbus_register_frontend(struct xenbus_driver *drv)
-+{
-+ drv->read_otherend_details = read_backend_details;
-+
-+ return xenbus_register_driver_common(drv, &xenbus_frontend);
-+}
-+EXPORT_SYMBOL(xenbus_register_frontend);
-+
-+int xenbus_register_backend(struct xenbus_driver *drv)
-+{
-+ drv->read_otherend_details = read_frontend_details;
-+
-+ return xenbus_register_driver_common(drv, &xenbus_backend);
-+}
-+EXPORT_SYMBOL(xenbus_register_backend);
-+
-+void xenbus_unregister_driver(struct xenbus_driver *drv)
-+{
-+ driver_unregister(&drv->driver);
-+}
-+EXPORT_SYMBOL(xenbus_unregister_driver);
-+
-+struct xb_find_info
-+{
-+ struct xenbus_device *dev;
-+ const char *nodename;
-+};
-+
-+static int cmp_dev(struct device *dev, void *data)
-+{
-+ struct xenbus_device *xendev = to_xenbus_device(dev);
-+ struct xb_find_info *info = data;
-+
-+ if (streq(xendev->nodename, info->nodename)) {
-+ info->dev = xendev;
-+ get_device(dev);
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+struct xenbus_device *xenbus_device_find(const char *nodename,
-+ struct bus_type *bus)
-+{
-+ struct xb_find_info info = { .dev = NULL, .nodename = nodename };
-+
-+ bus_for_each_dev(bus, NULL, &info, cmp_dev);
-+ return info.dev;
-+}
-+
-+static int cleanup_dev(struct device *dev, void *data)
-+{
-+ struct xenbus_device *xendev = to_xenbus_device(dev);
-+ struct xb_find_info *info = data;
-+ int len = strlen(info->nodename);
-+
-+ DPRINTK("%s", info->nodename);
-+
-+ /* Match the info->nodename path, or any subdirectory of that path. */
-+ if (strncmp(xendev->nodename, info->nodename, len))
-+ return 0;
-+
-+ /* If the node name is longer, ensure it really is a subdirectory. */
-+ if ((strlen(xendev->nodename) > len) && (xendev->nodename[len] != '/'))
-+ return 0;
-+
-+ info->dev = xendev;
-+ get_device(dev);
-+ return 1;
-+}
-+
-+static void xenbus_cleanup_devices(const char *path, struct bus_type *bus)
-+{
-+ struct xb_find_info info = { .nodename = path };
-+
-+ do {
-+ info.dev = NULL;
-+ bus_for_each_dev(bus, NULL, &info, cleanup_dev);
-+ if (info.dev) {
-+ device_unregister(&info.dev->dev);
-+ put_device(&info.dev->dev);
-+ }
-+ } while (info.dev);
-+}
-+
-+static void xenbus_dev_free(struct xenbus_device *xendev)
-+{
-+ kfree(xendev);
-+}
-+
-+static void xenbus_dev_release(struct device *dev)
-+{
-+ if (dev)
-+ xenbus_dev_free(to_xenbus_device(dev));
-+}
-+
-+/* Simplified asprintf. */
-+char *kasprintf(const char *fmt, ...)
-+{
-+ va_list ap;
-+ unsigned int len;
-+ char *p, dummy[1];
-+
-+ va_start(ap, fmt);
-+ /* FIXME: vsnprintf has a bug, NULL should work */
-+ len = vsnprintf(dummy, 0, fmt, ap);
-+ va_end(ap);
-+
-+ p = kmalloc(len + 1, GFP_KERNEL);
-+ if (!p)
-+ return NULL;
-+ va_start(ap, fmt);
-+ vsprintf(p, fmt, ap);
-+ va_end(ap);
-+ return p;
-+}
-+
-+static ssize_t xendev_show_nodename(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename);
-+}
-+DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL);
-+
-+static ssize_t xendev_show_devtype(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype);
-+}
-+DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL);
-+
-+
-+static int xenbus_probe_node(struct xen_bus_type *bus,
-+ const char *type,
-+ const char *nodename)
-+{
-+ int err;
-+ struct xenbus_device *xendev;
-+ size_t stringlen;
-+ char *tmpstring;
-+
-+ XenbusState state = xenbus_read_driver_state(nodename);
-+
-+ if (state != XenbusStateInitialising) {
-+ /* Device is not new, so ignore it. This can happen if a
-+ device is going away after switching to Closed. */
-+ return 0;
-+ }
-+
-+ stringlen = strlen(nodename) + 1 + strlen(type) + 1;
-+	xendev = kzalloc(sizeof(*xendev) + stringlen, GFP_KERNEL);
-+	if (!xendev)
-+		return -ENOMEM;
-+
-+ /* Copy the strings into the extra space. */
-+
-+ tmpstring = (char *)(xendev + 1);
-+ strcpy(tmpstring, nodename);
-+ xendev->nodename = tmpstring;
-+
-+ tmpstring += strlen(tmpstring) + 1;
-+ strcpy(tmpstring, type);
-+ xendev->devicetype = tmpstring;
-+
-+ xendev->dev.parent = &bus->dev;
-+ xendev->dev.bus = &bus->bus;
-+ xendev->dev.release = xenbus_dev_release;
-+
-+ err = bus->get_bus_id(xendev->dev.bus_id, xendev->nodename);
-+ if (err)
-+ goto fail;
-+
-+ /* Register with generic device framework. */
-+ err = device_register(&xendev->dev);
-+ if (err)
-+ goto fail;
-+
-+ device_create_file(&xendev->dev, &dev_attr_nodename);
-+ device_create_file(&xendev->dev, &dev_attr_devtype);
-+
-+ return 0;
-+fail:
-+ xenbus_dev_free(xendev);
-+ return err;
-+}
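-+
-+/*
-+ * Note (added commentary): xenbus_probe_node() co-allocates the nodename
-+ * and devicetype strings in the same kmalloc() block as the struct
-+ * xenbus_device itself, which is why xenbus_dev_free() above is a single
-+ * kfree(xendev) with no per-string frees.
-+ */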
-+
-+/* device/<typename>/<name> */
-+static int xenbus_probe_frontend(const char *type, const char *name)
-+{
-+ char *nodename;
-+ int err;
-+
-+ nodename = kasprintf("%s/%s/%s", xenbus_frontend.root, type, name);
-+ if (!nodename)
-+ return -ENOMEM;
-+
-+ DPRINTK("%s", nodename);
-+
-+ err = xenbus_probe_node(&xenbus_frontend, type, nodename);
-+ kfree(nodename);
-+ return err;
-+}
-+
-+/* backend/<typename>/<frontend-uuid>/<name> */
-+static int xenbus_probe_backend_unit(const char *dir,
-+ const char *type,
-+ const char *name)
-+{
-+ char *nodename;
-+ int err;
-+
-+ nodename = kasprintf("%s/%s", dir, name);
-+ if (!nodename)
-+ return -ENOMEM;
-+
-+ DPRINTK("%s\n", nodename);
-+
-+ err = xenbus_probe_node(&xenbus_backend, type, nodename);
-+ kfree(nodename);
-+ return err;
-+}
-+
-+/* backend/<typename>/<frontend-domid> */
-+static int xenbus_probe_backend(const char *type, const char *domid)
-+{
-+ char *nodename;
-+ int err = 0;
-+ char **dir;
-+ unsigned int i, dir_n = 0;
-+
-+ DPRINTK("");
-+
-+ nodename = kasprintf("%s/%s/%s", xenbus_backend.root, type, domid);
-+ if (!nodename)
-+ return -ENOMEM;
-+
-+ dir = xenbus_directory(XBT_NULL, nodename, "", &dir_n);
-+ if (IS_ERR(dir)) {
-+ kfree(nodename);
-+ return PTR_ERR(dir);
-+ }
-+
-+ for (i = 0; i < dir_n; i++) {
-+ err = xenbus_probe_backend_unit(nodename, type, dir[i]);
-+ if (err)
-+ break;
-+ }
-+ kfree(dir);
-+ kfree(nodename);
-+ return err;
-+}
-+
-+static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type)
-+{
-+ int err = 0;
-+ char **dir;
-+ unsigned int dir_n = 0;
-+ int i;
-+
-+ dir = xenbus_directory(XBT_NULL, bus->root, type, &dir_n);
-+ if (IS_ERR(dir))
-+ return PTR_ERR(dir);
-+
-+ for (i = 0; i < dir_n; i++) {
-+ err = bus->probe(type, dir[i]);
-+ if (err)
-+ break;
-+ }
-+ kfree(dir);
-+ return err;
-+}
-+
-+static int xenbus_probe_devices(struct xen_bus_type *bus)
-+{
-+ int err = 0;
-+ char **dir;
-+ unsigned int i, dir_n;
-+
-+ dir = xenbus_directory(XBT_NULL, bus->root, "", &dir_n);
-+ if (IS_ERR(dir))
-+ return PTR_ERR(dir);
-+
-+ for (i = 0; i < dir_n; i++) {
-+ err = xenbus_probe_device_type(bus, dir[i]);
-+ if (err)
-+ break;
-+ }
-+ kfree(dir);
-+ return err;
-+}
-+
-+static unsigned int char_count(const char *str, char c)
-+{
-+ unsigned int i, ret = 0;
-+
-+ for (i = 0; str[i]; i++)
-+ if (str[i] == c)
-+ ret++;
-+ return ret;
-+}
-+
-+static int strsep_len(const char *str, char c, unsigned int len)
-+{
-+ unsigned int i;
-+
-+ for (i = 0; str[i]; i++)
-+ if (str[i] == c) {
-+ if (len == 0)
-+ return i;
-+ len--;
-+ }
-+ return (len == 0) ? i : -ERANGE;
-+}
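-+
-+/*
-+ * Worked example (illustrative): with node = "device/vbd/51712/state"
-+ * and bus->levels = 2, strsep_len(node, '/', 2) skips two separators
-+ * and returns 16, the offset of the third '/', so dev_changed() below
-+ * truncates the path to the device root "device/vbd/51712".
-+ */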
-+
-+static void dev_changed(const char *node, struct xen_bus_type *bus)
-+{
-+ int exists, rootlen;
-+ struct xenbus_device *dev;
-+ char type[BUS_ID_SIZE];
-+ const char *p, *root;
-+
-+ if (char_count(node, '/') < 2)
-+ return;
-+
-+ exists = xenbus_exists(XBT_NULL, node, "");
-+ if (!exists) {
-+ xenbus_cleanup_devices(node, &bus->bus);
-+ return;
-+ }
-+
-+ /* backend/<type>/... or device/<type>/... */
-+ p = strchr(node, '/') + 1;
-+ snprintf(type, BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p);
-+ type[BUS_ID_SIZE-1] = '\0';
-+
-+ rootlen = strsep_len(node, '/', bus->levels);
-+ if (rootlen < 0)
-+ return;
-+ root = kasprintf("%.*s", rootlen, node);
-+ if (!root)
-+ return;
-+
-+ dev = xenbus_device_find(root, &bus->bus);
-+ if (!dev)
-+ xenbus_probe_node(bus, type, root);
-+ else
-+ put_device(&dev->dev);
-+
-+ kfree(root);
-+}
-+
-+static void frontend_changed(struct xenbus_watch *watch,
-+ const char **vec, unsigned int len)
-+{
-+ DPRINTK("");
-+
-+ dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
-+}
-+
-+static void backend_changed(struct xenbus_watch *watch,
-+ const char **vec, unsigned int len)
-+{
-+ DPRINTK("");
-+
-+ dev_changed(vec[XS_WATCH_PATH], &xenbus_backend);
-+}
-+
-+/* We watch for devices appearing and vanishing. */
-+static struct xenbus_watch fe_watch = {
-+ .node = "device",
-+ .callback = frontend_changed,
-+};
-+
-+static struct xenbus_watch be_watch = {
-+ .node = "backend",
-+ .callback = backend_changed,
-+};
-+
-+static int suspend_dev(struct device *dev, void *data)
-+{
-+ int err = 0;
-+ struct xenbus_driver *drv;
-+ struct xenbus_device *xdev;
-+
-+ DPRINTK("");
-+
-+ if (dev->driver == NULL)
-+ return 0;
-+ drv = to_xenbus_driver(dev->driver);
-+ xdev = container_of(dev, struct xenbus_device, dev);
-+ if (drv->suspend)
-+ err = drv->suspend(xdev);
-+ if (err)
-+ printk(KERN_WARNING
-+ "xenbus: suspend %s failed: %i\n", dev->bus_id, err);
-+ return 0;
-+}
-+
-+static int resume_dev(struct device *dev, void *data)
-+{
-+ int err;
-+ struct xenbus_driver *drv;
-+ struct xenbus_device *xdev;
-+
-+ DPRINTK("");
-+
-+ if (dev->driver == NULL)
-+ return 0;
-+ drv = to_xenbus_driver(dev->driver);
-+ xdev = container_of(dev, struct xenbus_device, dev);
-+
-+ err = talk_to_otherend(xdev);
-+ if (err) {
-+ printk(KERN_WARNING
-+ "xenbus: resume (talk_to_otherend) %s failed: %i\n",
-+ dev->bus_id, err);
-+ return err;
-+ }
-+
-+ err = watch_otherend(xdev);
-+ if (err) {
-+ printk(KERN_WARNING
-+ "xenbus_probe: resume (watch_otherend) %s failed: "
-+ "%d.\n", dev->bus_id, err);
-+ return err;
-+ }
-+
-+ if (drv->resume)
-+ err = drv->resume(xdev);
-+ if (err)
-+ printk(KERN_WARNING
-+ "xenbus: resume %s failed: %i\n", dev->bus_id, err);
-+ return err;
-+}
-+
-+void xenbus_suspend(void)
-+{
-+ DPRINTK("");
-+
-+ bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev);
-+ bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, suspend_dev);
-+ xs_suspend();
-+}
-+EXPORT_SYMBOL(xenbus_suspend);
-+
-+void xenbus_resume(void)
-+{
-+ xb_init_comms();
-+ xs_resume();
-+ bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev);
-+ bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, resume_dev);
-+}
-+EXPORT_SYMBOL(xenbus_resume);
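-+
-+/*
-+ * Ordering sketch (added commentary): a save/restore cycle is expected
-+ * to bracket the tools' work as
-+ *
-+ *	xenbus_suspend();	/- quiesce drivers, then xs_suspend() -/
-+ *	... domain saved and restored ...
-+ *	xenbus_resume();	/- xb_init_comms(), xs_resume(), resume drivers -/
-+ *
-+ * xs_suspend() takes the suspend_mutex for writing, so in-flight
-+ * transactions and watch (un)registrations drain before the store
-+ * connection is torn down.
-+ */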
-+
-+
-+/* A flag to determine if xenstored is 'ready' (i.e. has started) */
-+int xenstored_ready = 0;
-+
-+
-+int register_xenstore_notifier(struct notifier_block *nb)
-+{
-+ int ret = 0;
-+
-+ if (xenstored_ready > 0)
-+ ret = nb->notifier_call(nb, 0, NULL);
-+ else
-+ notifier_chain_register(&xenstore_chain, nb);
-+
-+ return ret;
-+}
-+EXPORT_SYMBOL(register_xenstore_notifier);
-+
-+void unregister_xenstore_notifier(struct notifier_block *nb)
-+{
-+ notifier_chain_unregister(&xenstore_chain, nb);
-+}
-+EXPORT_SYMBOL(unregister_xenstore_notifier);
-+
-+
-+static int all_devices_ready_(struct device *dev, void *data)
-+{
-+ struct xenbus_device *xendev = to_xenbus_device(dev);
-+ int *result = data;
-+
-+ if (xendev->state != XenbusStateConnected) {
-+		*result = 0;
-+ return 1;
-+ }
-+
-+ return 0;
-+}
-+
-+
-+static int all_devices_ready(void)
-+{
-+ int ready = 1;
-+ bus_for_each_dev(&xenbus_frontend.bus, NULL, &ready,
-+ all_devices_ready_);
-+ return ready;
-+}
-+
-+
-+void xenbus_probe(void *unused)
-+{
-+ int i;
-+
-+	BUG_ON(xenstored_ready <= 0);
-+
-+ /* Enumerate devices in xenstore. */
-+ xenbus_probe_devices(&xenbus_frontend);
-+ xenbus_probe_devices(&xenbus_backend);
-+
-+ /* Watch for changes. */
-+ register_xenbus_watch(&fe_watch);
-+ register_xenbus_watch(&be_watch);
-+
-+ /* Notify others that xenstore is up */
-+ notifier_call_chain(&xenstore_chain, 0, NULL);
-+
-+	/* Wait up to 10 seconds for all currently configured devices to
-+	   connect.  We need this to guarantee that the filesystems and/or
-+	   network devices needed for boot are available before the boot is
-+	   allowed to proceed.
-+
-+	   A possible improvement would be for the tools to add a per-device
-+	   flag to the store entry, indicating whether the device is needed
-+	   at boot time.  People who knew what they were doing could then
-+	   speed up their boot slightly, at the cost of tools support or
-+	   manual intervention to set those flags correctly.
-+	 */
-+ for (i = 0; i < 10 * HZ; i++) {
-+ if (all_devices_ready())
-+ return;
-+
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ schedule_timeout(1);
-+ }
-+
-+ printk(KERN_WARNING
-+ "XENBUS: Timeout connecting to devices!\n");
-+}
-+
-+
-+static struct file_operations xsd_kva_fops;
-+static struct proc_dir_entry *xsd_kva_intf;
-+static struct proc_dir_entry *xsd_port_intf;
-+
-+static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma)
-+{
-+ size_t size = vma->vm_end - vma->vm_start;
-+
-+ if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
-+ return -EINVAL;
-+
-+ vma->vm_pgoff = mfn_to_pfn(xen_start_info->store_mfn);
-+
-+ if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-+ size, vma->vm_page_prot))
-+ return -EAGAIN;
-+
-+ return 0;
-+}
-+
-+static int xsd_kva_read(char *page, char **start, off_t off,
-+ int count, int *eof, void *data)
-+{
-+ int len;
-+
-+ len = sprintf(page, "0x%p", mfn_to_virt(xen_start_info->store_mfn));
-+ *eof = 1;
-+ return len;
-+}
-+
-+static int xsd_port_read(char *page, char **start, off_t off,
-+ int count, int *eof, void *data)
-+{
-+ int len;
-+
-+ len = sprintf(page, "%d", xen_start_info->store_evtchn);
-+ *eof = 1;
-+ return len;
-+}
-+
-+
-+static int __init xenbus_probe_init(void)
-+{
-+ int err = 0, dom0;
-+
-+ DPRINTK("");
-+
-+ if (xen_init() < 0) {
-+ DPRINTK("failed");
-+ return -ENODEV;
-+ }
-+
-+ /* Register ourselves with the kernel bus & device subsystems */
-+ bus_register(&xenbus_frontend.bus);
-+ bus_register(&xenbus_backend.bus);
-+ device_register(&xenbus_frontend.dev);
-+ device_register(&xenbus_backend.dev);
-+
-+ /*
-+ * Domain0 doesn't have a store_evtchn or store_mfn yet.
-+ */
-+ dom0 = (xen_start_info->store_evtchn == 0);
-+
-+ if (dom0) {
-+
-+ unsigned long page;
-+ evtchn_op_t op = { 0 };
-+ int ret;
-+
-+
-+ /* Allocate page. */
-+ page = get_zeroed_page(GFP_KERNEL);
-+ if (!page)
-+ return -ENOMEM;
-+
-+ /* We don't refcnt properly, so set reserved on page.
-+ * (this allocation is permanent) */
-+ SetPageReserved(virt_to_page(page));
-+
-+ xen_start_info->store_mfn =
-+ pfn_to_mfn(virt_to_phys((void *)page) >>
-+ PAGE_SHIFT);
-+
-+ /* Next allocate a local port which xenstored can bind to */
-+ op.cmd = EVTCHNOP_alloc_unbound;
-+ op.u.alloc_unbound.dom = DOMID_SELF;
-+ op.u.alloc_unbound.remote_dom = 0;
-+
-+ ret = HYPERVISOR_event_channel_op(&op);
-+ BUG_ON(ret);
-+ xen_start_info->store_evtchn = op.u.alloc_unbound.port;
-+
-+ /* And finally publish the above info in /proc/xen */
-+ xsd_kva_intf = create_xen_proc_entry("xsd_kva", 0400);
-+ if (xsd_kva_intf) {
-+ memcpy(&xsd_kva_fops, xsd_kva_intf->proc_fops,
-+ sizeof(xsd_kva_fops));
-+ xsd_kva_fops.mmap = xsd_kva_mmap;
-+ xsd_kva_intf->proc_fops = &xsd_kva_fops;
-+ xsd_kva_intf->read_proc = xsd_kva_read;
-+ }
-+ xsd_port_intf = create_xen_proc_entry("xsd_port", 0400);
-+ if (xsd_port_intf)
-+ xsd_port_intf->read_proc = xsd_port_read;
-+ }
-+
-+ /* Initialize the interface to xenstore. */
-+ err = xs_init();
-+ if (err) {
-+ printk(KERN_WARNING
-+ "XENBUS: Error initializing xenstore comms: %i\n", err);
-+ return err;
-+ }
-+
-+ if (!dom0) {
-+ xenstored_ready = 1;
-+ xenbus_probe(NULL);
-+ }
-+
-+ return 0;
-+}
-+
-+postcore_initcall(xenbus_probe_init);
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
-new file mode 100644
-index 0000000..ad43b0c
---- /dev/null
-+++ b/drivers/xen/xenbus/xenbus_xs.c
-@@ -0,0 +1,830 @@
-+/******************************************************************************
-+ * xenbus_xs.c
-+ *
-+ * This is the kernel equivalent of the "xs" library. We don't need everything
-+ * and we use xenbus_comms for communication.
-+ *
-+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
-+ *
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/unistd.h>
-+#include <linux/errno.h>
-+#include <linux/types.h>
-+#include <linux/uio.h>
-+#include <linux/kernel.h>
-+#include <linux/string.h>
-+#include <linux/err.h>
-+#include <linux/slab.h>
-+#include <linux/fcntl.h>
-+#include <linux/kthread.h>
-+#include <xen/xenbus.h>
-+#include "xenbus_comms.h"
-+
-+/* xenbus_probe.c */
-+extern char *kasprintf(const char *fmt, ...);
-+
-+#define streq(a, b) (strcmp((a), (b)) == 0)
-+
-+struct xs_stored_msg {
-+ struct list_head list;
-+
-+ struct xsd_sockmsg hdr;
-+
-+ union {
-+ /* Queued replies. */
-+ struct {
-+ char *body;
-+ } reply;
-+
-+ /* Queued watch events. */
-+ struct {
-+ struct xenbus_watch *handle;
-+ char **vec;
-+ unsigned int vec_size;
-+ } watch;
-+ } u;
-+};
-+
-+struct xs_handle {
-+ /* A list of replies. Currently only one will ever be outstanding. */
-+ struct list_head reply_list;
-+ spinlock_t reply_lock;
-+ wait_queue_head_t reply_waitq;
-+
-+ /* One request at a time. */
-+ struct semaphore request_mutex;
-+
-+ /* Protect transactions against save/restore. */
-+ struct rw_semaphore suspend_mutex;
-+};
-+
-+static struct xs_handle xs_state;
-+
-+/* List of registered watches, and a lock to protect it. */
-+static LIST_HEAD(watches);
-+static DEFINE_SPINLOCK(watches_lock);
-+
-+/* List of pending watch callback events, and a lock to protect it. */
-+static LIST_HEAD(watch_events);
-+static DEFINE_SPINLOCK(watch_events_lock);
-+
-+/*
-+ * Details of the xenwatch callback kernel thread. The thread waits on the
-+ * watch_events_waitq for work to do (queued on watch_events list). When it
-+ * wakes up it acquires the xenwatch_mutex before reading the list and
-+ * carrying out work.
-+ */
-+static pid_t xenwatch_pid;
-+/* static */ DECLARE_MUTEX(xenwatch_mutex);
-+static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq);
-+
-+static int get_error(const char *errorstring)
-+{
-+ unsigned int i;
-+
-+ for (i = 0; !streq(errorstring, xsd_errors[i].errstring); i++) {
-+ if (i == ARRAY_SIZE(xsd_errors) - 1) {
-+ printk(KERN_WARNING
-+ "XENBUS xen store gave: unknown error %s",
-+ errorstring);
-+ return EINVAL;
-+ }
-+ }
-+ return xsd_errors[i].errnum;
-+}
-+
-+static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len)
-+{
-+ struct xs_stored_msg *msg;
-+ char *body;
-+
-+ spin_lock(&xs_state.reply_lock);
-+
-+ while (list_empty(&xs_state.reply_list)) {
-+ spin_unlock(&xs_state.reply_lock);
-+ /* XXX FIXME: Avoid synchronous wait for response here. */
-+ wait_event(xs_state.reply_waitq,
-+ !list_empty(&xs_state.reply_list));
-+ spin_lock(&xs_state.reply_lock);
-+ }
-+
-+ msg = list_entry(xs_state.reply_list.next,
-+ struct xs_stored_msg, list);
-+ list_del(&msg->list);
-+
-+ spin_unlock(&xs_state.reply_lock);
-+
-+ *type = msg->hdr.type;
-+ if (len)
-+ *len = msg->hdr.len;
-+ body = msg->u.reply.body;
-+
-+ kfree(msg);
-+
-+ return body;
-+}
-+
-+/* Emergency write. */
-+void xenbus_debug_write(const char *str, unsigned int count)
-+{
-+ struct xsd_sockmsg msg = { 0 };
-+
-+ msg.type = XS_DEBUG;
-+ msg.len = sizeof("print") + count + 1;
-+
-+ down(&xs_state.request_mutex);
-+ xb_write(&msg, sizeof(msg));
-+ xb_write("print", sizeof("print"));
-+ xb_write(str, count);
-+ xb_write("", 1);
-+ up(&xs_state.request_mutex);
-+}
-+
-+void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
-+{
-+ void *ret;
-+ struct xsd_sockmsg req_msg = *msg;
-+ int err;
-+
-+ if (req_msg.type == XS_TRANSACTION_START)
-+ down_read(&xs_state.suspend_mutex);
-+
-+ down(&xs_state.request_mutex);
-+
-+ err = xb_write(msg, sizeof(*msg) + msg->len);
-+ if (err) {
-+ msg->type = XS_ERROR;
-+ ret = ERR_PTR(err);
-+ } else
-+ ret = read_reply(&msg->type, &msg->len);
-+
-+ up(&xs_state.request_mutex);
-+
-+ if ((msg->type == XS_TRANSACTION_END) ||
-+ ((req_msg.type == XS_TRANSACTION_START) &&
-+ (msg->type == XS_ERROR)))
-+ up_read(&xs_state.suspend_mutex);
-+
-+ return ret;
-+}
-+
-+/* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. */
-+static void *xs_talkv(xenbus_transaction_t t,
-+ enum xsd_sockmsg_type type,
-+ const struct kvec *iovec,
-+ unsigned int num_vecs,
-+ unsigned int *len)
-+{
-+ struct xsd_sockmsg msg;
-+ void *ret = NULL;
-+ unsigned int i;
-+ int err;
-+
-+ msg.tx_id = t;
-+ msg.req_id = 0;
-+ msg.type = type;
-+ msg.len = 0;
-+ for (i = 0; i < num_vecs; i++)
-+ msg.len += iovec[i].iov_len;
-+
-+ down(&xs_state.request_mutex);
-+
-+ err = xb_write(&msg, sizeof(msg));
-+ if (err) {
-+ up(&xs_state.request_mutex);
-+ return ERR_PTR(err);
-+ }
-+
-+ for (i = 0; i < num_vecs; i++) {
-+		err = xb_write(iovec[i].iov_base, iovec[i].iov_len);
-+ if (err) {
-+ up(&xs_state.request_mutex);
-+ return ERR_PTR(err);
-+ }
-+ }
-+
-+ ret = read_reply(&msg.type, len);
-+
-+ up(&xs_state.request_mutex);
-+
-+ if (IS_ERR(ret))
-+ return ret;
-+
-+ if (msg.type == XS_ERROR) {
-+ err = get_error(ret);
-+ kfree(ret);
-+ return ERR_PTR(-err);
-+ }
-+
-+ BUG_ON(msg.type != type);
-+ return ret;
-+}
-+
-+/* Simplified version of xs_talkv: single message. */
-+static void *xs_single(xenbus_transaction_t t,
-+ enum xsd_sockmsg_type type,
-+ const char *string,
-+ unsigned int *len)
-+{
-+ struct kvec iovec;
-+
-+ iovec.iov_base = (void *)string;
-+ iovec.iov_len = strlen(string) + 1;
-+ return xs_talkv(t, type, &iovec, 1, len);
-+}
-+
-+/* Many commands only need an ack; we don't care what the reply says. */
-+static int xs_error(char *reply)
-+{
-+ if (IS_ERR(reply))
-+ return PTR_ERR(reply);
-+ kfree(reply);
-+ return 0;
-+}
-+
-+static unsigned int count_strings(const char *strings, unsigned int len)
-+{
-+ unsigned int num;
-+ const char *p;
-+
-+ for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1)
-+ num++;
-+
-+ return num;
-+}
-+
-+/* Return the path to dir with /name appended. Buffer must be kfree()'ed. */
-+static char *join(const char *dir, const char *name)
-+{
-+ char *buffer;
-+
-+ if (strlen(name) == 0)
-+ buffer = kasprintf("%s", dir);
-+ else
-+ buffer = kasprintf("%s/%s", dir, name);
-+ return (!buffer) ? ERR_PTR(-ENOMEM) : buffer;
-+}
-+
-+static char **split(char *strings, unsigned int len, unsigned int *num)
-+{
-+ char *p, **ret;
-+
-+ /* Count the strings. */
-+ *num = count_strings(strings, len);
-+
-+ /* Transfer to one big alloc for easy freeing. */
-+ ret = kmalloc(*num * sizeof(char *) + len, GFP_KERNEL);
-+ if (!ret) {
-+ kfree(strings);
-+ return ERR_PTR(-ENOMEM);
-+ }
-+ memcpy(&ret[*num], strings, len);
-+ kfree(strings);
-+
-+ strings = (char *)&ret[*num];
-+ for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1)
-+ ret[(*num)++] = p;
-+
-+ return ret;
-+}
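-+
-+/*
-+ * Layout sketch (illustrative): for strings = "51712\051713\0" and
-+ * len = 12, split() returns a single allocation holding the pointer
-+ * vector followed by the copied string data:
-+ *
-+ *	ret[0] -> "51712"
-+ *	ret[1] -> "51713"	(data starts at (char *)&ret[*num])
-+ *
-+ * so one kfree(ret) releases both the vector and the strings.
-+ */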
-+
-+char **xenbus_directory(xenbus_transaction_t t,
-+ const char *dir, const char *node, unsigned int *num)
-+{
-+ char *strings, *path;
-+ unsigned int len;
-+
-+ path = join(dir, node);
-+ if (IS_ERR(path))
-+ return (char **)path;
-+
-+ strings = xs_single(t, XS_DIRECTORY, path, &len);
-+ kfree(path);
-+ if (IS_ERR(strings))
-+ return (char **)strings;
-+
-+ return split(strings, len, num);
-+}
-+EXPORT_SYMBOL(xenbus_directory);
-+
-+/* Check if a path exists. Return 1 if it does. */
-+int xenbus_exists(xenbus_transaction_t t,
-+ const char *dir, const char *node)
-+{
-+ char **d;
-+	unsigned int dir_n;
-+
-+ d = xenbus_directory(t, dir, node, &dir_n);
-+ if (IS_ERR(d))
-+ return 0;
-+ kfree(d);
-+ return 1;
-+}
-+EXPORT_SYMBOL(xenbus_exists);
-+
-+/* Get the value of a single file.
-+ * Returns a kmalloc'ed value: call kfree() on it after use.
-+ * len indicates length in bytes.
-+ */
-+void *xenbus_read(xenbus_transaction_t t,
-+ const char *dir, const char *node, unsigned int *len)
-+{
-+ char *path;
-+ void *ret;
-+
-+ path = join(dir, node);
-+ if (IS_ERR(path))
-+ return (void *)path;
-+
-+ ret = xs_single(t, XS_READ, path, len);
-+ kfree(path);
-+ return ret;
-+}
-+EXPORT_SYMBOL(xenbus_read);
-+
-+/* Write the value of a single file.
-+ * Returns -err on failure.
-+ */
-+int xenbus_write(xenbus_transaction_t t,
-+ const char *dir, const char *node, const char *string)
-+{
-+ const char *path;
-+ struct kvec iovec[2];
-+ int ret;
-+
-+ path = join(dir, node);
-+ if (IS_ERR(path))
-+ return PTR_ERR(path);
-+
-+ iovec[0].iov_base = (void *)path;
-+ iovec[0].iov_len = strlen(path) + 1;
-+ iovec[1].iov_base = (void *)string;
-+ iovec[1].iov_len = strlen(string);
-+
-+ ret = xs_error(xs_talkv(t, XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL));
-+ kfree(path);
-+ return ret;
-+}
-+EXPORT_SYMBOL(xenbus_write);
-+
-+/* Create a new directory. */
-+int xenbus_mkdir(xenbus_transaction_t t,
-+ const char *dir, const char *node)
-+{
-+ char *path;
-+ int ret;
-+
-+ path = join(dir, node);
-+ if (IS_ERR(path))
-+ return PTR_ERR(path);
-+
-+ ret = xs_error(xs_single(t, XS_MKDIR, path, NULL));
-+ kfree(path);
-+ return ret;
-+}
-+EXPORT_SYMBOL(xenbus_mkdir);
-+
-+/* Destroy a file or directory (directories must be empty). */
-+int xenbus_rm(xenbus_transaction_t t, const char *dir, const char *node)
-+{
-+ char *path;
-+ int ret;
-+
-+ path = join(dir, node);
-+ if (IS_ERR(path))
-+ return PTR_ERR(path);
-+
-+ ret = xs_error(xs_single(t, XS_RM, path, NULL));
-+ kfree(path);
-+ return ret;
-+}
-+EXPORT_SYMBOL(xenbus_rm);
-+
-+/* Start a transaction: changes by others will not be seen during this
-+ * transaction, and our changes will not be visible to others until the
-+ * transaction ends.
-+ */
-+int xenbus_transaction_start(xenbus_transaction_t *t)
-+{
-+ char *id_str;
-+
-+ down_read(&xs_state.suspend_mutex);
-+
-+ id_str = xs_single(XBT_NULL, XS_TRANSACTION_START, "", NULL);
-+ if (IS_ERR(id_str)) {
-+ up_read(&xs_state.suspend_mutex);
-+ return PTR_ERR(id_str);
-+ }
-+
-+ *t = simple_strtoul(id_str, NULL, 0);
-+ kfree(id_str);
-+ return 0;
-+}
-+EXPORT_SYMBOL(xenbus_transaction_start);
-+
-+/* End a transaction.
-+ * If abort is non-zero, the transaction is discarded instead of committed.
-+ */
-+int xenbus_transaction_end(xenbus_transaction_t t, int abort)
-+{
-+ char abortstr[2];
-+ int err;
-+
-+ if (abort)
-+ strcpy(abortstr, "F");
-+ else
-+ strcpy(abortstr, "T");
-+
-+ err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL));
-+
-+ up_read(&xs_state.suspend_mutex);
-+
-+ return err;
-+}
-+EXPORT_SYMBOL(xenbus_transaction_end);
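-+
-+/*
-+ * Usage sketch (illustrative; the caller and keys are hypothetical): the
-+ * usual pattern retries when the store reports a conflicting update,
-+ * which xenbus_transaction_end() returns as -EAGAIN:
-+ *
-+ *	xenbus_transaction_t xbt;
-+ *	int err;
-+ * again:
-+ *	err = xenbus_transaction_start(&xbt);
-+ *	if (err)
-+ *		return err;
-+ *	err = xenbus_printf(xbt, dir, "state", "%d", XenbusStateConnected);
-+ *	if (err) {
-+ *		xenbus_transaction_end(xbt, 1);
-+ *		return err;
-+ *	}
-+ *	err = xenbus_transaction_end(xbt, 0);
-+ *	if (err == -EAGAIN)
-+ *		goto again;
-+ */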
-+
-+/* Single read and scanf: returns -errno or num scanned. */
-+int xenbus_scanf(xenbus_transaction_t t,
-+ const char *dir, const char *node, const char *fmt, ...)
-+{
-+ va_list ap;
-+ int ret;
-+ char *val;
-+
-+ val = xenbus_read(t, dir, node, NULL);
-+ if (IS_ERR(val))
-+ return PTR_ERR(val);
-+
-+ va_start(ap, fmt);
-+ ret = vsscanf(val, fmt, ap);
-+ va_end(ap);
-+ kfree(val);
-+ /* Distinctive errno. */
-+ if (ret == 0)
-+ return -ERANGE;
-+ return ret;
-+}
-+EXPORT_SYMBOL(xenbus_scanf);
-+
-+/* Single printf and write: returns -errno or 0. */
-+int xenbus_printf(xenbus_transaction_t t,
-+ const char *dir, const char *node, const char *fmt, ...)
-+{
-+ va_list ap;
-+ int ret;
-+#define PRINTF_BUFFER_SIZE 4096
-+ char *printf_buffer;
-+
-+ printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
-+ if (printf_buffer == NULL)
-+ return -ENOMEM;
-+
-+ va_start(ap, fmt);
-+ ret = vsnprintf(printf_buffer, PRINTF_BUFFER_SIZE, fmt, ap);
-+ va_end(ap);
-+
-+ BUG_ON(ret > PRINTF_BUFFER_SIZE-1);
-+ ret = xenbus_write(t, dir, node, printf_buffer);
-+
-+ kfree(printf_buffer);
-+
-+ return ret;
-+}
-+EXPORT_SYMBOL(xenbus_printf);
-+
-+/* Takes tuples of names, scanf-style args, and void **, NULL terminated. */
-+int xenbus_gather(xenbus_transaction_t t, const char *dir, ...)
-+{
-+ va_list ap;
-+ const char *name;
-+ int ret = 0;
-+
-+ va_start(ap, dir);
-+ while (ret == 0 && (name = va_arg(ap, char *)) != NULL) {
-+ const char *fmt = va_arg(ap, char *);
-+ void *result = va_arg(ap, void *);
-+ char *p;
-+
-+ p = xenbus_read(t, dir, name, NULL);
-+ if (IS_ERR(p)) {
-+ ret = PTR_ERR(p);
-+ break;
-+ }
-+ if (fmt) {
-+ if (sscanf(p, fmt, result) == 0)
-+ ret = -EINVAL;
-+ kfree(p);
-+ } else
-+ *(char **)result = p;
-+ }
-+ va_end(ap);
-+ return ret;
-+}
-+EXPORT_SYMBOL(xenbus_gather);
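-+
-+/*
-+ * Usage sketch (illustrative; "ring-ref" and "event-channel" are typical
-+ * frontend keys, not defined by this file): several values can be read
-+ * in one call, with the tuple list terminated by NULL:
-+ *
-+ *	unsigned long ring_ref;
-+ *	unsigned int evtchn;
-+ *	err = xenbus_gather(XBT_NULL, dev->otherend,
-+ *			    "ring-ref", "%lu", &ring_ref,
-+ *			    "event-channel", "%u", &evtchn,
-+ *			    NULL);
-+ */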
-+
-+static int xs_watch(const char *path, const char *token)
-+{
-+ struct kvec iov[2];
-+
-+ iov[0].iov_base = (void *)path;
-+ iov[0].iov_len = strlen(path) + 1;
-+ iov[1].iov_base = (void *)token;
-+ iov[1].iov_len = strlen(token) + 1;
-+
-+ return xs_error(xs_talkv(XBT_NULL, XS_WATCH, iov,
-+ ARRAY_SIZE(iov), NULL));
-+}
-+
-+static int xs_unwatch(const char *path, const char *token)
-+{
-+ struct kvec iov[2];
-+
-+ iov[0].iov_base = (char *)path;
-+ iov[0].iov_len = strlen(path) + 1;
-+ iov[1].iov_base = (char *)token;
-+ iov[1].iov_len = strlen(token) + 1;
-+
-+ return xs_error(xs_talkv(XBT_NULL, XS_UNWATCH, iov,
-+ ARRAY_SIZE(iov), NULL));
-+}
-+
-+static struct xenbus_watch *find_watch(const char *token)
-+{
-+ struct xenbus_watch *i, *cmp;
-+
-+ cmp = (void *)simple_strtoul(token, NULL, 16);
-+
-+ list_for_each_entry(i, &watches, list)
-+ if (i == cmp)
-+ return i;
-+
-+ return NULL;
-+}
-+
-+/* Register callback to watch this node. */
-+int register_xenbus_watch(struct xenbus_watch *watch)
-+{
-+	/* The watch pointer, rendered in ASCII hex, is the token. */
-+ char token[sizeof(watch) * 2 + 1];
-+ int err;
-+
-+ sprintf(token, "%lX", (long)watch);
-+
-+ down_read(&xs_state.suspend_mutex);
-+
-+ spin_lock(&watches_lock);
-+ BUG_ON(find_watch(token));
-+ list_add(&watch->list, &watches);
-+ spin_unlock(&watches_lock);
-+
-+ err = xs_watch(watch->node, token);
-+
-+ /* Ignore errors due to multiple registration. */
-+ if ((err != 0) && (err != -EEXIST)) {
-+ spin_lock(&watches_lock);
-+ list_del(&watch->list);
-+ spin_unlock(&watches_lock);
-+ }
-+
-+ up_read(&xs_state.suspend_mutex);
-+
-+ return err;
-+}
-+EXPORT_SYMBOL(register_xenbus_watch);
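-+
-+/*
-+ * Usage sketch (illustrative): a caller embeds a struct xenbus_watch,
-+ * fills in .node and .callback, and registers it; the callback later
-+ * runs in xenwatch thread context with the changed path in
-+ * vec[XS_WATCH_PATH] (compare fe_watch/be_watch in xenbus_probe.c):
-+ *
-+ *	static struct xenbus_watch my_watch = {
-+ *		.node = "device",
-+ *		.callback = my_callback,
-+ *	};
-+ *	err = register_xenbus_watch(&my_watch);
-+ */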
-+
-+void unregister_xenbus_watch(struct xenbus_watch *watch)
-+{
-+ struct xs_stored_msg *msg, *tmp;
-+ char token[sizeof(watch) * 2 + 1];
-+ int err;
-+
-+ sprintf(token, "%lX", (long)watch);
-+
-+ down_read(&xs_state.suspend_mutex);
-+
-+ spin_lock(&watches_lock);
-+ BUG_ON(!find_watch(token));
-+ list_del(&watch->list);
-+ spin_unlock(&watches_lock);
-+
-+ err = xs_unwatch(watch->node, token);
-+ if (err)
-+ printk(KERN_WARNING
-+ "XENBUS Failed to release watch %s: %i\n",
-+ watch->node, err);
-+
-+ up_read(&xs_state.suspend_mutex);
-+
-+ /* Cancel pending watch events. */
-+ spin_lock(&watch_events_lock);
-+ list_for_each_entry_safe(msg, tmp, &watch_events, list) {
-+ if (msg->u.watch.handle != watch)
-+ continue;
-+ list_del(&msg->list);
-+ kfree(msg->u.watch.vec);
-+ kfree(msg);
-+ }
-+ spin_unlock(&watch_events_lock);
-+
-+ /* Flush any currently-executing callback, unless we are it. :-) */
-+ if (current->pid != xenwatch_pid) {
-+ down(&xenwatch_mutex);
-+ up(&xenwatch_mutex);
-+ }
-+}
-+EXPORT_SYMBOL(unregister_xenbus_watch);
-+
-+void xs_suspend(void)
-+{
-+ down_write(&xs_state.suspend_mutex);
-+ down(&xs_state.request_mutex);
-+}
-+
-+void xs_resume(void)
-+{
-+ struct xenbus_watch *watch;
-+ char token[sizeof(watch) * 2 + 1];
-+
-+ up(&xs_state.request_mutex);
-+
-+ /* No need for watches_lock: the suspend_mutex is sufficient. */
-+ list_for_each_entry(watch, &watches, list) {
-+ sprintf(token, "%lX", (long)watch);
-+ xs_watch(watch->node, token);
-+ }
-+
-+ up_write(&xs_state.suspend_mutex);
-+}
-+
-+static int xenwatch_thread(void *unused)
-+{
-+ struct list_head *ent;
-+ struct xs_stored_msg *msg;
-+
-+ for (;;) {
-+ wait_event_interruptible(watch_events_waitq,
-+ !list_empty(&watch_events));
-+
-+ if (kthread_should_stop())
-+ break;
-+
-+ down(&xenwatch_mutex);
-+
-+ spin_lock(&watch_events_lock);
-+ ent = watch_events.next;
-+ if (ent != &watch_events)
-+ list_del(ent);
-+ spin_unlock(&watch_events_lock);
-+
-+ if (ent != &watch_events) {
-+ msg = list_entry(ent, struct xs_stored_msg, list);
-+ msg->u.watch.handle->callback(
-+ msg->u.watch.handle,
-+ (const char **)msg->u.watch.vec,
-+ msg->u.watch.vec_size);
-+ kfree(msg->u.watch.vec);
-+ kfree(msg);
-+ }
-+
-+ up(&xenwatch_mutex);
-+ }
-+
-+ return 0;
-+}
-+
-+static int process_msg(void)
-+{
-+ struct xs_stored_msg *msg;
-+ char *body;
-+ int err;
-+
-+ msg = kmalloc(sizeof(*msg), GFP_KERNEL);
-+ if (msg == NULL)
-+ return -ENOMEM;
-+
-+ err = xb_read(&msg->hdr, sizeof(msg->hdr));
-+ if (err) {
-+ kfree(msg);
-+ return err;
-+ }
-+
-+ body = kmalloc(msg->hdr.len + 1, GFP_KERNEL);
-+ if (body == NULL) {
-+ kfree(msg);
-+ return -ENOMEM;
-+ }
-+
-+ err = xb_read(body, msg->hdr.len);
-+ if (err) {
-+ kfree(body);
-+ kfree(msg);
-+ return err;
-+ }
-+ body[msg->hdr.len] = '\0';
-+
-+ if (msg->hdr.type == XS_WATCH_EVENT) {
-+ msg->u.watch.vec = split(body, msg->hdr.len,
-+ &msg->u.watch.vec_size);
-+ if (IS_ERR(msg->u.watch.vec)) {
-+ kfree(msg);
-+ return PTR_ERR(msg->u.watch.vec);
-+ }
-+
-+ spin_lock(&watches_lock);
-+ msg->u.watch.handle = find_watch(
-+ msg->u.watch.vec[XS_WATCH_TOKEN]);
-+ if (msg->u.watch.handle != NULL) {
-+ spin_lock(&watch_events_lock);
-+ list_add_tail(&msg->list, &watch_events);
-+ wake_up(&watch_events_waitq);
-+ spin_unlock(&watch_events_lock);
-+ } else {
-+ kfree(msg->u.watch.vec);
-+ kfree(msg);
-+ }
-+ spin_unlock(&watches_lock);
-+ } else {
-+ msg->u.reply.body = body;
-+ spin_lock(&xs_state.reply_lock);
-+ list_add_tail(&msg->list, &xs_state.reply_list);
-+ spin_unlock(&xs_state.reply_lock);
-+ wake_up(&xs_state.reply_waitq);
-+ }
-+
-+ return 0;
-+}
-+
-+static int xenbus_thread(void *unused)
-+{
-+ int err;
-+
-+ for (;;) {
-+ err = process_msg();
-+ if (err)
-+ printk(KERN_WARNING "XENBUS error %d while reading "
-+ "message\n", err);
-+ if (kthread_should_stop())
-+ break;
-+ }
-+
-+ return 0;
-+}
-+
-+int xs_init(void)
-+{
-+ int err;
-+ struct task_struct *task;
-+
-+ INIT_LIST_HEAD(&xs_state.reply_list);
-+ spin_lock_init(&xs_state.reply_lock);
-+ init_waitqueue_head(&xs_state.reply_waitq);
-+
-+ init_MUTEX(&xs_state.request_mutex);
-+ init_rwsem(&xs_state.suspend_mutex);
-+
-+ /* Initialize the shared memory rings to talk to xenstored */
-+ err = xb_init_comms();
-+ if (err)
-+ return err;
-+
-+ task = kthread_run(xenwatch_thread, NULL, "xenwatch");
-+ if (IS_ERR(task))
-+ return PTR_ERR(task);
-+ xenwatch_pid = task->pid;
-+
-+ task = kthread_run(xenbus_thread, NULL, "xenbus");
-+ if (IS_ERR(task))
-+ return PTR_ERR(task);
-+
-+ return 0;
-+}
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/fs/Kconfig b/fs/Kconfig
-index e9749b0..de4c01e 100644
---- a/fs/Kconfig
-+++ b/fs/Kconfig
-@@ -841,6 +841,7 @@ config TMPFS
- config HUGETLBFS
- bool "HugeTLB file system support"
- depends X86 || IA64 || PPC64 || SPARC64 || SUPERH || BROKEN
-+ depends !XEN
-
- config HUGETLB_PAGE
- def_bool HUGETLBFS
-diff --git a/include/asm-i386/a.out.h b/include/asm-i386/a.out.h
-index ab17bb8..66db229 100644
---- a/include/asm-i386/a.out.h
-+++ b/include/asm-i386/a.out.h
-@@ -19,7 +19,7 @@ struct exec
-
- #ifdef __KERNEL__
-
--#define STACK_TOP TASK_SIZE
-+#define STACK_TOP (TASK_SIZE - 3*PAGE_SIZE)
-
- #endif
-
-diff --git a/include/asm-i386/agp.h b/include/asm-i386/agp.h
-index 9075083..5ce4e2f 100644
---- a/include/asm-i386/agp.h
-+++ b/include/asm-i386/agp.h
-@@ -1,8 +1,10 @@
- #ifndef AGP_H
- #define AGP_H 1
-
-+#include <linux/config.h>
- #include <asm/pgtable.h>
- #include <asm/cacheflush.h>
-+#include <asm/system.h>
-
- /*
- * Functions to keep the agpgart mappings coherent with the MMU.
-@@ -22,13 +24,26 @@ int unmap_page_from_agp(struct page *pag
- #define flush_agp_cache() wbinvd()
-
- /* Convert a physical address to an address suitable for the GART. */
-+#ifndef CONFIG_X86_XEN
- #define phys_to_gart(x) (x)
- #define gart_to_phys(x) (x)
-+#else
-+#define phys_to_gart(x) phys_to_machine(x)
-+#define gart_to_phys(x) machine_to_phys(x)
-+#endif
-
- /* GATT allocation. Returns/accepts GATT kernel virtual address. */
-+#ifndef CONFIG_X86_XEN
- #define alloc_gatt_pages(order) \
- ((char *)__get_free_pages(GFP_KERNEL, (order)))
- #define free_gatt_pages(table, order) \
- free_pages((unsigned long)(table), (order))
-+#else
-+#define alloc_gatt_pages(order) ({ \
-+ dma_addr_t _d; \
-+ (char*)dma_alloc_coherent(NULL,PAGE_SIZE<<(order),&_d,GFP_KERNEL); })
-+#define free_gatt_pages(table, order) \
-+ dma_free_coherent(NULL,PAGE_SIZE<<(order),(table),virt_to_bus(table))
-+#endif
-
- #endif
-diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h
-index ff9ac8d..1fb4d71 100644
---- a/include/asm-i386/apic.h
-+++ b/include/asm-i386/apic.h
-@@ -132,10 +132,12 @@ extern unsigned int nmi_watchdog;
-
- extern int disable_timer_pin_1;
-
-+#ifndef CONFIG_XEN
- void smp_send_timer_broadcast_ipi(struct pt_regs *regs);
- void switch_APIC_timer_to_ipi(void *cpumask);
- void switch_ipi_to_APIC_timer(void *cpumask);
- #define ARCH_APICTIMER_STOPS_ON_C3 1
-+#endif
-
- extern int timer_over_8254;
-
-diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
-index de649d3..4a9e85d 100644
---- a/include/asm-i386/atomic.h
-+++ b/include/asm-i386/atomic.h
-@@ -4,18 +4,13 @@
- #include <linux/config.h>
- #include <linux/compiler.h>
- #include <asm/processor.h>
-+#include <asm/smp_alt.h>
-
- /*
- * Atomic operations that C can't guarantee us. Useful for
- * resource counting etc..
- */
-
--#ifdef CONFIG_SMP
--#define LOCK "lock ; "
--#else
--#define LOCK ""
--#endif
--
- /*
- * Make sure gcc doesn't try to be clever and move things around
- * on us. We need to use _exactly_ the address the user gave us,
-diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h
-index 88e6ca2..5f438be 100644
---- a/include/asm-i386/bitops.h
-+++ b/include/asm-i386/bitops.h
-@@ -7,6 +7,7 @@
-
- #include <linux/config.h>
- #include <linux/compiler.h>
-+#include <asm/smp_alt.h>
-
- /*
- * These have to be done with inline assembly: that way the bit-setting
-@@ -16,12 +17,6 @@
- * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
- */
-
--#ifdef CONFIG_SMP
--#define LOCK_PREFIX "lock ; "
--#else
--#define LOCK_PREFIX ""
--#endif
--
- #define ADDR (*(volatile long *) addr)
-
- /**
-@@ -41,7 +36,7 @@
- */
- static inline void set_bit(int nr, volatile unsigned long * addr)
- {
-- __asm__ __volatile__( LOCK_PREFIX
-+ __asm__ __volatile__( LOCK
- "btsl %1,%0"
- :"+m" (ADDR)
- :"Ir" (nr));
-@@ -76,7 +71,7 @@ static inline void __set_bit(int nr, vol
- */
- static inline void clear_bit(int nr, volatile unsigned long * addr)
- {
-- __asm__ __volatile__( LOCK_PREFIX
-+ __asm__ __volatile__( LOCK
- "btrl %1,%0"
- :"+m" (ADDR)
- :"Ir" (nr));
-@@ -121,7 +116,7 @@ static inline void __change_bit(int nr,
- */
- static inline void change_bit(int nr, volatile unsigned long * addr)
- {
-- __asm__ __volatile__( LOCK_PREFIX
-+ __asm__ __volatile__( LOCK
- "btcl %1,%0"
- :"+m" (ADDR)
- :"Ir" (nr));
-@@ -140,7 +135,7 @@ static inline int test_and_set_bit(int n
- {
- int oldbit;
-
-- __asm__ __volatile__( LOCK_PREFIX
-+ __asm__ __volatile__( LOCK
- "btsl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"+m" (ADDR)
- :"Ir" (nr) : "memory");
-@@ -180,7 +175,7 @@ static inline int test_and_clear_bit(int
- {
- int oldbit;
-
-- __asm__ __volatile__( LOCK_PREFIX
-+ __asm__ __volatile__( LOCK
- "btrl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"+m" (ADDR)
- :"Ir" (nr) : "memory");
-@@ -231,7 +226,7 @@ static inline int test_and_change_bit(in
- {
- int oldbit;
-
-- __asm__ __volatile__( LOCK_PREFIX
-+ __asm__ __volatile__( LOCK
- "btcl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"+m" (ADDR)
- :"Ir" (nr) : "memory");
-diff --git a/include/asm-i386/desc.h b/include/asm-i386/desc.h
-index 89b8b82..2dfbb9f 100644
---- a/include/asm-i386/desc.h
-+++ b/include/asm-i386/desc.h
-@@ -33,6 +33,8 @@ static inline struct desc_struct *get_cp
- return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address;
- }
-
-+#ifndef CONFIG_X86_XEN
-+
- #define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
- #define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8))
-
-@@ -41,6 +43,8 @@ static inline struct desc_struct *get_cp
- #define load_tr(tr) __asm__ __volatile("ltr %0"::"mr" (tr))
- #define load_ldt(ldt) __asm__ __volatile("lldt %0"::"mr" (ldt))
-
-+#endif
-+
- #define store_gdt(dtr) __asm__ ("sgdt %0":"=m" (*dtr))
- #define store_idt(dtr) __asm__ ("sidt %0":"=m" (*dtr))
- #define store_tr(tr) __asm__ ("str %0":"=mr" (tr))
-@@ -53,6 +57,8 @@ static inline struct desc_struct *get_cp
- extern struct desc_struct default_ldt[];
- extern void set_intr_gate(unsigned int irq, void * addr);
-
-+#ifndef CONFIG_X86_XEN
-+
- #define _set_tssldt_desc(n,addr,limit,type) \
- __asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
- "movw %w1,2(%2)\n\t" \
-@@ -64,6 +70,7 @@ __asm__ __volatile__ ("movw %w3,0(%2)\n\
- "rorl $16,%1" \
- : "=m"(*(n)) : "q" (addr), "r"(n), "ir"(limit), "i"(type))
-
-+#ifndef CONFIG_X86_NO_TSS
- static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
- {
- _set_tssldt_desc(&get_cpu_gdt_table(cpu)[entry], (int)addr,
-@@ -71,12 +78,15 @@ static inline void __set_tss_desc(unsign
- }
-
- #define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
-+#endif
-
- static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
- {
- _set_tssldt_desc(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82);
- }
-
-+#endif
-+
- #define LDT_entry_a(info) \
- ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
-
-@@ -102,12 +112,16 @@ static inline void set_ldt_desc(unsigned
- (info)->seg_not_present == 1 && \
- (info)->useable == 0 )
-
-+#ifndef CONFIG_X86_XEN
- static inline void write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b)
- {
- __u32 *lp = (__u32 *)((char *)ldt + entry*8);
- *lp = entry_a;
- *(lp+1) = entry_b;
- }
-+#else
-+extern int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b);
-+#endif
-
- #if TLS_SIZE != 24
- # error update this code.
-@@ -115,17 +129,41 @@ static inline void write_ldt_entry(void
-
- static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
- {
-+#ifndef CONFIG_X86_XEN
- #define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
-+#else
-+#define C(i) HYPERVISOR_update_descriptor(virt_to_machine(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]), *(u64 *)&t->tls_array[i])
-+#endif
- C(0); C(1); C(2);
- #undef C
- }
-
-+#ifndef CONFIG_X86_XEN
-+#define DEFAULT_LDT &default_ldt[0]
-+#define DEFAULT_LDT_SIZE 5
-+static inline void __set_ldt(unsigned int cpu, void *addr, unsigned int size)
-+{
-+ set_ldt_desc(cpu, addr, size);
-+ load_LDT_desc();
-+}
-+#else
-+/*
-+ * NB. We load the default_ldt on demand, as it slows
-+ * down context switching. No one uses it anyway.
-+ */
-+#define DEFAULT_LDT NULL
-+#define DEFAULT_LDT_SIZE 0
-+static inline void __set_ldt(unsigned int cpu, void *addr, unsigned int size)
-+{
-+ xen_set_ldt((unsigned long)addr, size);
-+}
-+#endif
-+
- static inline void clear_LDT(void)
- {
- int cpu = get_cpu();
-
-- set_ldt_desc(cpu, &default_ldt[0], 5);
-- load_LDT_desc();
-+ __set_ldt(cpu, DEFAULT_LDT, DEFAULT_LDT_SIZE);
- put_cpu();
- }
-
-@@ -138,12 +176,11 @@ static inline void load_LDT_nolock(mm_co
- int count = pc->size;
-
- if (likely(!count)) {
-- segments = &default_ldt[0];
-- count = 5;
-+ segments = DEFAULT_LDT;
-+ count = DEFAULT_LDT_SIZE;
- }
-
-- set_ldt_desc(cpu, segments, count);
-- load_LDT_desc();
-+ __set_ldt(cpu, segments, count);
- }
-
- static inline void load_LDT(mm_context_t *pc)
-diff --git a/include/asm-i386/dma-mapping.h b/include/asm-i386/dma-mapping.h
-index 9cf20ca..7508849 100644
---- a/include/asm-i386/dma-mapping.h
-+++ b/include/asm-i386/dma-mapping.h
-@@ -7,6 +7,8 @@
- #include <asm/io.h>
- #include <asm/scatterlist.h>
- #include <asm/bug.h>
-+#include <asm/swiotlb.h>
-+#include <mach_dma_map.h>
-
- #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
- #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-@@ -17,78 +19,22 @@ void *dma_alloc_coherent(struct device *
- void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
-
--static inline dma_addr_t
--dma_map_single(struct device *dev, void *ptr, size_t size,
-- enum dma_data_direction direction)
--{
-- if (direction == DMA_NONE)
-- BUG();
-- WARN_ON(size == 0);
-- flush_write_buffers();
-- return virt_to_phys(ptr);
--}
--
--static inline void
--dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-- enum dma_data_direction direction)
--{
-- if (direction == DMA_NONE)
-- BUG();
--}
--
--static inline int
--dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-- enum dma_data_direction direction)
--{
-- int i;
--
-- if (direction == DMA_NONE)
-- BUG();
-- WARN_ON(nents == 0 || sg[0].length == 0);
--
-- for (i = 0; i < nents; i++ ) {
-- BUG_ON(!sg[i].page);
--
-- sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
-- }
--
-- flush_write_buffers();
-- return nents;
--}
--
--static inline dma_addr_t
--dma_map_page(struct device *dev, struct page *page, unsigned long offset,
-- size_t size, enum dma_data_direction direction)
--{
-- BUG_ON(direction == DMA_NONE);
-- return page_to_phys(page) + offset;
--}
--
--static inline void
--dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-- enum dma_data_direction direction)
--{
-- BUG_ON(direction == DMA_NONE);
--}
--
--
--static inline void
--dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-- enum dma_data_direction direction)
--{
-- BUG_ON(direction == DMA_NONE);
--}
--
- static inline void
- dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
- {
-+ BUG_ON(direction == DMA_NONE);
-+ if (swiotlb)
-+ swiotlb_sync_single_for_cpu(dev, dma_handle, size, direction);
- }
-
- static inline void
- dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
- {
-+ BUG_ON(direction == DMA_NONE);
-+ if (swiotlb)
-+ swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
- flush_write_buffers();
- }
-
-@@ -97,6 +43,9 @@ dma_sync_single_range_for_cpu(struct dev
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
- {
-+ BUG_ON(direction == DMA_NONE);
-+ if (swiotlb)
-+ swiotlb_sync_single_range_for_cpu(dev, dma_handle, offset, size, direction);
- }
-
- static inline void
-@@ -104,6 +53,9 @@ dma_sync_single_range_for_device(struct
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
- {
-+ BUG_ON(direction == DMA_NONE);
-+ if (swiotlb)
-+ swiotlb_sync_single_range_for_device(dev, dma_handle, offset, size, direction);
- flush_write_buffers();
- }
-
-@@ -111,36 +63,22 @@ static inline void
- dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
- {
-+ BUG_ON(direction == DMA_NONE);
-+ if (swiotlb)
-+ swiotlb_sync_sg_for_cpu(dev, sg, nelems, direction);
- }
-
- static inline void
- dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
- {
-+ BUG_ON(direction == DMA_NONE);
-+ if (swiotlb)
-+ swiotlb_sync_sg_for_device(dev, sg, nelems, direction);
- flush_write_buffers();
- }
-
- static inline int
--dma_mapping_error(dma_addr_t dma_addr)
--{
-- return 0;
--}
--
--static inline int
--dma_supported(struct device *dev, u64 mask)
--{
-- /*
-- * we fall back to GFP_DMA when the mask isn't all 1s,
-- * so we can't guarantee allocations that must be
-- * within a tighter range than GFP_DMA..
-- */
-- if(mask < 0x00ffffff)
-- return 0;
--
-- return 1;
--}
--
--static inline int
- dma_set_mask(struct device *dev, u64 mask)
- {
- if(!dev->dma_mask || !dma_supported(dev, mask))
-diff --git a/include/asm-i386/elf.h b/include/asm-i386/elf.h
-index 4153d80..b08208c 100644
---- a/include/asm-i386/elf.h
-+++ b/include/asm-i386/elf.h
-@@ -129,11 +129,16 @@ extern int dump_task_extended_fpu (struc
- #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
- #define ELF_CORE_COPY_XFPREGS(tsk, elf_xfpregs) dump_task_extended_fpu(tsk, elf_xfpregs)
-
--#define VSYSCALL_BASE (__fix_to_virt(FIX_VSYSCALL))
-+#define VSYSCALL_BASE (PAGE_OFFSET - 2*PAGE_SIZE)
- #define VSYSCALL_EHDR ((const struct elfhdr *) VSYSCALL_BASE)
- #define VSYSCALL_ENTRY ((unsigned long) &__kernel_vsyscall)
- extern void __kernel_vsyscall;
-
-+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
-+struct linux_binprm;
-+extern int arch_setup_additional_pages(struct linux_binprm *bprm,
-+ int executable_stack);
-+
- #define ARCH_DLINFO \
- do { \
- NEW_AUX_ENT(AT_SYSINFO, VSYSCALL_ENTRY); \
-diff --git a/include/asm-i386/fixmap.h b/include/asm-i386/fixmap.h
-index cfb1c61..a533f08 100644
---- a/include/asm-i386/fixmap.h
-+++ b/include/asm-i386/fixmap.h
-@@ -20,7 +20,7 @@
- * Leave one empty page between vmalloc'ed areas and
- * the start of the fixmap.
- */
--#define __FIXADDR_TOP 0xfffff000
-+extern unsigned long __FIXADDR_TOP;
-
- #ifndef __ASSEMBLY__
- #include <linux/kernel.h>
-@@ -31,6 +31,9 @@
- #include <linux/threads.h>
- #include <asm/kmap_types.h>
- #endif
-+#ifdef CONFIG_XEN
-+#include <xen/gnttab.h>
-+#endif
-
- /*
- * Here we define all the compile-time 'special' virtual
-@@ -52,7 +55,6 @@
- */
- enum fixed_addresses {
- FIX_HOLE,
-- FIX_VSYSCALL,
- #ifdef CONFIG_X86_LOCAL_APIC
- FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
- #endif
-@@ -83,6 +85,14 @@ enum fixed_addresses {
- #ifdef CONFIG_PCI_MMCONFIG
- FIX_PCIE_MCFG,
- #endif
-+#ifdef CONFIG_XEN
-+ FIX_SHARED_INFO,
-+ FIX_GNTTAB_BEGIN,
-+ FIX_GNTTAB_END = FIX_GNTTAB_BEGIN + NR_GRANT_FRAMES - 1,
-+#define NR_FIX_ISAMAPS 256
-+ FIX_ISAMAP_END,
-+ FIX_ISAMAP_BEGIN = FIX_ISAMAP_END + NR_FIX_ISAMAPS - 1,
-+#endif
- __end_of_permanent_fixed_addresses,
- /* temporary boot-time mappings, used before ioremap() is functional */
- #define NR_FIX_BTMAPS 16
-@@ -93,7 +103,9 @@ enum fixed_addresses {
- };
-
- extern void __set_fixmap (enum fixed_addresses idx,
-- unsigned long phys, pgprot_t flags);
-+ maddr_t phys, pgprot_t flags);
-+
-+extern void set_fixaddr_top(unsigned long top);
-
- #define set_fixmap(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL)
-@@ -116,14 +128,6 @@ extern void __set_fixmap (enum fixed_add
- #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
- #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
-
--/*
-- * This is the range that is readable by user mode, and things
-- * acting like user mode such as get_user_pages.
-- */
--#define FIXADDR_USER_START (__fix_to_virt(FIX_VSYSCALL))
--#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
--
--
- extern void __this_fixmap_does_not_exist(void);
-
- /*
-diff --git a/include/asm-i386/floppy.h b/include/asm-i386/floppy.h
-index 79727af..37ea9eb 100644
---- a/include/asm-i386/floppy.h
-+++ b/include/asm-i386/floppy.h
-@@ -6,6 +6,8 @@
- * for more details.
- *
- * Copyright (C) 1995
-+ *
-+ * Modifications for Xen are Copyright (c) 2004, Keir Fraser.
- */
- #ifndef __ASM_I386_FLOPPY_H
- #define __ASM_I386_FLOPPY_H
-@@ -13,6 +15,7 @@
- #include <linux/vmalloc.h>
-
-
-+#ifndef CONFIG_X86_XEN
- /*
- * The DMA channel used by the floppy controller cannot access data at
- * addresses >= 16MB
-@@ -25,6 +28,13 @@
- (!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
-
- #define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1)
-+#else
-+/* XEN: Hit DMA paths on the head. This trick from asm-m68k/floppy.h. */
-+#include <asm/dma.h>
-+#undef MAX_DMA_ADDRESS
-+#define MAX_DMA_ADDRESS 0
-+#define CROSS_64KB(a,s) (0)
-+#endif
-
-
- #define SW fd_routine[use_virtual_dma&1]
-@@ -43,7 +53,9 @@
- #define fd_dma_mem_alloc(size) SW._dma_mem_alloc(size)
- #define fd_dma_setup(addr, size, mode, io) SW._dma_setup(addr, size, mode, io)
-
-+#ifndef CONFIG_X86_XEN
- #define FLOPPY_CAN_FALLBACK_ON_NODMA
-+#endif
-
- static int virtual_dma_count;
- static int virtual_dma_residue;
-@@ -187,6 +199,8 @@ static int fd_request_irq(void)
-
- }
-
-+#ifndef CONFIG_X86_XEN
-+
- static unsigned long dma_mem_alloc(unsigned long size)
- {
- return __get_dma_pages(GFP_KERNEL,get_order(size));
-@@ -227,6 +241,18 @@ static void _fd_chose_dma_mode(char *add
-
- #define fd_chose_dma_mode(addr, size) _fd_chose_dma_mode(addr, size)
-
-+#else
-+
-+static unsigned long vdma_mem_alloc(unsigned long size)
-+{
-+ /*
-+ * Do not use vmalloc/vfree: floppy_release_irq_and_dma() gets called from
-+ * softirq context via motor_off_callback. A generic bug we happen to trigger.
-+ */
-+ return __get_free_pages(GFP_KERNEL, get_order(size));
-+}
-+
-+#endif
-
- static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
- {
-@@ -239,6 +265,7 @@ static int vdma_dma_setup(char *addr, un
- return 0;
- }
-
-+#ifndef CONFIG_X86_XEN
- static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
- {
- #ifdef FLOPPY_SANITY_CHECK
-@@ -256,6 +283,7 @@ static int hard_dma_setup(char *addr, un
- enable_dma(FLOPPY_DMA);
- return 0;
- }
-+#endif
-
- static struct fd_routine_l {
- int (*_request_dma)(unsigned int dmanr, const char * device_id);
-@@ -265,11 +293,13 @@ static struct fd_routine_l {
- int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
- } fd_routine[] = {
- {
-+#ifndef CONFIG_X86_XEN
- request_dma,
- free_dma,
- get_dma_residue,
- dma_mem_alloc,
- hard_dma_setup
-+#endif
- },
- {
- vdma_request_dma,
-@@ -281,7 +311,18 @@ static struct fd_routine_l {
- };
-
-
-+#ifndef CONFIG_X86_XEN
- static int FDC1 = 0x3f0;
-+#else
-+/* XEN: This trick to force 'virtual DMA' is from include/asm-m68k/floppy.h. */
-+static int xen_floppy_init(void)
-+{
-+ use_virtual_dma = 1;
-+ can_use_virtual_dma = 1;
-+ return 0x3f0;
-+}
-+#define FDC1 xen_floppy_init()
-+#endif
- static int FDC2 = -1;
-
- /*
-@@ -312,7 +353,9 @@ static int FDC2 = -1;
-
- #define FLOPPY_MOTOR_MASK 0xf0
-
-+#ifndef CONFIG_X86_XEN
- #define AUTO_DMA
-+#endif
-
- #define EXTRA_FLOPPY_PARAMS
-
-diff --git a/include/asm-i386/futex.h b/include/asm-i386/futex.h
-index 44b9db8..24c9fd3 100644
---- a/include/asm-i386/futex.h
-+++ b/include/asm-i386/futex.h
-@@ -28,7 +28,7 @@
- "1: movl %2, %0\n\
- movl %0, %3\n" \
- insn "\n" \
--"2: " LOCK_PREFIX "cmpxchgl %3, %2\n\
-+"2: " LOCK "cmpxchgl %3, %2\n\
- jnz 1b\n\
- 3: .section .fixup,\"ax\"\n\
- 4: mov %5, %1\n\
-@@ -68,7 +68,7 @@ futex_atomic_op_inuser (int encoded_op,
- #endif
- switch (op) {
- case FUTEX_OP_ADD:
-- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret,
-+ __futex_atomic_op1(LOCK "xaddl %0, %2", ret,
- oldval, uaddr, oparg);
- break;
- case FUTEX_OP_OR:
-diff --git a/include/asm-i386/highmem.h b/include/asm-i386/highmem.h
-index 0fd3313..775a136 100644
---- a/include/asm-i386/highmem.h
-+++ b/include/asm-i386/highmem.h
-@@ -69,6 +69,11 @@ extern void FASTCALL(kunmap_high(struct
- void *kmap(struct page *page);
- void kunmap(struct page *page);
- void *kmap_atomic(struct page *page, enum km_type type);
-+#ifndef CONFIG_X86_XEN
-+#define kmap_atomic_pte kmap_atomic
-+#else
-+void *kmap_atomic_pte(struct page *page, enum km_type type);
-+#endif
- void kunmap_atomic(void *kvaddr, enum km_type type);
- void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
- struct page *kmap_atomic_to_page(void *ptr);
-diff --git a/include/asm-i386/hw_irq.h b/include/asm-i386/hw_irq.h
-index 622815b..3173498 100644
---- a/include/asm-i386/hw_irq.h
-+++ b/include/asm-i386/hw_irq.h
-@@ -68,7 +68,9 @@ extern atomic_t irq_mis_count;
-
- #define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
-
--#if defined(CONFIG_X86_IO_APIC)
-+#if defined(CONFIG_X86_XEN)
-+extern void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i);
-+#elif defined(CONFIG_X86_IO_APIC)
- static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
- {
- if (IO_APIC_IRQ(i))
-diff --git a/include/asm-i386/hypercall.h b/include/asm-i386/hypercall.h
-new file mode 100644
-index 0000000..1b2605e
---- /dev/null
-+++ b/include/asm-i386/hypercall.h
-@@ -0,0 +1,329 @@
-+/******************************************************************************
-+ * hypercall.h
-+ *
-+ * Linux-specific hypervisor handling.
-+ *
-+ * Copyright (c) 2002-2004, K A Fraser
-+ *
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __HYPERCALL_H__
-+#define __HYPERCALL_H__
-+
-+#include <linux/config.h>
-+
-+#ifdef CONFIG_X86_XEN
-+
-+#include <xen/interface/xen.h>
-+#include <xen/interface/sched.h>
-+#include <xen/interface/nmi.h>
-+
-+#define __STR(x) #x
-+#define STR(x) __STR(x)
-+
-+#define _hypercall0(type, name) \
-+({ \
-+ long __res; \
-+ asm volatile ( \
-+ "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-+ : "=a" (__res) \
-+ : \
-+ : "memory" ); \
-+ (type)__res; \
-+})
-+
-+#define _hypercall1(type, name, a1) \
-+({ \
-+ long __res, __ign1; \
-+ asm volatile ( \
-+ "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-+ : "=a" (__res), "=b" (__ign1) \
-+ : "1" ((long)(a1)) \
-+ : "memory" ); \
-+ (type)__res; \
-+})
-+
-+#define _hypercall2(type, name, a1, a2) \
-+({ \
-+ long __res, __ign1, __ign2; \
-+ asm volatile ( \
-+ "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-+ : "=a" (__res), "=b" (__ign1), "=c" (__ign2) \
-+ : "1" ((long)(a1)), "2" ((long)(a2)) \
-+ : "memory" ); \
-+ (type)__res; \
-+})
-+
-+#define _hypercall3(type, name, a1, a2, a3) \
-+({ \
-+ long __res, __ign1, __ign2, __ign3; \
-+ asm volatile ( \
-+ "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-+ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
-+ "=d" (__ign3) \
-+ : "1" ((long)(a1)), "2" ((long)(a2)), \
-+ "3" ((long)(a3)) \
-+ : "memory" ); \
-+ (type)__res; \
-+})
-+
-+#define _hypercall4(type, name, a1, a2, a3, a4) \
-+({ \
-+ long __res, __ign1, __ign2, __ign3, __ign4; \
-+ asm volatile ( \
-+ "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-+ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
-+ "=d" (__ign3), "=S" (__ign4) \
-+ : "1" ((long)(a1)), "2" ((long)(a2)), \
-+ "3" ((long)(a3)), "4" ((long)(a4)) \
-+ : "memory" ); \
-+ (type)__res; \
-+})
-+
-+#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
-+({ \
-+ long __res, __ign1, __ign2, __ign3, __ign4, __ign5; \
-+ asm volatile ( \
-+ "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-+ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
-+ "=d" (__ign3), "=S" (__ign4), "=D" (__ign5) \
-+ : "1" ((long)(a1)), "2" ((long)(a2)), \
-+ "3" ((long)(a3)), "4" ((long)(a4)), \
-+ "5" ((long)(a5)) \
-+ : "memory" ); \
-+ (type)__res; \
-+})
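-+
-+/*
-+ * Expansion sketch (added commentary): each HYPERVISOR_* wrapper below
-+ * is a thin use of these macros.  HYPERVISOR_sched_op(cmd, arg), for
-+ * instance, expands to roughly
-+ *
-+ *	asm volatile (
-+ *		"call hypercall_page + (__HYPERVISOR_sched_op * 32)"
-+ *		: "=a" (__res), "=b" (__ign1), "=c" (__ign2)
-+ *		: "1" ((long)(cmd)), "2" ((long)(arg))
-+ *		: "memory" );
-+ *
-+ * i.e. arguments travel in %ebx/%ecx (then %edx, %esi, %edi for the
-+ * longer variants) and the result returns in %eax.
-+ */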
-+
-+static inline int
-+HYPERVISOR_set_trap_table(
-+ trap_info_t *table)
-+{
-+ return _hypercall1(int, set_trap_table, table);
-+}
-+
-+static inline int
-+HYPERVISOR_mmu_update(
-+ mmu_update_t *req, int count, int *success_count, domid_t domid)
-+{
-+ return _hypercall4(int, mmu_update, req, count, success_count, domid);
-+}
-+
-+static inline int
-+HYPERVISOR_mmuext_op(
-+ struct mmuext_op *op, int count, int *success_count, domid_t domid)
-+{
-+ return _hypercall4(int, mmuext_op, op, count, success_count, domid);
-+}
-+
-+static inline int
-+HYPERVISOR_set_gdt(
-+ unsigned long *frame_list, int entries)
-+{
-+ return _hypercall2(int, set_gdt, frame_list, entries);
-+}
-+
-+static inline int
-+HYPERVISOR_stack_switch(
-+ unsigned long ss, unsigned long esp)
-+{
-+ return _hypercall2(int, stack_switch, ss, esp);
-+}
-+
-+static inline int
-+HYPERVISOR_set_callbacks(
-+ unsigned long event_selector, unsigned long event_address,
-+ unsigned long failsafe_selector, unsigned long failsafe_address)
-+{
-+ return _hypercall4(int, set_callbacks,
-+ event_selector, event_address,
-+ failsafe_selector, failsafe_address);
-+}
-+
-+static inline int
-+HYPERVISOR_fpu_taskswitch(
-+ int set)
-+{
-+ return _hypercall1(int, fpu_taskswitch, set);
-+}
-+
-+static inline int
-+HYPERVISOR_sched_op(
-+ int cmd, unsigned long arg)
-+{
-+ return _hypercall2(int, sched_op, cmd, arg);
-+}
-+
-+static inline long
-+HYPERVISOR_set_timer_op(
-+ u64 timeout)
-+{
-+ unsigned long timeout_hi = (unsigned long)(timeout>>32);
-+ unsigned long timeout_lo = (unsigned long)timeout;
-+ return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
-+}
-+
-+static inline int
-+HYPERVISOR_dom0_op(
-+ dom0_op_t *dom0_op)
-+{
-+ dom0_op->interface_version = DOM0_INTERFACE_VERSION;
-+ return _hypercall1(int, dom0_op, dom0_op);
-+}
-+
-+static inline int
-+HYPERVISOR_set_debugreg(
-+ int reg, unsigned long value)
-+{
-+ return _hypercall2(int, set_debugreg, reg, value);
-+}
-+
-+static inline unsigned long
-+HYPERVISOR_get_debugreg(
-+ int reg)
-+{
-+ return _hypercall1(unsigned long, get_debugreg, reg);
-+}
-+
-+static inline int
-+HYPERVISOR_update_descriptor(
-+ u64 ma, u64 desc)
-+{
-+ return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
-+}
-+
-+static inline int
-+HYPERVISOR_memory_op(
-+ unsigned int cmd, void *arg)
-+{
-+ return _hypercall2(int, memory_op, cmd, arg);
-+}
-+
-+static inline int
-+HYPERVISOR_multicall(
-+ void *call_list, int nr_calls)
-+{
-+ return _hypercall2(int, multicall, call_list, nr_calls);
-+}
-+
-+static inline int
-+HYPERVISOR_update_va_mapping(
-+ unsigned long va, pte_t new_val, unsigned long flags)
-+{
-+ unsigned long pte_hi = 0;
-+#ifdef CONFIG_X86_PAE
-+ pte_hi = new_val.pte_high;
-+#endif
-+ return _hypercall4(int, update_va_mapping, va,
-+ new_val.pte_low, pte_hi, flags);
-+}
-+
-+static inline int
-+HYPERVISOR_event_channel_op(
-+ void *op)
-+{
-+ return _hypercall1(int, event_channel_op, op);
-+}
-+
-+static inline int
-+HYPERVISOR_xen_version(
-+ int cmd, void *arg)
-+{
-+ return _hypercall2(int, xen_version, cmd, arg);
-+}
-+
-+static inline int
-+HYPERVISOR_console_io(
-+ int cmd, int count, char *str)
-+{
-+ return _hypercall3(int, console_io, cmd, count, str);
-+}
-+
-+static inline int
-+HYPERVISOR_physdev_op(
-+ void *physdev_op)
-+{
-+ return _hypercall1(int, physdev_op, physdev_op);
-+}
-+
-+static inline int
-+HYPERVISOR_grant_table_op(
-+ unsigned int cmd, void *uop, unsigned int count)
-+{
-+ return _hypercall3(int, grant_table_op, cmd, uop, count);
-+}
-+
-+static inline int
-+HYPERVISOR_update_va_mapping_otherdomain(
-+ unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
-+{
-+ unsigned long pte_hi = 0;
-+#ifdef CONFIG_X86_PAE
-+ pte_hi = new_val.pte_high;
-+#endif
-+ return _hypercall5(int, update_va_mapping_otherdomain, va,
-+ new_val.pte_low, pte_hi, flags, domid);
-+}
-+
-+static inline int
-+HYPERVISOR_vm_assist(
-+ unsigned int cmd, unsigned int type)
-+{
-+ return _hypercall2(int, vm_assist, cmd, type);
-+}
-+
-+static inline int
-+HYPERVISOR_vcpu_op(
-+ int cmd, int vcpuid, void *extra_args)
-+{
-+ return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
-+}
-+
-+static inline int
-+HYPERVISOR_suspend(
-+ unsigned long srec)
-+{
-+ return _hypercall3(int, sched_op, SCHEDOP_shutdown,
-+ SHUTDOWN_suspend, srec);
-+}
-+
-+static inline int
-+HYPERVISOR_nmi_op(
-+ unsigned long op,
-+ unsigned long arg)
-+{
-+ return _hypercall2(int, nmi_op, op, arg);
-+}
-+
-+#endif /* CONFIG_X86_XEN */
-+
-+#endif /* __HYPERCALL_H__ */
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/include/asm-i386/hypervisor.h b/include/asm-i386/hypervisor.h
-new file mode 100644
-index 0000000..021e2de
---- /dev/null
-+++ b/include/asm-i386/hypervisor.h
-@@ -0,0 +1,168 @@
-+/******************************************************************************
-+ * hypervisor.h
-+ *
-+ * Linux-specific hypervisor handling.
-+ *
-+ * Copyright (c) 2002-2004, K A Fraser
-+ *
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __HYPERVISOR_H__
-+#define __HYPERVISOR_H__
-+
-+#include <linux/config.h>
-+#include <linux/types.h>
-+#include <linux/kernel.h>
-+#include <linux/version.h>
-+
-+#ifdef CONFIG_XEN
-+
-+#include <xen/interface/xen.h>
-+#include <xen/interface/dom0_ops.h>
-+#include <asm/ptrace.h>
-+#include <asm/page.h>
-+#if defined(__i386__)
-+# ifdef CONFIG_X86_PAE
-+# include <asm-generic/pgtable-nopud.h>
-+# else
-+# include <asm-generic/pgtable-nopmd.h>
-+# endif
-+#endif
-+
-+extern shared_info_t *HYPERVISOR_shared_info;
-+
-+/* arch/xen/i386/kernel/setup.c */
-+extern start_info_t *xen_start_info;
-+
-+/* arch/xen/kernel/evtchn.c */
-+/* Force a proper event-channel callback from Xen. */
-+void force_evtchn_callback(void);
-+
-+/* arch/xen/kernel/process.c */
-+void xen_cpu_idle (void);
-+
-+/* arch/xen/i386/kernel/hypervisor.c */
-+void do_hypervisor_callback(struct pt_regs *regs);
-+
-+/* arch/xen/i386/mm/hypervisor.c */
-+/*
-+ * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should already
-+ * be MACHINE addresses.
-+ */
-+
-+void xen_pt_switch(unsigned long ptr);
-+void xen_new_user_pt(unsigned long ptr); /* x86_64 only */
-+void xen_load_gs(unsigned int selector); /* x86_64 only */
-+void xen_tlb_flush(void);
-+void xen_invlpg(unsigned long ptr);
-+
-+void xen_l1_entry_update(pte_t *ptr, pte_t val);
-+void xen_l2_entry_update(pmd_t *ptr, pmd_t val);
-+void xen_l3_entry_update(pud_t *ptr, pud_t val); /* x86_64/PAE */
-+void xen_l4_entry_update(pgd_t *ptr, pgd_t val); /* x86_64 only */
-+void xen_pgd_pin(unsigned long ptr);
-+void xen_pgd_unpin(unsigned long ptr);
-+void xen_pud_pin(unsigned long ptr); /* x86_64 only */
-+void xen_pud_unpin(unsigned long ptr); /* x86_64 only */
-+void xen_pmd_pin(unsigned long ptr); /* x86_64 only */
-+void xen_pmd_unpin(unsigned long ptr); /* x86_64 only */
-+void xen_pte_pin(unsigned long ptr);
-+void xen_pte_unpin(unsigned long ptr);
-+
-+void xen_set_ldt(unsigned long ptr, unsigned long bytes);
-+void xen_machphys_update(unsigned long mfn, unsigned long pfn);
-+
-+#ifdef CONFIG_SMP
-+#include <linux/cpumask.h>
-+void xen_tlb_flush_all(void);
-+void xen_invlpg_all(unsigned long ptr);
-+void xen_tlb_flush_mask(cpumask_t *mask);
-+void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr);
-+#endif
-+
-+/* Returns zero on success else negative errno. */
-+int xen_create_contiguous_region(
-+ unsigned long vstart, unsigned int order, unsigned int address_bits);
-+void xen_destroy_contiguous_region(
-+ unsigned long vstart, unsigned int order);
-+
-+#include <asm/hypercall.h>
-+
-+#if defined(CONFIG_X86_64)
-+#define MULTI_UVMFLAGS_INDEX 2
-+#define MULTI_UVMDOMID_INDEX 3
-+#else
-+#define MULTI_UVMFLAGS_INDEX 3
-+#define MULTI_UVMDOMID_INDEX 4
-+#endif
-+
-+#define xen_init() (0)
-+
-+static inline void
-+MULTI_update_va_mapping(
-+ multicall_entry_t *mcl, unsigned long va,
-+ pte_t new_val, unsigned long flags)
-+{
-+ mcl->op = __HYPERVISOR_update_va_mapping;
-+ mcl->args[0] = va;
-+#if defined(CONFIG_X86_64)
-+ mcl->args[1] = new_val.pte;
-+ mcl->args[2] = flags;
-+#elif defined(CONFIG_X86_PAE)
-+ mcl->args[1] = new_val.pte_low;
-+ mcl->args[2] = new_val.pte_high;
-+ mcl->args[3] = flags;
-+#else
-+ mcl->args[1] = new_val.pte_low;
-+ mcl->args[2] = 0;
-+ mcl->args[3] = flags;
-+#endif
-+}
-+
-+static inline void
-+MULTI_update_va_mapping_otherdomain(
-+ multicall_entry_t *mcl, unsigned long va,
-+ pte_t new_val, unsigned long flags, domid_t domid)
-+{
-+ mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
-+ mcl->args[0] = va;
-+#if defined(CONFIG_X86_64)
-+ mcl->args[1] = new_val.pte;
-+ mcl->args[2] = flags;
-+ mcl->args[3] = domid;
-+#elif defined(CONFIG_X86_PAE)
-+ mcl->args[1] = new_val.pte_low;
-+ mcl->args[2] = new_val.pte_high;
-+ mcl->args[3] = flags;
-+ mcl->args[4] = domid;
-+#else
-+ mcl->args[1] = new_val.pte_low;
-+ mcl->args[2] = 0;
-+ mcl->args[3] = flags;
-+ mcl->args[4] = domid;
-+#endif
-+}
-+
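-+/*
-+ * Illustrative batching sketch (caller-provided array; va0/pte0 etc. are
-+ * assumed names): consecutive MULTI_* entries can be filled back to back
-+ * and then flushed with a single hypercall:
-+ *
-+ *	multicall_entry_t mcl[2];
-+ *	MULTI_update_va_mapping(&mcl[0], va0, pte0, 0);
-+ *	MULTI_update_va_mapping(&mcl[1], va1, pte1, UVMF_INVLPG);
-+ *	if (unlikely(HYPERVISOR_multicall(mcl, 2)))
-+ *		BUG();
-+ */
-+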
-+#endif /* CONFIG_XEN */
-+
-+#endif /* __HYPERVISOR_H__ */
-diff --git a/include/asm-i386/io.h b/include/asm-i386/io.h
-index 03233c2..b788028 100644
---- a/include/asm-i386/io.h
-+++ b/include/asm-i386/io.h
-@@ -46,7 +46,8 @@
- #ifdef __KERNEL__
-
- #include <asm-generic/iomap.h>
--
-+#include <mach_io.h>
-+#include <asm/fixmap.h>
- #include <linux/vmalloc.h>
-
- /*
-@@ -96,11 +97,6 @@ static inline void * phys_to_virt(unsign
- return __va(address);
- }
-
--/*
-- * Change "struct page" to physical address.
-- */
--#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
--
- extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
-
- /**
-@@ -137,22 +133,6 @@ extern void bt_iounmap(void *addr, unsig
- #define dmi_alloc alloc_bootmem
-
- /*
-- * ISA I/O bus memory addresses are 1:1 with the physical address.
-- */
--#define isa_virt_to_bus virt_to_phys
--#define isa_page_to_bus page_to_phys
--#define isa_bus_to_virt phys_to_virt
--
--/*
-- * However PCI ones are not necessarily 1:1 and therefore these interfaces
-- * are forbidden in portable PCI drivers.
-- *
-- * Allow them on x86 for legacy drivers, though.
-- */
--#define virt_to_bus virt_to_phys
--#define bus_to_virt phys_to_virt
--
--/*
- * readX/writeX() are used to access memory mapped devices. On some
- * architectures the memory mapped IO stuff needs to be accessed
- * differently. On the x86 architecture, we just read/write the
-@@ -209,16 +189,6 @@ static inline void memcpy_toio(volatile
- __memcpy((void __force *) dst, src, count);
- }
-
--/*
-- * ISA space is 'always mapped' on a typical x86 system, no need to
-- * explicitly ioremap() it. The fact that the ISA IO space is mapped
-- * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
-- * are physical addresses. The following constant pointer can be
-- * used as the IO-area pointer (it can be iounmapped as well, so the
-- * analogy with PCI is quite large):
-- */
--#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
--
- #define isa_readb(a) readb(__ISA_IO_base + (a))
- #define isa_readw(a) readw(__ISA_IO_base + (a))
- #define isa_readl(a) readl(__ISA_IO_base + (a))
-@@ -383,4 +353,9 @@ BUILDIO(b,b,char)
- BUILDIO(w,w,short)
- BUILDIO(l,,int)
-
-+#ifdef CONFIG_X86_XEN
-+/* We will be supplying our own /dev/mem implementation */
-+#define ARCH_HAS_DEV_MEM
-+#endif
-+
- #endif
-diff --git a/include/asm-i386/kmap_types.h b/include/asm-i386/kmap_types.h
-index 6886a0c..7a660e8 100644
---- a/include/asm-i386/kmap_types.h
-+++ b/include/asm-i386/kmap_types.h
-@@ -23,7 +23,8 @@ D(9) KM_IRQ0,
- D(10) KM_IRQ1,
- D(11) KM_SOFTIRQ0,
- D(12) KM_SOFTIRQ1,
--D(13) KM_TYPE_NR
-+D(13) KM_SWIOTLB,
-+D(14) KM_TYPE_NR
- };
-
- #undef D
-diff --git a/include/asm-i386/mach-default/mach_dma_map.h b/include/asm-i386/mach-default/mach_dma_map.h
-new file mode 100644
-index 0000000..5a594ea
---- /dev/null
-+++ b/include/asm-i386/mach-default/mach_dma_map.h
-@@ -0,0 +1,85 @@
-+#ifndef __ASM_MACH_DMA_MAP_H
-+#define __ASM_MACH_DMA_MAP_H
-+
-+static inline dma_addr_t
-+dma_map_single(struct device *dev, void *ptr, size_t size,
-+ enum dma_data_direction direction)
-+{
-+ if (direction == DMA_NONE)
-+ BUG();
-+ WARN_ON(size == 0);
-+ flush_write_buffers();
-+ return virt_to_phys(ptr);
-+}
-+
-+static inline void
-+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-+ enum dma_data_direction direction)
-+{
-+ if (direction == DMA_NONE)
-+ BUG();
-+}
-+
-+static inline int
-+dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-+ enum dma_data_direction direction)
-+{
-+ int i;
-+
-+ if (direction == DMA_NONE)
-+ BUG();
-+ WARN_ON(nents == 0 || sg[0].length == 0);
-+
-+ for (i = 0; i < nents; i++ ) {
-+ BUG_ON(!sg[i].page);
-+
-+ sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
-+ }
-+
-+ flush_write_buffers();
-+ return nents;
-+}
-+
-+static inline void
-+dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-+ enum dma_data_direction direction)
-+{
-+ BUG_ON(direction == DMA_NONE);
-+}
-+
-+static inline dma_addr_t
-+dma_map_page(struct device *dev, struct page *page, unsigned long offset,
-+ size_t size, enum dma_data_direction direction)
-+{
-+ BUG_ON(direction == DMA_NONE);
-+ return page_to_phys(page) + offset;
-+}
-+
-+static inline void
-+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-+ enum dma_data_direction direction)
-+{
-+ BUG_ON(direction == DMA_NONE);
-+}
-+
-+static inline int
-+dma_mapping_error(dma_addr_t dma_addr)
-+{
-+ return 0;
-+}
-+
-+static inline int
-+dma_supported(struct device *dev, u64 mask)
-+{
-+ /*
-+ * we fall back to GFP_DMA when the mask isn't all 1s,
-+ * so we can't guarantee allocations that must be
-+ * within a tighter range than GFP_DMA..
-+ */
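-+	/* Illustrative: 0x00ffffff is the 16MB ISA-DMA boundary, the
-+	 * tightest range GFP_DMA can guarantee on i386. */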
-+	if (mask < 0x00ffffff)
-+ return 0;
-+
-+ return 1;
-+}
-+
-+#endif /* __ASM_MACH_DMA_MAP_H */
-diff --git a/include/asm-i386/mach-default/mach_fixmap.h b/include/asm-i386/mach-default/mach_fixmap.h
-new file mode 100644
-index 0000000..5d9fc52
---- /dev/null
-+++ b/include/asm-i386/mach-default/mach_fixmap.h
-@@ -0,0 +1,15 @@
-+#ifndef __ASM_MACH_FIXMAP_H
-+#define __ASM_MACH_FIXMAP_H
-+
-+/* used by vmalloc.c, vsyscall.lds.S.
-+ *
-+ * Leave one empty page between vmalloc'ed areas and
-+ * the start of the fixmap.
-+ */
-+#define __FIXADDR_TOP 0xfffff000
-+
-+#ifndef __ASSEMBLY__
-+typedef unsigned long maddr_t;
-+#endif
-+
-+#endif /* __ASM_MACH_FIXMAP_H */
-diff --git a/include/asm-i386/mach-default/mach_io.h b/include/asm-i386/mach-default/mach_io.h
-new file mode 100644
-index 0000000..3597ae5
---- /dev/null
-+++ b/include/asm-i386/mach-default/mach_io.h
-@@ -0,0 +1,35 @@
-+#ifndef __ASM_MACH_IO_H
-+#define __ASM_MACH_IO_H
-+
-+/*
-+ * Change "struct page" to physical address.
-+ */
-+#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-+
-+/*
-+ * ISA space is 'always mapped' on a typical x86 system, no need to
-+ * explicitly ioremap() it. The fact that the ISA IO space is mapped
-+ * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
-+ * are physical addresses. The following constant pointer can be
-+ * used as the IO-area pointer (it can be iounmapped as well, so the
-+ * analogy with PCI is quite large):
-+ */
-+#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
-+
-+/*
-+ * ISA I/O bus memory addresses are 1:1 with the physical address.
-+ */
-+#define isa_virt_to_bus virt_to_phys
-+#define isa_page_to_bus page_to_phys
-+#define isa_bus_to_virt phys_to_virt
-+
-+/*
-+ * However PCI ones are not necessarily 1:1 and therefore these interfaces
-+ * are forbidden in portable PCI drivers.
-+ *
-+ * Allow them on x86 for legacy drivers, though.
-+ */
-+#define virt_to_bus virt_to_phys
-+#define bus_to_virt phys_to_virt
-+
-+#endif /* __ASM_MACH_IO_H */
-diff --git a/include/asm-i386/mach-default/mach_mmu.h b/include/asm-i386/mach-default/mach_mmu.h
-new file mode 100644
-index 0000000..bbd7d44
---- /dev/null
-+++ b/include/asm-i386/mach-default/mach_mmu.h
-@@ -0,0 +1,18 @@
-+#ifndef __ASM_MACH_MMU_H
-+#define __ASM_MACH_MMU_H
-+
-+#define MACH_SWITCH_DECLS
-+
-+static inline void mach_switch_pgd(struct mm_struct *next, int cpu)
-+{
-+ load_cr3(next->pgd);
-+}
-+
-+static inline void mach_switch_ldt(struct mm_struct *next, int cpu)
-+{
-+ load_LDT_nolock(&next->context, cpu);
-+}
-+
-+#define mach_switch_commit() 0
-+
-+#endif /* __ASM_MACH_MMU_H */
-diff --git a/include/asm-i386/mach-default/mach_mode.h b/include/asm-i386/mach-default/mach_mode.h
-new file mode 100644
-index 0000000..1c3b98d
---- /dev/null
-+++ b/include/asm-i386/mach-default/mach_mode.h
-@@ -0,0 +1,7 @@
-+#ifndef __ASM_MACH_MODE_H
-+#define __ASM_MACH_MODE_H
-+
-+#define KERNEL_RPL 0
-+#define USER_MODE_MASK 3
-+
-+#endif /* __ASM_MACH_MODE_H */
-diff --git a/include/asm-i386/mach-default/mach_page.h b/include/asm-i386/mach-default/mach_page.h
-new file mode 100644
-index 0000000..b8beb05
---- /dev/null
-+++ b/include/asm-i386/mach-default/mach_page.h
-@@ -0,0 +1,26 @@
-+#ifndef __ASM_MACH_PAGE_H
-+#define __ASM_MACH_PAGE_H
-+
-+#ifndef __ASSEMBLY__
-+
-+#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
-+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
-+
-+#ifdef CONFIG_X86_PAE
-+#define pmd_val(x) ((x).pmd)
-+#define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
-+#define __pmd(x) ((pmd_t) { (x) } )
-+#else
-+#define pte_val(x) ((x).pte_low)
-+#endif
-+#define pte_val_ma pte_val
-+
-+#define pgd_val(x) ((x).pgd)
-+
-+#define __pte(x) ((pte_t) { (x) } )
-+#define __pte_ma __pte
-+#define __pgd(x) ((pgd_t) { (x) } )
-+
-+#endif /* __ASSEMBLY__ */
-+
-+#endif /* __ASM_MACH_PAGE_H */
-diff --git a/include/asm-i386/mach-default/mach_pgtable.h b/include/asm-i386/mach-default/mach_pgtable.h
-new file mode 100644
-index 0000000..118b32c
---- /dev/null
-+++ b/include/asm-i386/mach-default/mach_pgtable.h
-@@ -0,0 +1,45 @@
-+#ifndef __ASM_MACH_PGTABLE_H
-+#define __ASM_MACH_PGTABLE_H
-+
-+extern pgd_t swapper_pg_dir[1024];
-+
-+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
-+
-+#ifndef CONFIG_X86_PAE
-+#define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
-+#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-+#else
-+static inline unsigned long pte_pfn(pte_t pte)
-+{
-+ return (pte.pte_low >> PAGE_SHIFT) |
-+ (pte.pte_high << (32 - PAGE_SHIFT));
-+}
-+static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
-+{
-+ pte_t pte;
-+
-+ pte.pte_high = (page_nr >> (32 - PAGE_SHIFT)) | \
-+ (pgprot_val(pgprot) >> 32);
-+ pte.pte_high &= (__supported_pte_mask >> 32);
-+ pte.pte_low = ((page_nr << PAGE_SHIFT) | pgprot_val(pgprot)) & \
-+ __supported_pte_mask;
-+ return pte;
-+}
-+#endif
-+
-+#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
-+#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
-+
-+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-+ do { \
-+ if (__dirty) { \
-+ (__ptep)->pte_low = (__entry).pte_low; \
-+ flush_tlb_page(__vma, __address); \
-+ } \
-+ } while (0)
-+
-+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
-+ remap_pfn_range(vma, vaddr, pfn, size, prot)
-+
-+#endif /* __ASM_MACH_PGTABLE_H */
-diff --git a/include/asm-i386/mach-default/mach_processor.h b/include/asm-i386/mach-default/mach_processor.h
-new file mode 100644
-index 0000000..7d6b3b2
---- /dev/null
-+++ b/include/asm-i386/mach-default/mach_processor.h
-@@ -0,0 +1,53 @@
-+#ifndef __ASM_MACH_PROCESSOR_H
-+#define __ASM_MACH_PROCESSOR_H
-+
-+static inline void set_in_cr4(unsigned long mask)
-+{
-+ unsigned cr4;
-+ mmu_cr4_features |= mask;
-+ cr4 = read_cr4();
-+ cr4 |= mask;
-+ write_cr4(cr4);
-+}
-+
-+static inline void clear_in_cr4(unsigned long mask)
-+{
-+ unsigned cr4;
-+ mmu_cr4_features &= ~mask;
-+ cr4 = read_cr4();
-+ cr4 &= ~mask;
-+ write_cr4(cr4);
-+}
-+
-+static inline void mach_load_esp0(struct tss_struct *tss, struct thread_struct *thread)
-+{
-+}
-+
-+/*
-+ * These special macros can be used to get or set a debugging register
-+ */
-+#define get_debugreg(var, register) \
-+ __asm__("movl %%db" #register ", %0" \
-+ :"=r" (var))
-+#define set_debugreg(value, register) \
-+ __asm__("movl %0,%%db" #register \
-+ : /* no output */ \
-+ :"r" (value))
-+
-+/*
-+ * Set IOPL bits in EFLAGS from given mask
-+ */
-+static inline void set_iopl_mask(unsigned mask)
-+{
-+ unsigned int reg;
-+ __asm__ __volatile__ ("pushfl;"
-+ "popl %0;"
-+ "andl %1, %0;"
-+ "orl %2, %0;"
-+ "pushl %0;"
-+ "popfl"
-+ : "=&r" (reg)
-+ : "i" (~X86_EFLAGS_IOPL), "r" (mask));
-+}
-+
-+#endif /* __ASM_MACH_PROCESSOR_H */
-diff --git a/include/asm-i386/mach-default/mach_seg.h b/include/asm-i386/mach-default/mach_seg.h
-new file mode 100644
-index 0000000..5c7f021
---- /dev/null
-+++ b/include/asm-i386/mach-default/mach_seg.h
-@@ -0,0 +1,42 @@
-+#ifndef __ASM_MACH_SEG_H
-+#define __ASM_MACH_SEG_H
-+
-+#define GET_KERNEL_CS() (__KERNEL_CS)
-+#define GET_KERNEL_DS() (__KERNEL_DS)
-+
-+#define GDT_ENTRY_PNPBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 6)
-+#define GDT_ENTRY_APMBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 11)
-+
-+#define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
-+#define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
-+
-+#define GDT_ENTRY_DOUBLEFAULT_TSS 31
-+
-+/*
-+ * The GDT has 32 entries
-+ */
-+#define GDT_ENTRIES 32
-+
-+/* Simple and small GDT entries for booting only */
-+
-+#define GDT_ENTRY_BOOT_CS 2
-+#define __BOOT_CS (GDT_ENTRY_BOOT_CS * 8)
-+
-+#define GDT_ENTRY_BOOT_DS (GDT_ENTRY_BOOT_CS + 1)
-+#define __BOOT_DS (GDT_ENTRY_BOOT_DS * 8)
-+
-+/* The PnP BIOS entries in the GDT */
-+#define GDT_ENTRY_PNPBIOS_CS32 (GDT_ENTRY_PNPBIOS_BASE + 0)
-+#define GDT_ENTRY_PNPBIOS_CS16 (GDT_ENTRY_PNPBIOS_BASE + 1)
-+#define GDT_ENTRY_PNPBIOS_DS (GDT_ENTRY_PNPBIOS_BASE + 2)
-+#define GDT_ENTRY_PNPBIOS_TS1 (GDT_ENTRY_PNPBIOS_BASE + 3)
-+#define GDT_ENTRY_PNPBIOS_TS2 (GDT_ENTRY_PNPBIOS_BASE + 4)
-+
-+/* The PnP BIOS selectors */
-+#define PNP_CS32 (GDT_ENTRY_PNPBIOS_CS32 * 8) /* segment for calling fn */
-+#define PNP_CS16 (GDT_ENTRY_PNPBIOS_CS16 * 8) /* code segment for BIOS */
-+#define PNP_DS (GDT_ENTRY_PNPBIOS_DS * 8) /* data segment for BIOS */
-+#define PNP_TS1 (GDT_ENTRY_PNPBIOS_TS1 * 8) /* transfer data segment */
-+#define PNP_TS2 (GDT_ENTRY_PNPBIOS_TS2 * 8) /* another data segment */
-+
-+#endif /* __ASM_MACH_SEG_H */
-diff --git a/include/asm-i386/mach-default/mach_setup.h b/include/asm-i386/mach-default/mach_setup.h
-new file mode 100644
-index 0000000..a24cded
---- /dev/null
-+++ b/include/asm-i386/mach-default/mach_setup.h
-@@ -0,0 +1,50 @@
-+#ifndef __ASM_MACH_SETUP_H
-+#define __ASM_MACH_SETUP_H
-+
-+#define PARAM_SIZE 4096
-+
-+#define OLD_CL_MAGIC_ADDR 0x90020
-+#define OLD_CL_MAGIC 0xA33F
-+#define OLD_CL_BASE_ADDR 0x90000
-+#define OLD_CL_OFFSET 0x90022
-+#define NEW_CL_POINTER 0x228 /* Relative to real mode data */
-+
-+#ifndef __ASSEMBLY__
-+/*
-+ * This is set up by the setup-routine at boot-time
-+ */
-+extern unsigned char boot_params[PARAM_SIZE];
-+
-+#define PARAM (boot_params)
-+#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
-+#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
-+#define ALT_MEM_K (*(unsigned long *) (PARAM+0x1e0))
-+#define E820_MAP_NR (*(char*) (PARAM+E820NR))
-+#define E820_MAP ((struct e820entry *) (PARAM+E820MAP))
-+#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
-+#define IST_INFO (*(struct ist_info *) (PARAM+0x60))
-+#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
-+#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
-+#define EFI_SYSTAB ((efi_system_table_t *) *((unsigned long *)(PARAM+0x1c4)))
-+#define EFI_MEMDESC_SIZE (*((unsigned long *) (PARAM+0x1c8)))
-+#define EFI_MEMDESC_VERSION (*((unsigned long *) (PARAM+0x1cc)))
-+#define EFI_MEMMAP ((void *) *((unsigned long *)(PARAM+0x1d0)))
-+#define EFI_MEMMAP_SIZE (*((unsigned long *) (PARAM+0x1d4)))
-+#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
-+#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
-+#define VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA))
-+#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
-+#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
-+#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
-+#define KERNEL_START (*(unsigned long *) (PARAM+0x214))
-+#define INITRD_START (*(unsigned long *) (PARAM+0x218))
-+#define INITRD_SIZE (*(unsigned long *) (PARAM+0x21c))
-+#define EDID_INFO (*(struct edid_info *) (PARAM+0x140))
-+#define EDD_NR (*(unsigned char *) (PARAM+EDDNR))
-+#define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
-+#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
-+#define EDD_BUF ((struct edd_info *) (PARAM+EDDBUF))
-+
-+#endif /* __ASSEMBLY__ */
-+
-+#endif /* __ASM_MACH_SETUP_H */
-diff --git a/include/asm-i386/mach-default/mach_system.h b/include/asm-i386/mach-default/mach_system.h
-new file mode 100644
-index 0000000..462c8d7
---- /dev/null
-+++ b/include/asm-i386/mach-default/mach_system.h
-@@ -0,0 +1,87 @@
-+#ifndef __ASM_MACH_SYSTEM_H
-+#define __ASM_MACH_SYSTEM_H
-+
-+#ifdef __KERNEL__
-+
-+#define read_cr0() ({ \
-+ unsigned int __dummy; \
-+ __asm__ __volatile__( \
-+ "movl %%cr0,%0\n\t" \
-+ :"=r" (__dummy)); \
-+ __dummy; \
-+})
-+#define write_cr0(x) \
-+ __asm__ __volatile__("movl %0,%%cr0": :"r" (x));
-+
-+#define read_cr2() ({ \
-+ unsigned int __dummy; \
-+ __asm__ __volatile__( \
-+ "movl %%cr2,%0\n\t" \
-+ :"=r" (__dummy)); \
-+ __dummy; \
-+})
-+#define write_cr2(x) \
-+ __asm__ __volatile__("movl %0,%%cr2": :"r" (x));
-+
-+#define read_cr3() ({ \
-+ unsigned int __dummy; \
-+ __asm__ ( \
-+ "movl %%cr3,%0\n\t" \
-+ :"=r" (__dummy)); \
-+ __dummy; \
-+})
-+#define write_cr3(x) \
-+ __asm__ __volatile__("movl %0,%%cr3": :"r" (x));
-+
-+#define read_cr4() ({ \
-+ unsigned int __dummy; \
-+ __asm__( \
-+ "movl %%cr4,%0\n\t" \
-+ :"=r" (__dummy)); \
-+ __dummy; \
-+})
-+
-+#define read_cr4_safe() ({ \
-+ unsigned int __dummy; \
-+ /* This could fault if %cr4 does not exist */ \
-+ __asm__("1: movl %%cr4, %0 \n" \
-+ "2: \n" \
-+ ".section __ex_table,\"a\" \n" \
-+ ".long 1b,2b \n" \
-+ ".previous \n" \
-+ : "=r" (__dummy): "0" (0)); \
-+ __dummy; \
-+})
-+
-+#define write_cr4(x) \
-+ __asm__ __volatile__("movl %0,%%cr4": :"r" (x));
-+
-+/*
-+ * Clear and set 'TS' bit respectively
-+ */
-+#define clts() __asm__ __volatile__ ("clts")
-+#define stts() write_cr0(8 | read_cr0())
-+
-+#endif /* __KERNEL__ */
-+
-+/* interrupt control.. */
-+#define local_save_flags(x) do { typecheck(unsigned long,x); __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */); } while (0)
-+#define local_irq_restore(x) do { typecheck(unsigned long,x); __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc"); } while (0)
-+#define local_irq_disable() __asm__ __volatile__("cli": : :"memory")
-+#define local_irq_enable() __asm__ __volatile__("sti": : :"memory")
-+/* used in the idle loop; sti takes one instruction cycle to complete */
-+#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
-+/* used when interrupts are already enabled or to shutdown the processor */
-+#define halt() __asm__ __volatile__("hlt": : :"memory")
-+
-+#define irqs_disabled() \
-+({ \
-+ unsigned long flags; \
-+ local_save_flags(flags); \
-+ !(flags & (1<<9)); \
-+})
-+
-+/* For spinlocks etc */
-+#define local_irq_save(x) __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
-+
-+#endif /* __ASM_MACH_SYSTEM_H */
-diff --git a/include/asm-i386/mach-default/mach_tlbflush.h b/include/asm-i386/mach-default/mach_tlbflush.h
-new file mode 100644
-index 0000000..a8eda34
---- /dev/null
-+++ b/include/asm-i386/mach-default/mach_tlbflush.h
-@@ -0,0 +1,59 @@
-+#ifndef __ASM_MACH_TLBFLUSH_H
-+#define __ASM_MACH_TLBFLUSH_H
-+
-+#define __flush_tlb() \
-+ do { \
-+ unsigned int tmpreg; \
-+ \
-+ __asm__ __volatile__( \
-+ "movl %%cr3, %0; \n" \
-+ "movl %0, %%cr3; # flush TLB \n" \
-+ : "=r" (tmpreg) \
-+ :: "memory"); \
-+ } while (0)
-+
-+/*
-+ * Global pages have to be flushed a bit differently. Not a real
-+ * performance problem because this does not happen often.
-+ */
-+#define __flush_tlb_global() \
-+ do { \
-+ unsigned int tmpreg, cr4, cr4_orig; \
-+ \
-+ __asm__ __volatile__( \
-+ "movl %%cr4, %2; # turn off PGE \n" \
-+ "movl %2, %1; \n" \
-+ "andl %3, %1; \n" \
-+ "movl %1, %%cr4; \n" \
-+ "movl %%cr3, %0; \n" \
-+ "movl %0, %%cr3; # flush TLB \n" \
-+ "movl %2, %%cr4; # turn PGE back on \n" \
-+ : "=&r" (tmpreg), "=&r" (cr4), "=&r" (cr4_orig) \
-+ : "i" (~X86_CR4_PGE) \
-+ : "memory"); \
-+ } while (0)
-+
-+#define __flush_tlb_all() \
-+ do { \
-+ if (cpu_has_pge) \
-+ __flush_tlb_global(); \
-+ else \
-+ __flush_tlb(); \
-+ } while (0)
-+
-+#define __flush_tlb_single(addr) \
-+ __asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
-+
-+#ifdef CONFIG_X86_INVLPG
-+# define __flush_tlb_one(addr) __flush_tlb_single(addr)
-+#else
-+# define __flush_tlb_one(addr) \
-+ do { \
-+ if (cpu_has_invlpg) \
-+ __flush_tlb_single(addr); \
-+ else \
-+ __flush_tlb(); \
-+ } while (0)
-+#endif
-+
-+#endif /* __ASM_MACH_TLBFLUSH_H */
-diff --git a/include/asm-i386/mach-default/mach_traps.h b/include/asm-i386/mach-default/mach_traps.h
-index 625438b..4e34a91 100644
---- a/include/asm-i386/mach-default/mach_traps.h
-+++ b/include/asm-i386/mach-default/mach_traps.h
-@@ -15,6 +15,18 @@ static inline void clear_mem_error(unsig
- outb(reason, 0x61);
- }
-
-+static inline void clear_io_check_error(unsigned char reason)
-+{
-+ unsigned long i;
-+
-+ reason = (reason & 0xf) | 8;
-+ outb(reason, 0x61);
-+ i = 2000;
-+ while (--i) udelay(1000);
-+ reason &= ~8;
-+ outb(reason, 0x61);
-+}
-+
- static inline unsigned char get_nmi_reason(void)
- {
- return inb(0x61);
-diff --git a/include/asm-i386/mach-xen/irq_vectors.h b/include/asm-i386/mach-xen/irq_vectors.h
-new file mode 100644
-index 0000000..49e5ac0
---- /dev/null
-+++ b/include/asm-i386/mach-xen/irq_vectors.h
-@@ -0,0 +1,125 @@
-+/*
-+ * This file should contain #defines for all of the interrupt vector
-+ * numbers used by this architecture.
-+ *
-+ * In addition, there are some standard defines:
-+ *
-+ * FIRST_EXTERNAL_VECTOR:
-+ * The first free place for external interrupts
-+ *
-+ * SYSCALL_VECTOR:
-+ *	The IRQ vector under which a syscall makes the user-to-kernel
-+ *	transition.
-+ *
-+ * TIMER_IRQ:
-+ * The IRQ number the timer interrupt comes in at.
-+ *
-+ * NR_IRQS:
-+ * The total number of interrupt vectors (including all the
-+ * architecture specific interrupts) needed.
-+ *
-+ */
-+#ifndef _ASM_IRQ_VECTORS_H
-+#define _ASM_IRQ_VECTORS_H
-+
-+/*
-+ * IDT vectors usable for external interrupt sources start
-+ * at 0x20:
-+ */
-+#define FIRST_EXTERNAL_VECTOR 0x20
-+
-+#define SYSCALL_VECTOR 0x80
-+
-+/*
-+ * Vectors 0x20-0x2f are used for ISA interrupts.
-+ */
-+
-+#if 0
-+/*
-+ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
-+ *
-+ * some of the following vectors are 'rare', they are merged
-+ * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
-+ * TLB, reschedule and local APIC vectors are performance-critical.
-+ *
-+ * Vectors 0xf0-0xfa are free (reserved for future Linux use).
-+ */
-+#define SPURIOUS_APIC_VECTOR 0xff
-+#define ERROR_APIC_VECTOR 0xfe
-+#define INVALIDATE_TLB_VECTOR 0xfd
-+#define RESCHEDULE_VECTOR 0xfc
-+#define CALL_FUNCTION_VECTOR 0xfb
-+
-+#define THERMAL_APIC_VECTOR 0xf0
-+/*
-+ * Local APIC timer IRQ vector is on a different priority level,
-+ * to work around the 'lost local interrupt if more than 2 IRQ
-+ * sources per level' errata.
-+ */
-+#define LOCAL_TIMER_VECTOR 0xef
-+#endif
-+
-+#define SPURIOUS_APIC_VECTOR 0xff
-+#define ERROR_APIC_VECTOR 0xfe
-+
-+/*
-+ * First APIC vector available to drivers: (vectors 0x30-0xee)
-+ * we start at 0x31 to spread out vectors evenly between priority
-+ * levels. (0x80 is the syscall vector)
-+ */
-+#define FIRST_DEVICE_VECTOR 0x31
-+#define FIRST_SYSTEM_VECTOR 0xef
-+
-+/*
-+ * 16 8259A IRQs, 208 potential APIC interrupt sources.
-+ * Right now the APIC is mostly only used for SMP.
-+ * 256 vectors is an architectural limit. (we can have
-+ * more than 256 devices theoretically, but they will
-+ * have to use shared interrupts)
-+ * Since vectors 0x00-0x1f are used/reserved for the CPU,
-+ * the usable vector space is 0x20-0xff (224 vectors)
-+ */
-+
-+#define RESCHEDULE_VECTOR 0
-+#define CALL_FUNCTION_VECTOR 1
-+#define NR_IPIS 2
-+
-+/*
-+ * The maximum number of vectors supported by i386 processors
-+ * is limited to 256. For processors other than i386, NR_VECTORS
-+ * should be changed accordingly.
-+ */
-+#define NR_VECTORS 256
-+
-+#define FPU_IRQ 13
-+
-+#define FIRST_VM86_IRQ 3
-+#define LAST_VM86_IRQ 15
-+#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
-+
-+/*
-+ * The flat IRQ space is divided into two regions:
-+ * 1. A one-to-one mapping of real physical IRQs. This space is only used
-+ * if we have physical device-access privilege. This region is at the
-+ * start of the IRQ space so that existing device drivers do not need
-+ * to be modified to translate physical IRQ numbers into our IRQ space.
-+ * 2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
-+ * are bound using the provided bind/unbind functions.
-+ */
-+
-+#define PIRQ_BASE 0
-+#define NR_PIRQS 256
-+
-+#define DYNIRQ_BASE (PIRQ_BASE + NR_PIRQS)
-+#define NR_DYNIRQS 256
-+
-+#define NR_IRQS (NR_PIRQS + NR_DYNIRQS)
-+#define NR_IRQ_VECTORS NR_IRQS
-+
-+#define pirq_to_irq(_x) ((_x) + PIRQ_BASE)
-+#define irq_to_pirq(_x) ((_x) - PIRQ_BASE)
-+
-+#define dynirq_to_irq(_x) ((_x) + DYNIRQ_BASE)
-+#define irq_to_dynirq(_x) ((_x) - DYNIRQ_BASE)
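-+
-+/* Example: with PIRQ_BASE 0 and NR_PIRQS 256, pirq_to_irq(14) == 14 and
-+ * dynirq_to_irq(0) == 256, i.e. dynamic IRQs sit just above the
-+ * physical range. */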
-+
-+#endif /* _ASM_IRQ_VECTORS_H */
-diff --git a/include/asm-i386/mach-xen/mach_dma_map.h b/include/asm-i386/mach-xen/mach_dma_map.h
-new file mode 100644
-index 0000000..c690f3f
---- /dev/null
-+++ b/include/asm-i386/mach-xen/mach_dma_map.h
-@@ -0,0 +1,47 @@
-+#ifndef __ASM_MACH_DMA_MAP_H
-+#define __ASM_MACH_DMA_MAP_H
-+
-+static inline int
-+address_needs_mapping(struct device *hwdev, dma_addr_t addr)
-+{
-+ dma_addr_t mask = 0xffffffff;
-+ /* If the device has a mask, use it, otherwise default to 32 bits */
-+ if (hwdev && hwdev->dma_mask)
-+ mask = *hwdev->dma_mask;
-+ return (addr & ~mask) != 0;
-+}
-+
-+static inline int
-+range_straddles_page_boundary(void *p, size_t size)
-+{
-+ extern unsigned long *contiguous_bitmap;
-+ return (((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
-+ !test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap));
-+}
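-+
-+/* Illustrative, assuming 4kB pages: p == 0xff0 with size == 0x20 ends at
-+ * 0x1010 > PAGE_SIZE, so it straddles unless the underlying frames are
-+ * machine-contiguous per contiguous_bitmap. */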
-+
-+extern dma_addr_t
-+dma_map_single(struct device *dev, void *ptr, size_t size,
-+ enum dma_data_direction direction);
-+extern void
-+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-+ enum dma_data_direction direction);
-+
-+extern int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
-+ int nents, enum dma_data_direction direction);
-+extern void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg,
-+ int nents, enum dma_data_direction direction);
-+
-+extern dma_addr_t
-+dma_map_page(struct device *dev, struct page *page, unsigned long offset,
-+ size_t size, enum dma_data_direction direction);
-+extern void
-+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-+ enum dma_data_direction direction);
-+
-+extern int
-+dma_mapping_error(dma_addr_t dma_addr);
-+
-+extern int
-+dma_supported(struct device *dev, u64 mask);
-+
-+#endif /* __ASM_MACH_DMA_MAP_H */
-diff --git a/include/asm-i386/mach-xen/mach_fixmap.h b/include/asm-i386/mach-xen/mach_fixmap.h
-new file mode 100644
-index 0000000..018c82f
---- /dev/null
-+++ b/include/asm-i386/mach-xen/mach_fixmap.h
-@@ -0,0 +1,15 @@
-+#ifndef __ASM_MACH_FIXMAP_H
-+#define __ASM_MACH_FIXMAP_H
-+
-+/* used by vmalloc.c, vsyscall.lds.S.
-+ *
-+ * Leave one empty page between vmalloc'ed areas and
-+ * the start of the fixmap.
-+ */
-+#define __FIXADDR_TOP (HYPERVISOR_VIRT_START - 2 * PAGE_SIZE)
-+
-+#ifndef __ASSEMBLY__
-+#include <xen/gnttab.h>
-+#endif
-+
-+#endif /* __ASM_MACH_FIXMAP_H */
-diff --git a/include/asm-i386/mach-xen/mach_io.h b/include/asm-i386/mach-xen/mach_io.h
-new file mode 100644
-index 0000000..a026e52
---- /dev/null
-+++ b/include/asm-i386/mach-xen/mach_io.h
-@@ -0,0 +1,46 @@
-+#ifndef __ASM_MACH_IO_H
-+#define __ASM_MACH_IO_H
-+
-+/*
-+ * Change "struct page" to physical address.
-+ */
-+#define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-+#define page_to_phys(page) (phys_to_machine(page_to_pseudophys(page)))
-+
-+#define bio_to_pseudophys(bio) (page_to_pseudophys(bio_page((bio))) + \
-+ (unsigned long) bio_offset((bio)))
-+#define bvec_to_pseudophys(bv) (page_to_pseudophys((bv)->bv_page) + \
-+ (unsigned long) (bv)->bv_offset)
-+
-+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
-+ (((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
-+ ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
-+ bvec_to_pseudophys((vec2))))
-+
-+/*
-+ * ISA space is 'always mapped' on a typical x86 system, no need to
-+ * explicitly ioremap() it. The fact that the ISA IO space is mapped
-+ * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
-+ * are physical addresses. The following constant pointer can be
-+ * used as the IO-area pointer (it can be iounmapped as well, so the
-+ * analogy with PCI is quite large):
-+ */
-+#define __ISA_IO_base ((char __iomem *)(fix_to_virt(FIX_ISAMAP_BEGIN)))
-+
-+/*
-+ * ISA I/O bus memory addresses are not 1:1 with physical addresses
-+ * under Xen; the virt/page conversions below are unsupported, and only
-+ * isa_bus_to_virt() works, via the fixmapped ISA hole.
-+ */
-+#define isa_virt_to_bus(_x) isa_virt_to_bus_is_UNSUPPORTED->x
-+#define isa_page_to_bus(_x) isa_page_to_bus_is_UNSUPPORTED->x
-+#define isa_bus_to_virt(_x) (void *)(__fix_to_virt(FIX_ISAMAP_BEGIN) + (_x))
-+
-+/*
-+ * However PCI ones are not necessarily 1:1 and therefore these interfaces
-+ * are forbidden in portable PCI drivers.
-+ *
-+ * Allow them on x86 for legacy drivers, though.
-+ */
-+#define virt_to_bus(_x) phys_to_machine(__pa(_x))
-+#define bus_to_virt(_x) __va(machine_to_phys(_x))
-+
-+#endif /* __ASM_MACH_IO_H */
-diff --git a/include/asm-i386/mach-xen/mach_mmu.h b/include/asm-i386/mach-xen/mach_mmu.h
-new file mode 100644
-index 0000000..b50dbb5
---- /dev/null
-+++ b/include/asm-i386/mach-xen/mach_mmu.h
-@@ -0,0 +1,47 @@
-+#ifndef __ASM_MACH_MMU_H
-+#define __ASM_MACH_MMU_H
-+
-+#define prepare_arch_switch(next) __prepare_arch_switch()
-+
-+static inline void __prepare_arch_switch(void)
-+{
-+ /*
-+ * Save away %fs and %gs. No need to save %es and %ds, as those
-+ * are always kernel segments while inside the kernel. Must
-+ * happen before reload of cr3/ldt (i.e., not in __switch_to).
-+ */
-+ asm volatile ( "mov %%fs,%0 ; mov %%gs,%1"
-+ : "=m" (current->thread.fs),
-+ "=m" (current->thread.gs));
-+ asm volatile ( "movl %0,%%fs ; movl %0,%%gs"
-+ : : "r" (0) );
-+}
-+
-+extern void mm_pin(struct mm_struct *mm);
-+extern void mm_unpin(struct mm_struct *mm);
-+void mm_pin_all(void);
-+
-+#define MACH_SWITCH_DECLS struct mmuext_op ops[2], *op = ops
-+
-+static inline struct mmuext_op *mach_switch_pgd(struct mm_struct *next, int cpu, struct mmuext_op *op)
-+{
-+ if (!test_bit(PG_pinned, &virt_to_page(next->pgd)->flags))
-+ mm_pin(next);
-+ op->cmd = MMUEXT_NEW_BASEPTR;
-+ op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
-+ return op + 1;
-+}
-+#define mach_switch_pgd(next, cpu) ((void)(op = mach_switch_pgd(next, cpu, op)))
-+
-+static inline struct mmuext_op *mach_switch_ldt(struct mm_struct *next, int cpu, struct mmuext_op *op)
-+{
-+ op->cmd = MMUEXT_SET_LDT;
-+ op->arg1.linear_addr = (unsigned long)next->context.ldt;
-+ op->arg2.nr_ents = next->context.size;
-+ return op + 1;
-+}
-+#define mach_switch_ldt(next, cpu) ((void)(op = mach_switch_ldt(next, cpu, op)))
-+
-+#define mach_switch_commit() HYPERVISOR_mmuext_op(ops, op - ops, NULL, DOMID_SELF)
-+
-+#endif /* __ASM_MACH_MMU_H */
-diff --git a/include/asm-i386/mach-xen/mach_mode.h b/include/asm-i386/mach-xen/mach_mode.h
-new file mode 100644
-index 0000000..792cabd
---- /dev/null
-+++ b/include/asm-i386/mach-xen/mach_mode.h
-@@ -0,0 +1,7 @@
-+#ifndef __ASM_MACH_MODE_H
-+#define __ASM_MACH_MODE_H
-+
-+#define KERNEL_RPL 1
-+#define USER_MODE_MASK 2
-+
-+#endif /* __ASM_MACH_MODE_H */
-diff --git a/include/asm-i386/mach-xen/mach_page.h b/include/asm-i386/mach-xen/mach_page.h
-new file mode 100644
-index 0000000..274aedd
---- /dev/null
-+++ b/include/asm-i386/mach-xen/mach_page.h
-@@ -0,0 +1,205 @@
-+#ifndef __ASM_MACH_PAGE_H
-+#define __ASM_MACH_PAGE_H
-+
-+#ifndef __ASSEMBLY__
-+
-+#include <linux/string.h>
-+#include <linux/types.h>
-+#include <linux/kernel.h>
-+#include <asm/bug.h>
-+#include <xen/interface/xen.h>
-+#include <xen/features.h>
-+#include <xen/foreign_page.h>
-+
-+#define arch_free_page(_page,_order) \
-+({ int foreign = PageForeign(_page); \
-+ if (foreign) \
-+ (PageForeignDestructor(_page))(_page); \
-+ foreign; \
-+})
-+#define HAVE_ARCH_FREE_PAGE
-+
-+#ifdef CONFIG_XEN_SCRUB_PAGES
-+#define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
-+#else
-+#define scrub_pages(_p,_n) ((void)0)
-+#endif
-+
-+#ifndef CONFIG_X86_USE_3DNOW
-+#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
-+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
-+#endif
-+
-+/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
-+#define INVALID_P2M_ENTRY (~0UL)
-+#define FOREIGN_FRAME_BIT (1UL<<31)
-+#define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT)
-+
-+extern unsigned long *phys_to_machine_mapping;
-+
-+static inline unsigned long pfn_to_mfn(unsigned long pfn)
-+{
-+ if (xen_feature(XENFEAT_auto_translated_physmap))
-+ return pfn;
-+ return phys_to_machine_mapping[(unsigned int)(pfn)] &
-+ ~FOREIGN_FRAME_BIT;
-+}
-+
-+static inline int phys_to_machine_mapping_valid(unsigned long pfn)
-+{
-+ if (xen_feature(XENFEAT_auto_translated_physmap))
-+ return 1;
-+ return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY);
-+}
-+
-+static inline unsigned long mfn_to_pfn(unsigned long mfn)
-+{
-+ unsigned long pfn;
-+
-+ if (xen_feature(XENFEAT_auto_translated_physmap))
-+ return mfn;
-+
-+ /*
-+ * The array access can fail (e.g., device space beyond end of RAM).
-+ * In such cases it doesn't matter what we return (we return garbage),
-+ * but we must handle the fault without crashing!
-+ */
-+ asm (
-+ "1: movl %1,%0\n"
-+ "2:\n"
-+ ".section __ex_table,\"a\"\n"
-+ " .align 4\n"
-+ " .long 1b,2b\n"
-+ ".previous"
-+ : "=r" (pfn) : "m" (machine_to_phys_mapping[mfn]) );
-+
-+ return pfn;
-+}
-+
-+/*
-+ * We detect special mappings in one of two ways:
-+ * 1. If the MFN is an I/O page then Xen will set the m2p entry
-+ * to be outside our maximum possible pseudophys range.
-+ * 2. If the MFN belongs to a different domain then we will certainly
-+ * not have MFN in our p2m table. Conversely, if the page is ours,
-+ * then we'll have p2m(m2p(MFN))==MFN.
-+ * If we detect a special mapping then it doesn't have a 'struct page'.
-+ * We force !pfn_valid() by returning an out-of-range PFN.
-+ *
-+ * NB. These checks require that, for any MFN that is not in our reservation,
-+ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
-+ * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
-+ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
-+ *
-+ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
-+ * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
-+ * require. In all the cases we care about, the FOREIGN_FRAME bit is
-+ * masked (e.g., pfn_to_mfn()) so behaviour there is correct.
-+ */
-+static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
-+{
-+ extern unsigned long max_mapnr;
-+ unsigned long pfn = mfn_to_pfn(mfn);
-+ if ((pfn < max_mapnr)
-+ && !xen_feature(XENFEAT_auto_translated_physmap)
-+ && (phys_to_machine_mapping[pfn] != mfn))
-+ return max_mapnr; /* force !pfn_valid() */
-+ return pfn;
-+}
-+
-+static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-+{
-+ if (xen_feature(XENFEAT_auto_translated_physmap)) {
-+ BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
-+ return;
-+ }
-+ phys_to_machine_mapping[pfn] = mfn;
-+}
-+
-+/* Definitions for machine and pseudophysical addresses. */
-+#ifdef CONFIG_X86_PAE
-+typedef unsigned long long paddr_t;
-+typedef unsigned long long maddr_t;
-+#else
-+typedef unsigned long paddr_t;
-+typedef unsigned long maddr_t;
-+#endif
-+
-+static inline maddr_t phys_to_machine(paddr_t phys)
-+{
-+ maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
-+ machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
-+ return machine;
-+}
-+static inline paddr_t machine_to_phys(maddr_t machine)
-+{
-+ paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
-+ phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
-+ return phys;
-+}
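-+
-+/* Worked example (illustrative, 4kB pages): if pfn_to_mfn(0x12) == 0xabc,
-+ * then phys_to_machine(0x12345) == (0xabc << PAGE_SHIFT) | 0x345. */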
-+
-+#ifdef CONFIG_X86_PAE
-+#define __pte(x) ({ unsigned long long _x = (x); \
-+ if (_x & 1) _x = phys_to_machine(_x); \
-+ ((pte_t) {(unsigned long)(_x), (unsigned long)(_x>>32)}); })
-+#define __pgd(x) ({ unsigned long long _x = (x); \
-+ (((_x)&1) ? ((pgd_t) {phys_to_machine(_x)}) : ((pgd_t) {(_x)})); })
-+#define __pmd(x) ({ unsigned long long _x = (x); \
-+ (((_x)&1) ? ((pmd_t) {phys_to_machine(_x)}) : ((pmd_t) {(_x)})); })
-+static inline unsigned long long pte_val(pte_t x)
-+{
-+ unsigned long long ret;
-+
-+ if (x.pte_low) {
-+ ret = x.pte_low | (unsigned long long)x.pte_high << 32;
-+ ret = machine_to_phys(ret) | 1;
-+ } else {
-+ ret = 0;
-+ }
-+ return ret;
-+}
-+static inline unsigned long long pmd_val(pmd_t x)
-+{
-+ unsigned long long ret = x.pmd;
-+ if (ret) ret = machine_to_phys(ret) | 1;
-+ return ret;
-+}
-+static inline unsigned long long pgd_val(pgd_t x)
-+{
-+ unsigned long long ret = x.pgd;
-+ if (ret) ret = machine_to_phys(ret) | 1;
-+ return ret;
-+}
-+static inline unsigned long long pte_val_ma(pte_t x)
-+{
-+ return (unsigned long long)x.pte_high << 32 | x.pte_low;
-+}
-+#else
-+#define pte_val(x) (((x).pte_low & 1) ? machine_to_phys((x).pte_low) : \
-+ (x).pte_low)
-+#define pte_val_ma(x) ((x).pte_low)
-+#define __pte(x) ({ unsigned long _x = (x); \
-+ (((_x)&1) ? ((pte_t) {phys_to_machine(_x)}) : ((pte_t) {(_x)})); })
-+#define __pgd(x) ({ unsigned long _x = (x); \
-+ (((_x)&1) ? ((pgd_t) {phys_to_machine(_x)}) : ((pgd_t) {(_x)})); })
-+static inline unsigned long pgd_val(pgd_t x)
-+{
-+ unsigned long ret = x.pgd;
-+ if (ret) ret = machine_to_phys(ret) | 1;
-+ return ret;
-+}
-+#endif
-+
-+#define __pte_ma(x) ((pte_t) { (x) } )
-+
-+/* VIRT <-> MACHINE conversion */
-+#define virt_to_machine(v) (phys_to_machine(__pa(v)))
-+#define virt_to_mfn(v) (pfn_to_mfn(__pa(v) >> PAGE_SHIFT))
-+#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
-+
-+#endif /* __ASSEMBLY__ */
-+
-+#undef LOAD_OFFSET
-+#define LOAD_OFFSET 0
-+
-+#endif /* __ASM_MACH_PAGE_H */
-diff --git a/include/asm-i386/mach-xen/mach_pgtable.h b/include/asm-i386/mach-xen/mach_pgtable.h
-new file mode 100644
-index 0000000..3e2a1db
---- /dev/null
-+++ b/include/asm-i386/mach-xen/mach_pgtable.h
-@@ -0,0 +1,119 @@
-+#ifndef __ASM_MACH_PGTABLE_H
-+#define __ASM_MACH_PGTABLE_H
-+
-+#include <asm/hypervisor.h>
-+
-+#define HAVE_SHARED_KERNEL_PMD 0
-+
-+extern pgd_t *swapper_pg_dir;
-+
-+#define set_pte_at(_mm,addr,ptep,pteval) do { \
-+ if (((_mm) != current->mm && (_mm) != &init_mm) || \
-+ HYPERVISOR_update_va_mapping((addr), (pteval), 0)) \
-+ set_pte((ptep), (pteval)); \
-+} while (0)
-+
-+#define set_pte_at_sync(_mm,addr,ptep,pteval) do { \
-+ if (((_mm) != current->mm && (_mm) != &init_mm) || \
-+ HYPERVISOR_update_va_mapping((addr), (pteval), UVMF_INVLPG)) { \
-+ set_pte((ptep), (pteval)); \
-+ xen_invlpg((addr)); \
-+ } \
-+} while (0)
-+
-+#ifndef CONFIG_X86_PAE
-+#define PTRS_PER_PGD_NO_HV (HYPERVISOR_VIRT_START >> PGDIR_SHIFT)
-+#define pte_mfn(_pte) ((_pte).pte_low >> PAGE_SHIFT)
-+#define pfn_pte_ma(pfn, prot) __pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-+#else
-+#define PTRS_PER_PGD_NO_HV PTRS_PER_PGD
-+#define pte_mfn(_pte) (((_pte).pte_low >> PAGE_SHIFT) |\
-+ (((_pte).pte_high & 0xfff) << (32-PAGE_SHIFT)))
-+static inline pte_t pfn_pte_ma(unsigned long page_nr, pgprot_t pgprot)
-+{
-+ pte_t pte;
-+
-+ pte.pte_high = (page_nr >> (32 - PAGE_SHIFT)) | \
-+ (pgprot_val(pgprot) >> 32);
-+ pte.pte_high &= (__supported_pte_mask >> 32);
-+ pte.pte_low = ((page_nr << PAGE_SHIFT) | pgprot_val(pgprot)) & \
-+ __supported_pte_mask;
-+ return pte;
-+}
-+#endif
-+
-+#define pte_pfn(_pte) mfn_to_local_pfn(pte_mfn(_pte))
-+
-+static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
-+{
-+ return pfn_pte_ma(pfn_to_mfn(page_nr), pgprot);
-+}
-+
-+/* pmd_present doesn't just test the _PAGE_PRESENT bit, since writable
-+   page tables (wr.p.t.) can temporarily clear it. */
-+#define pmd_present(x) (pmd_val(x))
-+#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))
-+
-+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-+ do { \
-+ if (__dirty) { \
-+ if ( likely((__vma)->vm_mm == current->mm) ) { \
-+ BUG_ON(HYPERVISOR_update_va_mapping((__address), (__entry), UVMF_INVLPG|UVMF_MULTI|(unsigned long)((__vma)->vm_mm->cpu_vm_mask.bits))); \
-+ } else { \
-+ xen_l1_entry_update((__ptep), (__entry)); \
-+ flush_tlb_page((__vma), (__address)); \
-+ } \
-+ } \
-+ } while (0)
-+
-+#define __HAVE_ARCH_PTEP_ESTABLISH
-+#define ptep_establish(__vma, __address, __ptep, __entry) \
-+do { \
-+ ptep_set_access_flags(__vma, __address, __ptep, __entry, 1); \
-+} while (0)
-+
-+#include <xen/features.h>
-+void make_lowmem_page_readonly(void *va, unsigned int feature);
-+void make_lowmem_page_writable(void *va, unsigned int feature);
-+void make_page_readonly(void *va, unsigned int feature);
-+void make_page_writable(void *va, unsigned int feature);
-+void make_pages_readonly(void *va, unsigned int nr, unsigned int feature);
-+void make_pages_writable(void *va, unsigned int nr, unsigned int feature);
-+
-+#define virt_to_ptep(__va) \
-+({ \
-+ pgd_t *__pgd = pgd_offset_k((unsigned long)(__va)); \
-+ pud_t *__pud = pud_offset(__pgd, (unsigned long)(__va)); \
-+ pmd_t *__pmd = pmd_offset(__pud, (unsigned long)(__va)); \
-+ pte_offset_kernel(__pmd, (unsigned long)(__va)); \
-+})
-+
-+#define arbitrary_virt_to_machine(__va) \
-+({ \
-+ maddr_t m = (maddr_t)pte_mfn(*virt_to_ptep(__va)) << PAGE_SHIFT;\
-+ m | ((unsigned long)(__va) & (PAGE_SIZE-1)); \
-+})
-+
-+int direct_remap_pfn_range(struct vm_area_struct *vma,
-+ unsigned long address,
-+ unsigned long mfn,
-+ unsigned long size,
-+ pgprot_t prot,
-+ domid_t domid);
-+int direct_kernel_remap_pfn_range(unsigned long address,
-+ unsigned long mfn,
-+ unsigned long size,
-+ pgprot_t prot,
-+ domid_t domid);
-+int create_lookup_pte_addr(struct mm_struct *mm,
-+ unsigned long address,
-+ uint64_t *ptep);
-+int touch_pte_range(struct mm_struct *mm,
-+ unsigned long address,
-+ unsigned long size);
-+
-+#define io_remap_pfn_range(vma,from,pfn,size,prot) \
-+direct_remap_pfn_range(vma,from,pfn,size,prot,DOMID_IO)
-+
-+#endif /* __ASM_MACH_PGTABLE_H */
-diff --git a/include/asm-i386/mach-xen/mach_processor.h b/include/asm-i386/mach-xen/mach_processor.h
-new file mode 100644
-index 0000000..4080373
---- /dev/null
-+++ b/include/asm-i386/mach-xen/mach_processor.h
-@@ -0,0 +1,57 @@
-+#ifndef __ASM_MACH_PROCESSOR_H
-+#define __ASM_MACH_PROCESSOR_H
-+
-+#include <xen/interface/physdev.h>
-+
-+static inline void __unsupported_cr4(void)
-+{
-+ const char *msg = "Xen unsupported cr4 update\n";
-+
-+ (void)HYPERVISOR_console_io(
-+ CONSOLEIO_write, __builtin_strlen(msg), (char *)msg);
-+ BUG();
-+}
-+
-+static inline void set_in_cr4(unsigned long mask)
-+{
-+ mmu_cr4_features |= mask;
-+ switch (mask) {
-+ case X86_CR4_OSFXSR:
-+ case X86_CR4_OSXMMEXCPT:
-+ break;
-+ default:
-+ __unsupported_cr4();
-+ }
-+}
-+
-+static inline void clear_in_cr4(unsigned long mask)
-+{
-+ mmu_cr4_features &= ~mask;
-+ __unsupported_cr4();
-+}
-+
-+#define mach_load_esp0(tss, thread) \
-+ HYPERVISOR_stack_switch(__KERNEL_DS, (thread)->esp0)
-+
-+/*
-+ * These special macros can be used to get or set a debugging register
-+ */
-+#define get_debugreg(var, register) \
-+ (var) = HYPERVISOR_get_debugreg((register))
-+#define set_debugreg(value, register) \
-+ HYPERVISOR_set_debugreg((register), (value))
-+
-+/*
-+ * Set IOPL bits in EFLAGS from given mask
-+ */
-+static inline void set_iopl_mask(unsigned mask)
-+{
-+ physdev_op_t op;
-+
-+ /* Force the change at ring 0. */
-+ op.cmd = PHYSDEVOP_SET_IOPL;
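-+	/* EFLAGS keeps IOPL in bits 12-13, hence (mask >> 12) & 3;
-+	 * mask 0 maps to IOPL 1 because the kernel itself runs in ring 1. */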
-+ op.u.set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
-+ HYPERVISOR_physdev_op(&op);
-+}
-+
-+#endif /* __ASM_MACH_PROCESSOR_H */
-diff --git a/include/asm-i386/mach-xen/mach_seg.h b/include/asm-i386/mach-xen/mach_seg.h
-new file mode 100644
-index 0000000..aa7f5a2
---- /dev/null
-+++ b/include/asm-i386/mach-xen/mach_seg.h
-@@ -0,0 +1,12 @@
-+#ifndef __ASM_MACH_SEG_H
-+#define __ASM_MACH_SEG_H
-+
-+#define GET_KERNEL_CS() (__KERNEL_CS | (xen_feature(XENFEAT_supervisor_mode_kernel)?0:1) )
-+#define GET_KERNEL_DS() (__KERNEL_DS | (xen_feature(XENFEAT_supervisor_mode_kernel)?0:1) )
-+
-+/*
-+ * The GDT has 18 entries
-+ */
-+#define GDT_ENTRIES 18
-+
-+#endif /* __ASM_MACH_SEG_H */
-diff --git a/include/asm-i386/mach-xen/mach_setup.h b/include/asm-i386/mach-xen/mach_setup.h
-new file mode 100644
-index 0000000..8f4cd35
---- /dev/null
-+++ b/include/asm-i386/mach-xen/mach_setup.h
-@@ -0,0 +1,11 @@
-+#ifndef __ASM_MACH_SETUP_H
-+#define __ASM_MACH_SETUP_H
-+
-+#ifndef __ASSEMBLY__
-+
-+#define INITRD_START (__pa(xen_start_info->mod_start))
-+#define INITRD_SIZE (xen_start_info->mod_len)
-+
-+#endif /* __ASSEMBLY__ */
-+
-+#endif /* __ASM_MACH_SETUP_H */
-diff --git a/include/asm-i386/mach-xen/mach_system.h b/include/asm-i386/mach-xen/mach_system.h
-new file mode 100644
-index 0000000..d364816
---- /dev/null
-+++ b/include/asm-i386/mach-xen/mach_system.h
-@@ -0,0 +1,157 @@
-+#ifndef __ASM_MACH_SYSTEM_H
-+#define __ASM_MACH_SYSTEM_H
-+
-+#ifdef __KERNEL__
-+
-+#include <asm/hypervisor.h>
-+
-+#ifdef CONFIG_SMP
-+#define __vcpu_id smp_processor_id()
-+#else
-+#define __vcpu_id 0
-+#endif
-+
-+#define read_cr0() ({ \
-+ unsigned int __dummy; \
-+ __asm__ __volatile__( \
-+ "movl %%cr0,%0\n\t" \
-+ :"=r" (__dummy)); \
-+ __dummy; \
-+})
-+#define write_cr0(x) BUG()
-+
-+#define read_cr2() \
-+ (HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].arch.cr2)
-+#define write_cr2(x) \
-+ __asm__ __volatile__("movl %0,%%cr2": :"r" (x));
-+
-+#define read_cr3() ({ \
-+ unsigned int __dummy; \
-+ __asm__ ( \
-+ "movl %%cr3,%0\n\t" \
-+ :"=r" (__dummy)); \
-+ machine_to_phys(__dummy); \
-+})
-+#define write_cr3(x) ({ \
-+ maddr_t __dummy = phys_to_machine(x); \
-+ __asm__ __volatile__("movl %0,%%cr3": :"r" (__dummy)); \
-+})
-+
-+#define read_cr4() ({ \
-+ unsigned int __dummy; \
-+ __asm__( \
-+ "movl %%cr4,%0\n\t" \
-+ :"=r" (__dummy)); \
-+ __dummy; \
-+})
-+
-+#define read_cr4_safe() ({ \
-+ unsigned int __dummy; \
-+ /* This could fault if %cr4 does not exist */ \
-+ __asm__("1: movl %%cr4, %0 \n" \
-+ "2: \n" \
-+ ".section __ex_table,\"a\" \n" \
-+ ".long 1b,2b \n" \
-+ ".previous \n" \
-+ : "=r" (__dummy): "0" (0)); \
-+ __dummy; \
-+})
-+
-+#define write_cr4(x) BUG()
-+
-+/*
-+ * Clear and set 'TS' bit respectively
-+ */
-+#define clts() (HYPERVISOR_fpu_taskswitch(0))
-+#define stts() (HYPERVISOR_fpu_taskswitch(1))
-+
-+/* interrupt control.. */
-+
-+/*
-+ * The use of 'barrier' in the following reflects their use as local-lock
-+ * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
-+ * critical operations are executed. All critical operations must complete
-+ * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
-+ * includes these barriers, for example.
-+ */
-+
-+#define __cli() \
-+do { \
-+ vcpu_info_t *_vcpu; \
-+ preempt_disable(); \
-+ _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
-+ _vcpu->evtchn_upcall_mask = 1; \
-+ preempt_enable_no_resched(); \
-+ barrier(); \
-+} while (0)
-+
-+#define __sti() \
-+do { \
-+ vcpu_info_t *_vcpu; \
-+ barrier(); \
-+ preempt_disable(); \
-+ _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
-+ _vcpu->evtchn_upcall_mask = 0; \
-+ barrier(); /* unmask then check (avoid races) */ \
-+ if (unlikely(_vcpu->evtchn_upcall_pending)) \
-+ force_evtchn_callback(); \
-+ preempt_enable(); \
-+} while (0)
-+
-+#define __save_flags(x) \
-+do { \
-+ vcpu_info_t *_vcpu; \
-+ preempt_disable(); \
-+ _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
-+ (x) = _vcpu->evtchn_upcall_mask; \
-+ preempt_enable(); \
-+} while (0)
-+
-+#define __restore_flags(x) \
-+do { \
-+ vcpu_info_t *_vcpu; \
-+ barrier(); \
-+ preempt_disable(); \
-+ _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
-+ if ((_vcpu->evtchn_upcall_mask = (x)) == 0) { \
-+ barrier(); /* unmask then check (avoid races) */ \
-+ if (unlikely(_vcpu->evtchn_upcall_pending)) \
-+ force_evtchn_callback(); \
-+ preempt_enable(); \
-+ } else \
-+ preempt_enable_no_resched(); \
-+} while (0)
-+
-+#define safe_halt() ((void)0)
-+#define halt() ((void)0)
-+
-+#define __save_and_cli(x) \
-+do { \
-+ vcpu_info_t *_vcpu; \
-+ preempt_disable(); \
-+ _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
-+ (x) = _vcpu->evtchn_upcall_mask; \
-+ _vcpu->evtchn_upcall_mask = 1; \
-+ preempt_enable_no_resched(); \
-+ barrier(); \
-+} while (0)
-+
-+#define local_irq_save(x) __save_and_cli(x)
-+#define local_irq_restore(x) __restore_flags(x)
-+#define local_save_flags(x) __save_flags(x)
-+#define local_irq_disable() __cli()
-+#define local_irq_enable() __sti()
-+
-+/* Cannot use preempt_enable() here as we would recurse in preempt_sched(). */
-+#define irqs_disabled() \
-+({ int ___x; \
-+ vcpu_info_t *_vcpu; \
-+ preempt_disable(); \
-+ _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
-+ ___x = (_vcpu->evtchn_upcall_mask != 0); \
-+ preempt_enable_no_resched(); \
-+ ___x; })
-+
-+#endif /* __KERNEL__ */
-+
-+#endif /* __ASM_MACH_SYSTEM_H */
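The interrupt-control macros above are the core of the paravirtualized design: cli/sti are replaced by a per-VCPU mask byte in a page shared with Xen, and __sti() must re-check evtchn_upcall_pending after unmasking so an event delivered while masked is not lost. A single-threaded model of that ordering (all names here are invented for the demo; only the unmask-then-check sequence mirrors the macros):

    #include <stdio.h>

    struct fake_vcpu_info {
        unsigned char evtchn_upcall_mask;
        unsigned char evtchn_upcall_pending;
    };

    static struct fake_vcpu_info vcpu;

    static void force_evtchn_callback(void)
    {
        /* the real code issues a cheap hypercall so Xen re-delivers
         * the pending upcall; here we just consume it */
        printf("delivering pending event\n");
        vcpu.evtchn_upcall_pending = 0;
    }

    static void fake_sti(void)
    {
        vcpu.evtchn_upcall_mask = 0;
        __atomic_thread_fence(__ATOMIC_SEQ_CST);  /* unmask, then check */
        if (vcpu.evtchn_upcall_pending)
            force_evtchn_callback();
    }

    int main(void)
    {
        vcpu.evtchn_upcall_mask = 1;     /* __cli() */
        vcpu.evtchn_upcall_pending = 1;  /* event arrived while masked */
        fake_sti();                      /* must not lose the event */
        return 0;
    }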
-diff --git a/include/asm-i386/mach-xen/mach_tlbflush.h b/include/asm-i386/mach-xen/mach_tlbflush.h
-new file mode 100644
-index 0000000..bce6ac2
---- /dev/null
-+++ b/include/asm-i386/mach-xen/mach_tlbflush.h
-@@ -0,0 +1,10 @@
-+#ifndef __ASM_MACH_TLBFLUSH_H
-+#define __ASM_MACH_TLBFLUSH_H
-+
-+#define __flush_tlb() xen_tlb_flush()
-+#define __flush_tlb_global() xen_tlb_flush()
-+#define __flush_tlb_all() xen_tlb_flush()
-+#define __flush_tlb_single(addr) xen_invlpg(addr)
-+#define __flush_tlb_one(addr) __flush_tlb_single(addr)
-+
-+#endif /* __ASM_MACH_TLBFLUSH_H */
-diff --git a/include/asm-i386/mach-xen/mach_traps.h b/include/asm-i386/mach-xen/mach_traps.h
-new file mode 100644
-index 0000000..8daed2c
---- /dev/null
-+++ b/include/asm-i386/mach-xen/mach_traps.h
-@@ -0,0 +1,33 @@
-+/*
-+ * include/asm-xen/asm-i386/mach-xen/mach_traps.h
-+ *
-+ * Machine specific NMI handling for Xen
-+ */
-+#ifndef _MACH_TRAPS_H
-+#define _MACH_TRAPS_H
-+
-+#include <linux/bitops.h>
-+#include <xen/interface/nmi.h>
-+
-+static inline void clear_mem_error(unsigned char reason) {}
-+static inline void clear_io_check_error(unsigned char reason) {}
-+
-+static inline unsigned char get_nmi_reason(void)
-+{
-+ shared_info_t *s = HYPERVISOR_shared_info;
-+ unsigned char reason = 0;
-+
-+ /* construct a value which looks like it came from
-+ * port 0x61.
-+ */
-+ if (test_bit(_XEN_NMIREASON_io_error, &s->arch.nmi_reason))
-+ reason |= 0x40;
-+ if (test_bit(_XEN_NMIREASON_parity_error, &s->arch.nmi_reason))
-+ reason |= 0x80;
-+
-+ return reason;
-+}
-+
-+static inline void reassert_nmi(void) {}
-+
-+#endif /* !_MACH_TRAPS_H */
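get_nmi_reason() above synthesizes the value the native handler would have read from port 0x61: bit 7 (0x80) reports a memory parity error, bit 6 (0x40) an I/O check error. A trivial decoder for that synthetic byte:

    #include <stdio.h>

    static void decode_nmi_reason(unsigned char reason)
    {
        if (reason & 0x80)
            printf("memory parity error\n");  /* _XEN_NMIREASON_parity_error */
        if (reason & 0x40)
            printf("I/O check error\n");      /* _XEN_NMIREASON_io_error */
    }

    int main(void)
    {
        decode_nmi_reason(0x80 | 0x40);
        return 0;
    }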
-diff --git a/include/asm-i386/mach-xen/setup_arch_post.h b/include/asm-i386/mach-xen/setup_arch_post.h
-new file mode 100644
-index 0000000..49c3f2a
---- /dev/null
-+++ b/include/asm-i386/mach-xen/setup_arch_post.h
-@@ -0,0 +1,50 @@
-+/**
-+ * machine_specific_memory_setup - Hook for machine specific memory setup.
-+ *
-+ * Description:
-+ * This is included late in kernel/setup.c so that it can make
-+ * use of all of the static functions.
-+ **/
-+
-+static char * __init machine_specific_memory_setup(void)
-+{
-+ unsigned long max_pfn = xen_start_info->nr_pages;
-+
-+ e820.nr_map = 0;
-+ add_memory_region(0, PFN_PHYS(max_pfn), E820_RAM);
-+
-+ return "Xen";
-+}
-+
-+void __devinit machine_specific_modify_cpu_capabilities(struct cpuinfo_x86 *c)
-+{
-+ clear_bit(X86_FEATURE_VME, c->x86_capability);
-+ clear_bit(X86_FEATURE_DE, c->x86_capability);
-+ clear_bit(X86_FEATURE_PSE, c->x86_capability);
-+ clear_bit(X86_FEATURE_PGE, c->x86_capability);
-+ clear_bit(X86_FEATURE_SEP, c->x86_capability);
-+ if (!(xen_start_info->flags & SIF_PRIVILEGED))
-+ clear_bit(X86_FEATURE_MTRR, c->x86_capability);
-+ c->hlt_works_ok = 0;
-+}
-+
-+extern void hypervisor_callback(void);
-+extern void failsafe_callback(void);
-+extern void nmi(void);
-+
-+static void __init machine_specific_arch_setup(void)
-+{
-+ struct xen_platform_parameters pp;
-+
-+ HYPERVISOR_set_callbacks(
-+ __KERNEL_CS, (unsigned long)hypervisor_callback,
-+ __KERNEL_CS, (unsigned long)failsafe_callback);
-+
-+ HYPERVISOR_nmi_op(XENNMI_register_callback, (unsigned long)&nmi);
-+
-+ machine_specific_modify_cpu_capabilities(&boot_cpu_data);
-+
-+ if (HYPERVISOR_xen_version(XENVER_platform_parameters,
-+ &pp) == 0)
-+ set_fixaddr_top(pp.virt_start - PAGE_SIZE);
-+}
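machine_specific_memory_setup() above replaces BIOS e820 parsing with a single flat RAM region of xen_start_info->nr_pages pages starting at guest-physical zero. A sketch of the resulting map (the 256 MiB guest size is hypothetical):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PFN_PHYS(pfn) ((unsigned long long)(pfn) << PAGE_SHIFT)

    int main(void)
    {
        unsigned long nr_pages = 65536;  /* a hypothetical 256 MiB domU */
        printf("e820: 0x%016llx - 0x%016llx (usable RAM)\n",
               0ULL, PFN_PHYS(nr_pages));
        return 0;
    }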
-diff --git a/include/asm-i386/mach-xen/setup_arch_pre.h b/include/asm-i386/mach-xen/setup_arch_pre.h
-new file mode 100644
-index 0000000..b18df68
---- /dev/null
-+++ b/include/asm-i386/mach-xen/setup_arch_pre.h
-@@ -0,0 +1,5 @@
-+/* Hook to call BIOS initialisation function */
-+
-+#define ARCH_SETUP machine_specific_arch_setup();
-+
-+static void __init machine_specific_arch_setup(void);
-diff --git a/include/asm-i386/mmu.h b/include/asm-i386/mmu.h
-index f431a0b..1d67576 100644
---- a/include/asm-i386/mmu.h
-+++ b/include/asm-i386/mmu.h
-@@ -14,4 +14,10 @@ typedef struct {
- void *ldt;
- } mm_context_t;
-
-+#ifdef CONFIG_X86_XEN
-+/* mm/memory.c:exit_mmap hook */
-+extern void _arch_exit_mmap(struct mm_struct *mm);
-+#define arch_exit_mmap(_mm) _arch_exit_mmap(_mm)
-+#endif
-+
- #endif
-diff --git a/include/asm-i386/mmu_context.h b/include/asm-i386/mmu_context.h
-index bf08218..ea0f78a 100644
---- a/include/asm-i386/mmu_context.h
-+++ b/include/asm-i386/mmu_context.h
-@@ -6,6 +6,7 @@
- #include <asm/atomic.h>
- #include <asm/pgalloc.h>
- #include <asm/tlbflush.h>
-+#include <mach_mmu.h>
-
- /*
- * Used for LDT copy/destruction.
-@@ -16,7 +17,7 @@ void destroy_context(struct mm_struct *m
-
- static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
- {
--#ifdef CONFIG_SMP
-+#if defined(CONFIG_SMP) && !defined(CONFIG_X86_XEN)
- unsigned cpu = smp_processor_id();
- if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
- per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
-@@ -28,26 +29,27 @@ static inline void switch_mm(struct mm_s
- struct task_struct *tsk)
- {
- int cpu = smp_processor_id();
-+ MACH_SWITCH_DECLS;
-
- if (likely(prev != next)) {
- /* stop flush ipis for the previous mm */
- cpu_clear(cpu, prev->cpu_vm_mask);
--#ifdef CONFIG_SMP
-+#if defined(CONFIG_SMP) && !defined(CONFIG_X86_XEN)
- per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
- per_cpu(cpu_tlbstate, cpu).active_mm = next;
- #endif
- cpu_set(cpu, next->cpu_vm_mask);
-
- /* Re-load page tables */
-- load_cr3(next->pgd);
-+ mach_switch_pgd(next, cpu);
-
- /*
- * load the LDT, if the LDT is different:
- */
- if (unlikely(prev->context.ldt != next->context.ldt))
-- load_LDT_nolock(&next->context, cpu);
-+ mach_switch_ldt(next, cpu);
- }
--#ifdef CONFIG_SMP
-+#if defined(CONFIG_SMP) && !defined(CONFIG_X86_XEN)
- else {
- per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
- BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
-@@ -56,11 +58,12 @@ static inline void switch_mm(struct mm_s
- /* We were in lazy tlb mode and leave_mm disabled
- * tlb flush IPI delivery. We must reload %cr3.
- */
-- load_cr3(next->pgd);
-- load_LDT_nolock(&next->context, cpu);
-+ mach_switch_pgd(next, cpu);
-+ mach_switch_ldt(next, cpu);
- }
- }
- #endif
-+ BUG_ON(mach_switch_commit());
- }
-
- #define deactivate_mm(tsk, mm) \
-diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h
-index 997ca5d..226f35a 100644
---- a/include/asm-i386/page.h
-+++ b/include/asm-i386/page.h
-@@ -36,9 +36,6 @@
- #define clear_user_page(page, vaddr, pg) clear_page(page)
- #define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
-
--#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
--#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
--
- /*
- * These are used to make use of C type-checking..
- */
-@@ -49,16 +46,12 @@ typedef struct { unsigned long pte_low,
- typedef struct { unsigned long long pmd; } pmd_t;
- typedef struct { unsigned long long pgd; } pgd_t;
- typedef struct { unsigned long long pgprot; } pgprot_t;
--#define pmd_val(x) ((x).pmd)
--#define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
--#define __pmd(x) ((pmd_t) { (x) } )
- #define HPAGE_SHIFT 21
- #else
- typedef struct { unsigned long pte_low; } pte_t;
- typedef struct { unsigned long pgd; } pgd_t;
- typedef struct { unsigned long pgprot; } pgprot_t;
- #define boot_pte_t pte_t /* or would you rather have a typedef */
--#define pte_val(x) ((x).pte_low)
- #define HPAGE_SHIFT 22
- #endif
- #define PTE_MASK PAGE_MASK
-@@ -70,15 +63,13 @@ typedef struct { unsigned long pgprot; }
- #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
- #endif
-
--#define pgd_val(x) ((x).pgd)
- #define pgprot_val(x) ((x).pgprot)
--
--#define __pte(x) ((pte_t) { (x) } )
--#define __pgd(x) ((pgd_t) { (x) } )
- #define __pgprot(x) ((pgprot_t) { (x) } )
-
- #endif /* !__ASSEMBLY__ */
-
-+#include <mach_page.h>
-+
- /* to align the pointer to the (next) page boundary */
- #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
-
-@@ -121,7 +112,7 @@ extern int page_is_ram(unsigned long pag
-
- #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
- #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
--#define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE)
-+#define MAXMEM (__FIXADDR_TOP-__PAGE_OFFSET-__VMALLOC_RESERVE)
- #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
- #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
- #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
-@@ -139,6 +130,8 @@ extern int page_is_ram(unsigned long pag
- ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-+#define __HAVE_ARCH_GATE_AREA 1
-+
- #endif /* __KERNEL__ */
-
- #include <asm-generic/page.h>
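Several changes in this header family (MAXMEM bounded by __FIXADDR_TOP, the mach_page.h include, and the __pte_ma()/phys_to_machine() helpers used elsewhere in this patch) stem from one fact: a domU's "physical" addresses are pseudo-physical, and page-table entries must hold machine frame numbers. The guest keeps a pfn-to-mfn array for the translation. A toy model (the table contents are invented):

    #include <assert.h>

    #define PAGE_SHIFT 12

    static unsigned long p2m[4] = { 900, 17, 4242, 3 };  /* pfn -> mfn */

    static unsigned long long phys_to_machine(unsigned long long phys)
    {
        unsigned long pfn = phys >> PAGE_SHIFT;
        unsigned long offset = phys & ((1UL << PAGE_SHIFT) - 1);
        return ((unsigned long long)p2m[pfn] << PAGE_SHIFT) | offset;
    }

    int main(void)
    {
        /* pfn 1 -> mfn 17; the page offset is preserved */
        assert(phys_to_machine(0x1004) == ((17ULL << PAGE_SHIFT) | 4));
        return 0;
    }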
-diff --git a/include/asm-i386/pci.h b/include/asm-i386/pci.h
-index 78c8598..ee1b338 100644
---- a/include/asm-i386/pci.h
-+++ b/include/asm-i386/pci.h
-@@ -43,6 +43,26 @@ int pcibios_set_irq_routing(struct pci_d
-
- struct pci_dev;
-
-+#ifdef CONFIG_SWIOTLB
-+
-+/* On Xen we use SWIOTLB instead of blk-specific bounce buffers. */
-+#define PCI_DMA_BUS_IS_PHYS (0)
-+
-+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
-+ dma_addr_t ADDR_NAME;
-+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
-+ __u32 LEN_NAME;
-+#define pci_unmap_addr(PTR, ADDR_NAME) \
-+ ((PTR)->ADDR_NAME)
-+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
-+ (((PTR)->ADDR_NAME) = (VAL))
-+#define pci_unmap_len(PTR, LEN_NAME) \
-+ ((PTR)->LEN_NAME)
-+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
-+ (((PTR)->LEN_NAME) = (VAL))
-+
-+#else
-+
- /* The PCI address space does equal the physical memory
- * address space. The networking and block device layers use
- * this boolean for bounce buffer decisions.
-@@ -57,6 +77,8 @@ struct pci_dev;
- #define pci_unmap_len(PTR, LEN_NAME) (0)
- #define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
-
-+#endif
-+
- /* This is always fine. */
- #define pci_dac_dma_supported(pci_dev, mask) (1)
-
-@@ -111,10 +133,23 @@ static inline void pci_dma_burst_advice(
-
- #endif /* __KERNEL__ */
-
-+#ifdef CONFIG_XEN_PCIDEV_FRONTEND
-+#include <xen/pcifront.h>
-+#endif /* CONFIG_XEN_PCIDEV_FRONTEND */
-+
- /* implement the pci_ DMA API in terms of the generic device dma_ one */
- #include <asm-generic/pci-dma-compat.h>
-
- /* generic pci stuff */
- #include <asm-generic/pci.h>
-
-+#ifdef CONFIG_X86_XEN
-+/* On Xen we have to scan all functions since Xen hides bridges from
-+ * us. If a bridge is at fn=0 and that slot has a multifunction
-+ * device, we won't find the additional devices without scanning all
-+ * functions. */
-+#undef pcibios_scan_all_fns
-+#define pcibios_scan_all_fns(a, b) 1
-+#endif
-+
- #endif /* __i386_PCI_H */
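The pcibios_scan_all_fns override at the end has a subtle rationale, spelled out in its comment: native probing visits functions 1-7 only when function 0 advertises the multifunction bit, and Xen may hide the bridge that sits at function 0. Schematically (the probe helper is illustrative, not the kernel's):

    #include <stdio.h>
    #include <stdbool.h>

    #define PCI_HEADER_TYPE_MULTIFUNC 0x80

    static bool should_probe(int fn, unsigned char fn0_header, bool scan_all)
    {
        if (fn == 0 || scan_all)
            return true;
        return (fn0_header & PCI_HEADER_TYPE_MULTIFUNC) != 0;
    }

    int main(void)
    {
        /* function 3 of a slot whose (hidden) fn 0 lacks the MF bit */
        printf("native: %d  xen: %d\n",
               should_probe(3, 0x00, false),  /* skipped: device lost */
               should_probe(3, 0x00, true));  /* probed anyway */
        return 0;
    }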
-diff --git a/include/asm-i386/pgalloc.h b/include/asm-i386/pgalloc.h
-index 0380c3d..76a36fc 100644
---- a/include/asm-i386/pgalloc.h
-+++ b/include/asm-i386/pgalloc.h
-@@ -5,14 +5,37 @@
- #include <asm/fixmap.h>
- #include <linux/threads.h>
- #include <linux/mm.h> /* for struct page */
-+#include <asm/io.h> /* for phys_to_virt and page_to_pseudophys */
-
- #define pmd_populate_kernel(mm, pmd, pte) \
- set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
-
-+#ifndef CONFIG_X86_XEN
- #define pmd_populate(mm, pmd, pte) \
- set_pmd(pmd, __pmd(_PAGE_TABLE + \
- ((unsigned long long)page_to_pfn(pte) << \
- (unsigned long long) PAGE_SHIFT)))
-+#else
-+/* Is this pagetable pinned? */
-+#define PG_pinned PG_arch_1
-+#define pmd_populate(mm, pmd, pte) \
-+do { \
-+ if (test_bit(PG_pinned, &virt_to_page((mm)->pgd)->flags)) { \
-+ if (!PageHighMem(pte)) \
-+ BUG_ON(HYPERVISOR_update_va_mapping( \
-+ (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT),\
-+ pfn_pte(page_to_pfn(pte), PAGE_KERNEL_RO), 0));\
-+ set_pmd(pmd, __pmd(_PAGE_TABLE + \
-+ ((unsigned long long)page_to_pfn(pte) << \
-+ (unsigned long long) PAGE_SHIFT))); \
-+ } else { \
-+ *(pmd) = __pmd(_PAGE_TABLE + \
-+ ((unsigned long long)page_to_pfn(pte) << \
-+ (unsigned long long) PAGE_SHIFT)); \
-+ } \
-+} while (0)
-+#endif
-+
- /*
- * Allocate and free page tables.
- */
-@@ -25,13 +48,19 @@ extern struct page *pte_alloc_one(struct
- static inline void pte_free_kernel(pte_t *pte)
- {
- free_page((unsigned long)pte);
-+#ifdef CONFIG_X86_XEN
-+ make_page_writable(pte, XENFEAT_writable_page_tables);
-+#endif
- }
-
-+#ifndef CONFIG_X86_XEN
- static inline void pte_free(struct page *pte)
- {
- __free_page(pte);
- }
--
-+#else
-+extern void pte_free(struct page *pte);
-+#endif
-
- #define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
-
-diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h
-index 74ef721..5c4c93a 100644
---- a/include/asm-i386/pgtable-2level.h
-+++ b/include/asm-i386/pgtable-2level.h
-@@ -14,16 +14,18 @@
- * hook is made available.
- */
- #define set_pte(pteptr, pteval) (*(pteptr) = pteval)
--#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
- #define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
--#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
-+#if !defined(CONFIG_X86_XEN)
-+# define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
-+#else
-+# define set_pmd(pmdptr, pmdval) xen_l2_entry_update((pmdptr), (pmdval))
-+#endif
-
--#define ptep_get_and_clear(mm,addr,xp) __pte(xchg(&(xp)->pte_low, 0))
-+#define ptep_get_and_clear(mm,addr,xp) __pte_ma(xchg(&(xp)->pte_low, 0))
- #define pte_same(a, b) ((a).pte_low == (b).pte_low)
- #define pte_page(x) pfn_to_page(pte_pfn(x))
- #define pte_none(x) (!(x).pte_low)
--#define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
--#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-+
- #define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-
- /*
-diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h
-index f1a8b45..5f45788 100644
---- a/include/asm-i386/pgtable-3level.h
-+++ b/include/asm-i386/pgtable-3level.h
-@@ -56,15 +56,21 @@ static inline void set_pte(pte_t *ptep,
- smp_wmb();
- ptep->pte_low = pte.pte_low;
- }
--#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
-
- #define __HAVE_ARCH_SET_PTE_ATOMIC
- #define set_pte_atomic(pteptr,pteval) \
-- set_64bit((unsigned long long *)(pteptr),pte_val(pteval))
--#define set_pmd(pmdptr,pmdval) \
-+ set_64bit((unsigned long long *)(pteptr),pte_val_ma(pteval))
-+#if !defined(CONFIG_X86_XEN)
-+# define set_pmd(pmdptr,pmdval) \
- set_64bit((unsigned long long *)(pmdptr),pmd_val(pmdval))
--#define set_pud(pudptr,pudval) \
-+# define set_pud(pudptr,pudval) \
- (*(pudptr) = (pudval))
-+#else
-+# define set_pmd(pmdptr,pmdval) \
-+ xen_l2_entry_update((pmdptr), (pmdval))
-+# define set_pud(pudptr,pudval) \
-+ xen_l3_entry_update((pudptr), (pudval))
-+#endif
-
- /*
- * Pentium-II erratum A13: in PAE mode we explicitly have to flush
-@@ -109,28 +115,13 @@ static inline int pte_none(pte_t pte)
- return !pte.pte_low && !pte.pte_high;
- }
-
--static inline unsigned long pte_pfn(pte_t pte)
--{
-- return (pte.pte_low >> PAGE_SHIFT) |
-- (pte.pte_high << (32 - PAGE_SHIFT));
--}
--
- extern unsigned long long __supported_pte_mask;
-
--static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
--{
-- pte_t pte;
--
-- pte.pte_high = (page_nr >> (32 - PAGE_SHIFT)) | \
-- (pgprot_val(pgprot) >> 32);
-- pte.pte_high &= (__supported_pte_mask >> 32);
-- pte.pte_low = ((page_nr << PAGE_SHIFT) | pgprot_val(pgprot)) & \
-- __supported_pte_mask;
-- return pte;
--}
--
- static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
- {
-+#ifdef CONFIG_X86_XEN
-+ BUG(); panic("needs review");
-+#endif
- return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) | \
- pgprot_val(pgprot)) & __supported_pte_mask);
- }
-diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
-index 088a945..d933743 100644
---- a/include/asm-i386/pgtable.h
-+++ b/include/asm-i386/pgtable.h
-@@ -34,7 +34,6 @@ struct vm_area_struct;
- */
- #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
- extern unsigned long empty_zero_page[1024];
--extern pgd_t swapper_pg_dir[1024];
- extern kmem_cache_t *pgd_cache;
- extern kmem_cache_t *pmd_cache;
- extern spinlock_t pgd_lock;
-@@ -208,9 +207,7 @@ extern unsigned long pg0[];
-
- /* To avoid harmful races, pmd_none(x) should check only the lower when PAE */
- #define pmd_none(x) (!(unsigned long)pmd_val(x))
--#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
- #define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
--#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
-
-
- #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-@@ -249,6 +246,7 @@ static inline pte_t pte_mkhuge(pte_t pte
- #else
- # include <asm/pgtable-2level.h>
- #endif
-+#include <mach_pgtable.h>
-
- static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
- {
-@@ -278,7 +276,8 @@ static inline pte_t ptep_get_and_clear_f
-
- static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
- {
-- clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
-+ if (pte_write(*ptep))
-+ clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
- }
-
- /*
-@@ -399,9 +398,9 @@ extern void noexec_setup(const char *str
-
- #if defined(CONFIG_HIGHPTE)
- #define pte_offset_map(dir, address) \
-- ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
-+ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
- #define pte_offset_map_nested(dir, address) \
-- ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
-+ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
- #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
- #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
- #else
-@@ -423,14 +422,6 @@ extern void noexec_setup(const char *str
- * bit at the same time.
- */
- #define update_mmu_cache(vma,address,pte) do { } while (0)
--#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
--#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-- do { \
-- if (__dirty) { \
-- (__ptep)->pte_low = (__entry).pte_low; \
-- flush_tlb_page(__vma, __address); \
-- } \
-- } while (0)
-
- #endif /* !__ASSEMBLY__ */
-
-@@ -438,9 +429,6 @@ extern void noexec_setup(const char *str
- #define kern_addr_valid(addr) (1)
- #endif /* CONFIG_FLATMEM */
-
--#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
-- remap_pfn_range(vma, vaddr, pfn, size, prot)
--
- #define MK_IOSPACE_PFN(space, pfn) (pfn)
- #define GET_IOSPACE(pfn) 0
- #define GET_PFN(pfn) (pfn)
-diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
-index feca5d9..2e7bec8 100644
---- a/include/asm-i386/processor.h
-+++ b/include/asm-i386/processor.h
-@@ -90,8 +90,10 @@ struct cpuinfo_x86 {
-
- extern struct cpuinfo_x86 boot_cpu_data;
- extern struct cpuinfo_x86 new_cpu_data;
-+#ifndef CONFIG_X86_NO_TSS
- extern struct tss_struct doublefault_tss;
- DECLARE_PER_CPU(struct tss_struct, init_tss);
-+#endif
-
- #ifdef CONFIG_SMP
- extern struct cpuinfo_x86 cpu_data[];
-@@ -232,24 +234,6 @@ static inline unsigned int cpuid_edx(uns
- */
- extern unsigned long mmu_cr4_features;
-
--static inline void set_in_cr4 (unsigned long mask)
--{
-- unsigned cr4;
-- mmu_cr4_features |= mask;
-- cr4 = read_cr4();
-- cr4 |= mask;
-- write_cr4(cr4);
--}
--
--static inline void clear_in_cr4 (unsigned long mask)
--{
-- unsigned cr4;
-- mmu_cr4_features &= ~mask;
-- cr4 = read_cr4();
-- cr4 &= ~mask;
-- write_cr4(cr4);
--}
--
- /*
- * NSC/Cyrix CPU configuration register indexes
- */
-@@ -333,7 +317,9 @@ extern int bootloader_type;
- #define IO_BITMAP_BITS 65536
- #define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
- #define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
-+#ifndef CONFIG_X86_NO_TSS
- #define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
-+#endif
- #define INVALID_IO_BITMAP_OFFSET 0x8000
- #define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
-
-@@ -391,6 +377,7 @@ typedef struct {
-
- struct thread_struct;
-
-+#ifndef CONFIG_X86_NO_TSS
- struct tss_struct {
- unsigned short back_link,__blh;
- unsigned long esp0;
-@@ -436,6 +423,7 @@ struct tss_struct {
- */
- unsigned long stack[64];
- } __attribute__((packed));
-+#endif
-
- #define ARCH_MIN_TASKALIGN 16
-
-@@ -472,6 +460,9 @@ struct thread_struct {
- .io_bitmap_ptr = NULL, \
- }
-
-+#include <mach_processor.h>
-+
-+#ifndef CONFIG_X86_NO_TSS
- /*
- * Note that the .io_bitmap member must be extra-big. This is because
- * the CPU will access an additional byte beyond the end of the IO
-@@ -486,15 +477,25 @@ struct thread_struct {
- .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \
- }
-
--static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
-+static inline void __load_esp0(struct tss_struct *tss, struct thread_struct *thread)
- {
- tss->esp0 = thread->esp0;
-+#ifdef CONFIG_X86_SYSENTER
- /* This can only happen when SEP is enabled, no need to test "SEP"arately */
- if (unlikely(tss->ss1 != thread->sysenter_cs)) {
- tss->ss1 = thread->sysenter_cs;
- wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
- }
-+#endif
- }
-+#else
-+#define __load_esp0(tss, thread)
-+#endif
-+
-+#define load_esp0(tss, thread) do { \
-+ __load_esp0(tss, thread); \
-+ mach_load_esp0(tss, thread); \
-+} while (0)
-
- #define start_thread(regs, new_eip, new_esp) do { \
- __asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0)); \
-@@ -507,33 +508,6 @@ static inline void load_esp0(struct tss_
- regs->esp = new_esp; \
- } while (0)
-
--/*
-- * These special macros can be used to get or set a debugging register
-- */
--#define get_debugreg(var, register) \
-- __asm__("movl %%db" #register ", %0" \
-- :"=r" (var))
--#define set_debugreg(value, register) \
-- __asm__("movl %0,%%db" #register \
-- : /* no output */ \
-- :"r" (value))
--
--/*
-- * Set IOPL bits in EFLAGS from given mask
-- */
--static inline void set_iopl_mask(unsigned mask)
--{
-- unsigned int reg;
-- __asm__ __volatile__ ("pushfl;"
-- "popl %0;"
-- "andl %1, %0;"
-- "orl %2, %0;"
-- "pushl %0;"
-- "popfl"
-- : "=&r" (reg)
-- : "i" (~X86_EFLAGS_IOPL), "r" (mask));
--}
--
- /* Forward declaration, a strange C thing */
- struct task_struct;
- struct mm_struct;
-diff --git a/include/asm-i386/ptrace.h b/include/asm-i386/ptrace.h
-index f324c53..52eb791 100644
---- a/include/asm-i386/ptrace.h
-+++ b/include/asm-i386/ptrace.h
-@@ -60,6 +60,7 @@ struct pt_regs {
- #ifdef __KERNEL__
-
- #include <asm/vm86.h>
-+#include <mach_mode.h>
-
- struct task_struct;
- extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code);
-@@ -73,11 +74,11 @@ extern void send_sigtrap(struct task_str
- */
- static inline int user_mode(struct pt_regs *regs)
- {
-- return (regs->xcs & 3) != 0;
-+ return (regs->xcs & USER_MODE_MASK) != 0;
- }
- static inline int user_mode_vm(struct pt_regs *regs)
- {
-- return ((regs->xcs & 3) | (regs->eflags & VM_MASK)) != 0;
-+ return ((regs->xcs & USER_MODE_MASK) | (regs->eflags & VM_MASK)) != 0;
- }
- #define instruction_pointer(regs) ((regs)->eip)
- #if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
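user_mode() changes above because a ring-1 kernel's CS has RPL 1, so the native "RPL != 0" test would misclassify the kernel itself as user space. The USER_MODE_MASK supplied by <mach_mode.h> (its value is not visible in this hunk) would need to isolate a bit that differs between ring 1 and ring 3; a sketch assuming a mask of 2:

    #include <assert.h>

    #define USER_MODE_MASK_SKETCH 2  /* assumption: bit 1 separates ring 3 from ring 1 */

    static int user_mode_sketch(unsigned xcs)
    {
        return (xcs & USER_MODE_MASK_SKETCH) != 0;
    }

    int main(void)
    {
        assert(user_mode_sketch(0x61) == 0);  /* hypothetical ring-1 kernel CS */
        assert(user_mode_sketch(0x73) == 1);  /* user CS, RPL 3 */
        return 0;
    }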
-diff --git a/include/asm-i386/rwsem.h b/include/asm-i386/rwsem.h
-index be4ab85..75751b7 100644
---- a/include/asm-i386/rwsem.h
-+++ b/include/asm-i386/rwsem.h
-@@ -40,6 +40,7 @@
-
- #include <linux/list.h>
- #include <linux/spinlock.h>
-+#include <asm/smp_alt.h>
-
- struct rwsem_waiter;
-
-@@ -99,7 +100,7 @@ static inline void __down_read(struct rw
- {
- __asm__ __volatile__(
- "# beginning down_read\n\t"
--LOCK_PREFIX " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value */
-+LOCK " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value */
- " js 2f\n\t" /* jump if we weren't granted the lock */
- "1:\n\t"
- LOCK_SECTION_START("")
-@@ -130,7 +131,7 @@ static inline int __down_read_trylock(st
- " movl %1,%2\n\t"
- " addl %3,%2\n\t"
- " jle 2f\n\t"
--LOCK_PREFIX " cmpxchgl %2,%0\n\t"
-+LOCK " cmpxchgl %2,%0\n\t"
- " jnz 1b\n\t"
- "2:\n\t"
- "# ending __down_read_trylock\n\t"
-@@ -150,7 +151,7 @@ static inline void __down_write(struct r
- tmp = RWSEM_ACTIVE_WRITE_BIAS;
- __asm__ __volatile__(
- "# beginning down_write\n\t"
--LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */
-+LOCK " xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */
- " testl %%edx,%%edx\n\t" /* was the count 0 before? */
- " jnz 2f\n\t" /* jump if we weren't granted the lock */
- "1:\n\t"
-@@ -188,7 +189,7 @@ static inline void __up_read(struct rw_s
- __s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
- __asm__ __volatile__(
- "# beginning __up_read\n\t"
--LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */
-+LOCK " xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */
- " js 2f\n\t" /* jump if the lock is being waited upon */
- "1:\n\t"
- LOCK_SECTION_START("")
-@@ -214,7 +215,7 @@ static inline void __up_write(struct rw_
- __asm__ __volatile__(
- "# beginning __up_write\n\t"
- " movl %2,%%edx\n\t"
--LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
-+LOCK " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
- " jnz 2f\n\t" /* jump if the lock is being waited upon */
- "1:\n\t"
- LOCK_SECTION_START("")
-@@ -239,7 +240,7 @@ static inline void __downgrade_write(str
- {
- __asm__ __volatile__(
- "# beginning __downgrade_write\n\t"
--LOCK_PREFIX " addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
-+LOCK " addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
- " js 2f\n\t" /* jump if the lock is being waited upon */
- "1:\n\t"
- LOCK_SECTION_START("")
-@@ -263,7 +264,7 @@ LOCK_PREFIX " addl %2,(%%eax)\n\t"
- static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
- {
- __asm__ __volatile__(
--LOCK_PREFIX "addl %1,%0"
-+LOCK "addl %1,%0"
- : "=m"(sem->count)
- : "ir"(delta), "m"(sem->count));
- }
-@@ -276,7 +277,7 @@ static inline int rwsem_atomic_update(in
- int tmp = delta;
-
- __asm__ __volatile__(
--LOCK_PREFIX "xadd %0,(%2)"
-+LOCK "xadd %0,(%2)"
- : "+r"(tmp), "=m"(sem->count)
- : "r"(sem), "m"(sem->count)
- : "memory");
-diff --git a/include/asm-i386/scatterlist.h b/include/asm-i386/scatterlist.h
-index 55d6c95..b6f8523 100644
---- a/include/asm-i386/scatterlist.h
-+++ b/include/asm-i386/scatterlist.h
-@@ -1,11 +1,16 @@
- #ifndef _I386_SCATTERLIST_H
- #define _I386_SCATTERLIST_H
-
-+#include <linux/config.h>
-+
- struct scatterlist {
- struct page *page;
- unsigned int offset;
- dma_addr_t dma_address;
- unsigned int length;
-+#ifdef CONFIG_X86_XEN
-+ unsigned int dma_length;
-+#endif
- };
-
- /* These macros should be used after a pci_map_sg call has been done
-@@ -14,7 +19,11 @@ struct scatterlist {
- * returns.
- */
- #define sg_dma_address(sg) ((sg)->dma_address)
-+#ifndef CONFIG_X86_XEN
- #define sg_dma_len(sg) ((sg)->length)
-+#else
-+#define sg_dma_len(sg) ((sg)->dma_length)
-+#endif
-
- #define ISA_DMA_THRESHOLD (0x00ffffff)
-
-diff --git a/include/asm-i386/segment.h b/include/asm-i386/segment.h
-index faf9953..2fab8f8 100644
---- a/include/asm-i386/segment.h
-+++ b/include/asm-i386/segment.h
-@@ -1,6 +1,8 @@
- #ifndef _ASM_SEGMENT_H
- #define _ASM_SEGMENT_H
-
-+#include <mach_mode.h>
-+
- /*
- * The layout of the per-CPU GDT under Linux:
- *
-@@ -68,43 +70,10 @@
- #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
- #define GDT_ENTRY_LDT (GDT_ENTRY_KERNEL_BASE + 5)
-
--#define GDT_ENTRY_PNPBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 6)
--#define GDT_ENTRY_APMBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 11)
--
--#define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
--#define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
--
--#define GDT_ENTRY_DOUBLEFAULT_TSS 31
--
--/*
-- * The GDT has 32 entries
-- */
--#define GDT_ENTRIES 32
-+#include <mach_seg.h>
-
- #define GDT_SIZE (GDT_ENTRIES * 8)
-
--/* Simple and small GDT entries for booting only */
--
--#define GDT_ENTRY_BOOT_CS 2
--#define __BOOT_CS (GDT_ENTRY_BOOT_CS * 8)
--
--#define GDT_ENTRY_BOOT_DS (GDT_ENTRY_BOOT_CS + 1)
--#define __BOOT_DS (GDT_ENTRY_BOOT_DS * 8)
--
--/* The PnP BIOS entries in the GDT */
--#define GDT_ENTRY_PNPBIOS_CS32 (GDT_ENTRY_PNPBIOS_BASE + 0)
--#define GDT_ENTRY_PNPBIOS_CS16 (GDT_ENTRY_PNPBIOS_BASE + 1)
--#define GDT_ENTRY_PNPBIOS_DS (GDT_ENTRY_PNPBIOS_BASE + 2)
--#define GDT_ENTRY_PNPBIOS_TS1 (GDT_ENTRY_PNPBIOS_BASE + 3)
--#define GDT_ENTRY_PNPBIOS_TS2 (GDT_ENTRY_PNPBIOS_BASE + 4)
--
--/* The PnP BIOS selectors */
--#define PNP_CS32 (GDT_ENTRY_PNPBIOS_CS32 * 8) /* segment for calling fn */
--#define PNP_CS16 (GDT_ENTRY_PNPBIOS_CS16 * 8) /* code segment for BIOS */
--#define PNP_DS (GDT_ENTRY_PNPBIOS_DS * 8) /* data segment for BIOS */
--#define PNP_TS1 (GDT_ENTRY_PNPBIOS_TS1 * 8) /* transfer data segment */
--#define PNP_TS2 (GDT_ENTRY_PNPBIOS_TS2 * 8) /* another data segment */
--
- /*
- * The interrupt descriptor table has room for 256 idt's,
- * the global descriptor table is dependent on the number
-diff --git a/include/asm-i386/setup.h b/include/asm-i386/setup.h
-index 826a8ca..62b1fea 100644
---- a/include/asm-i386/setup.h
-+++ b/include/asm-i386/setup.h
-@@ -16,51 +16,8 @@
- #define MAXMEM_PFN PFN_DOWN(MAXMEM)
- #define MAX_NONPAE_PFN (1 << 20)
-
--#define PARAM_SIZE 4096
- #define COMMAND_LINE_SIZE 256
-
--#define OLD_CL_MAGIC_ADDR 0x90020
--#define OLD_CL_MAGIC 0xA33F
--#define OLD_CL_BASE_ADDR 0x90000
--#define OLD_CL_OFFSET 0x90022
--#define NEW_CL_POINTER 0x228 /* Relative to real mode data */
--
--#ifndef __ASSEMBLY__
--/*
-- * This is set up by the setup-routine at boot-time
-- */
--extern unsigned char boot_params[PARAM_SIZE];
--
--#define PARAM (boot_params)
--#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
--#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
--#define ALT_MEM_K (*(unsigned long *) (PARAM+0x1e0))
--#define E820_MAP_NR (*(char*) (PARAM+E820NR))
--#define E820_MAP ((struct e820entry *) (PARAM+E820MAP))
--#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
--#define IST_INFO (*(struct ist_info *) (PARAM+0x60))
--#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
--#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
--#define EFI_SYSTAB ((efi_system_table_t *) *((unsigned long *)(PARAM+0x1c4)))
--#define EFI_MEMDESC_SIZE (*((unsigned long *) (PARAM+0x1c8)))
--#define EFI_MEMDESC_VERSION (*((unsigned long *) (PARAM+0x1cc)))
--#define EFI_MEMMAP ((void *) *((unsigned long *)(PARAM+0x1d0)))
--#define EFI_MEMMAP_SIZE (*((unsigned long *) (PARAM+0x1d4)))
--#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
--#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
--#define VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA))
--#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
--#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
--#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
--#define KERNEL_START (*(unsigned long *) (PARAM+0x214))
--#define INITRD_START (*(unsigned long *) (PARAM+0x218))
--#define INITRD_SIZE (*(unsigned long *) (PARAM+0x21c))
--#define EDID_INFO (*(struct edid_info *) (PARAM+0x140))
--#define EDD_NR (*(unsigned char *) (PARAM+EDDNR))
--#define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
--#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
--#define EDD_BUF ((struct edd_info *) (PARAM+EDDBUF))
--
--#endif /* __ASSEMBLY__ */
-+#include <mach_setup.h>
-
- #endif /* _i386_SETUP_H */
-diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
-index 61d3ab9..daf519a 100644
---- a/include/asm-i386/smp.h
-+++ b/include/asm-i386/smp.h
-@@ -59,8 +59,13 @@ extern void cpu_uninit(void);
- */
- #define raw_smp_processor_id() (current_thread_info()->cpu)
-
-+#ifndef CONFIG_X86_XEN
- extern cpumask_t cpu_callout_map;
- extern cpumask_t cpu_callin_map;
-+#else
-+#define cpu_callout_map cpu_possible_map
-+#define cpu_callin_map cpu_possible_map
-+#endif
- extern cpumask_t cpu_possible_map;
-
- /* We don't mark CPUs online until __cpu_up(), so we need another measure */
-diff --git a/include/asm-i386/smp_alt.h b/include/asm-i386/smp_alt.h
-new file mode 100644
-index 0000000..67307c3
---- /dev/null
-+++ b/include/asm-i386/smp_alt.h
-@@ -0,0 +1,32 @@
-+#ifndef __ASM_SMP_ALT_H__
-+#define __ASM_SMP_ALT_H__
-+
-+#include <linux/config.h>
-+
-+#ifdef CONFIG_SMP
-+#if defined(CONFIG_SMP_ALTERNATIVES) && !defined(MODULE)
-+#define LOCK \
-+ "6677: nop\n" \
-+ ".section __smp_alternatives,\"a\"\n" \
-+ ".long 6677b\n" \
-+ ".long 6678f\n" \
-+ ".previous\n" \
-+ ".section __smp_replacements,\"a\"\n" \
-+ "6678: .byte 1\n" \
-+ ".byte 1\n" \
-+ ".byte 0\n" \
-+ ".byte 1\n" \
-+ ".byte -1\n" \
-+ "lock\n" \
-+ "nop\n" \
-+ ".previous\n"
-+void prepare_for_smp(void);
-+void unprepare_for_smp(void);
-+#else
-+#define LOCK "lock ; "
-+#endif
-+#else
-+#define LOCK ""
-+#endif
-+
-+#endif /* __ASM_SMP_ALT_H__ */
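This LOCK macro is the heart of the SMP-alternatives machinery used throughout the locking changes below: each would-be lock prefix assembles to a nop plus bookkeeping records, and boot code patches the real prefix in only when running on more than one CPU. A userspace mock of the patching idea (it rewrites bytes in an array, not live code):

    #include <stdio.h>

    #define NOP      0x90
    #define LOCK_PFX 0xF0

    static unsigned char text[] = { NOP, 0xFF, 0x00 };  /* nop; incl (%eax) */
    static unsigned char *sites[] = { &text[0] };       /* recorded lock sites */

    static void prepare_for_smp_mock(int smp)
    {
        for (unsigned int i = 0; i < sizeof(sites) / sizeof(sites[0]); i++)
            *sites[i] = smp ? LOCK_PFX : NOP;
    }

    int main(void)
    {
        prepare_for_smp_mock(1);
        printf("site byte is now %#x\n", text[0]);  /* 0xf0 = lock */
        return 0;
    }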
-diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
-index 2360435..fed8e97 100644
---- a/include/asm-i386/spinlock.h
-+++ b/include/asm-i386/spinlock.h
-@@ -6,6 +6,7 @@
- #include <asm/page.h>
- #include <linux/config.h>
- #include <linux/compiler.h>
-+#include <asm/smp_alt.h>
-
- /*
- * Your basic SMP spinlocks, allowing only a single CPU anywhere
-@@ -22,8 +23,9 @@
- (*(volatile signed char *)(&(x)->slock) <= 0)
-
- #define __raw_spin_lock_string \
-- "\n1:\t" \
-- "lock ; decb %0\n\t" \
-+ "\n1:\n" \
-+ LOCK \
-+ "decb %0\n\t" \
- "jns 3f\n" \
- "2:\t" \
- "rep;nop\n\t" \
-@@ -32,9 +34,11 @@
- "jmp 1b\n" \
- "3:\n\t"
-
-+#ifndef CONFIG_X86_XEN
- #define __raw_spin_lock_string_flags \
-- "\n1:\t" \
-- "lock ; decb %0\n\t" \
-+ "\n1:\n" \
-+ LOCK \
-+ "decb %0\n\t" \
- "jns 4f\n\t" \
- "2:\t" \
- "testl $0x200, %1\n\t" \
-@@ -47,6 +51,9 @@
- "cli\n\t" \
- "jmp 1b\n" \
- "4:\n\t"
-+#else
-+#define __raw_spin_lock_string_flags __raw_spin_lock_string
-+#endif
-
- static inline void __raw_spin_lock(raw_spinlock_t *lock)
- {
-@@ -65,10 +72,34 @@ static inline void __raw_spin_lock_flags
- static inline int __raw_spin_trylock(raw_spinlock_t *lock)
- {
- char oldval;
-+#ifdef CONFIG_SMP_ALTERNATIVES
-+ __asm__ __volatile__(
-+ "1:movb %1,%b0\n"
-+ "movb $0,%1\n"
-+ "2:"
-+ ".section __smp_alternatives,\"a\"\n"
-+ ".long 1b\n"
-+ ".long 3f\n"
-+ ".previous\n"
-+ ".section __smp_replacements,\"a\"\n"
-+ "3: .byte 2b - 1b\n"
-+ ".byte 5f-4f\n"
-+ ".byte 0\n"
-+ ".byte 6f-5f\n"
-+ ".byte -1\n"
-+ "4: xchgb %b0,%1\n"
-+ "5: movb %1,%b0\n"
-+ "movb $0,%1\n"
-+ "6:\n"
-+ ".previous\n"
-+ :"=q" (oldval), "=m" (lock->slock)
-+ :"0" (0) : "memory");
-+#else
- __asm__ __volatile__(
- "xchgb %b0,%1"
- :"=q" (oldval), "=m" (lock->slock)
- :"0" (0) : "memory");
-+#endif
- return oldval > 0;
- }
-
-@@ -178,12 +209,12 @@ static inline int __raw_write_trylock(ra
-
- static inline void __raw_read_unlock(raw_rwlock_t *rw)
- {
-- asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
-+ asm volatile(LOCK "incl %0" :"=m" (rw->lock) : : "memory");
- }
-
- static inline void __raw_write_unlock(raw_rwlock_t *rw)
- {
-- asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ", %0"
-+ asm volatile(LOCK "addl $" RW_LOCK_BIAS_STR ", %0"
- : "=m" (rw->lock) : : "memory");
- }
-
-diff --git a/include/asm-i386/swiotlb.h b/include/asm-i386/swiotlb.h
-new file mode 100644
-index 0000000..d5a9f13
---- /dev/null
-+++ b/include/asm-i386/swiotlb.h
-@@ -0,0 +1,60 @@
-+#ifndef _ASM_SWIOTLB_H
-+#define _ASM_SWIOTLB_H 1
-+
-+#include <linux/config.h>
-+
-+#ifdef CONFIG_SWIOTLB
-+
-+/* SWIOTLB interface */
-+
-+extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr, size_t size,
-+ int dir);
-+extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
-+ size_t size, int dir);
-+extern void swiotlb_sync_single_for_cpu(struct device *hwdev,
-+ dma_addr_t dev_addr,
-+ size_t size, int dir);
-+extern void swiotlb_sync_single_for_device(struct device *hwdev,
-+ dma_addr_t dev_addr,
-+ size_t size, int dir);
-+extern void swiotlb_sync_sg_for_cpu(struct device *hwdev,
-+ struct scatterlist *sg, int nelems,
-+ int dir);
-+extern void swiotlb_sync_sg_for_device(struct device *hwdev,
-+ struct scatterlist *sg, int nelems,
-+ int dir);
-+extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
-+ int nents, int direction);
-+extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
-+ int nents, int direction);
-+extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr);
-+extern dma_addr_t swiotlb_map_page(struct device *hwdev, struct page *page,
-+ unsigned long offset, size_t size,
-+ enum dma_data_direction direction);
-+extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address,
-+ size_t size, enum dma_data_direction direction);
-+extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
-+extern void swiotlb_init(void);
-+
-+extern int swiotlb;
-+
-+#else /* CONFIG_SWIOTLB */
-+
-+#define swiotlb 0
-+#define swiotlb_sync_single_for_cpu(dev, addr, size, dir) \
-+ ((void)(dev),(void)(addr),(void)(size),(void)(dir))
-+#define swiotlb_sync_single_for_device(dev, addr, size, dir) \
-+ ((void)(dev),(void)(addr),(void)(size),(void)(dir))
-+#define swiotlb_sync_sg_for_cpu(dev, sg, nelems, dir) \
-+ ((void)(dev),(void)(sg),(void)(nelems),(void)(dir))
-+#define swiotlb_sync_sg_for_device(dev, sg, nelems, dir) \
-+ ((void)(dev),(void)(sg),(void)(nelems),(void)(dir))
-+
-+#endif
-+
-+#define swiotlb_sync_single_range_for_cpu(dev, dma_handle, offset, size, direction) \
-+ swiotlb_sync_single_for_cpu(dev, (dma_handle) + (offset), size, direction)
-+#define swiotlb_sync_single_range_for_device(dev, dma_handle, offset, size, direction) \
-+ swiotlb_sync_single_for_device(dev, (dma_handle) + (offset), size, direction)
-+
-+#endif /* _ASM_SWIOTLB_H */
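SWIOTLB enters the picture because a guest buffer that is pseudo-physically contiguous need not be machine-contiguous or device-reachable, so DMA may have to go through a bounce buffer. A toy model of the idea only (the real allocator, alignment, and sync rules are far richer):

    #include <stdio.h>
    #include <string.h>

    static char dma_pool[64];  /* stand-in for device-reachable memory */
    static size_t pool_used;

    static void *mock_map_single(void *buf, size_t len, int reachable)
    {
        if (reachable)
            return buf;  /* direct mapping, no copy */
        void *bounce = dma_pool + pool_used;  /* grab a bounce buffer */
        pool_used += len;
        memcpy(bounce, buf, len);  /* the DMA_TO_DEVICE copy */
        return bounce;
    }

    int main(void)
    {
        char high[] = "payload";
        char *dev_addr = mock_map_single(high, sizeof(high), 0);
        printf("device sees: %s\n", dev_addr);
        return 0;
    }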
-diff --git a/include/asm-i386/synch_bitops.h b/include/asm-i386/synch_bitops.h
-new file mode 100644
-index 0000000..2d3f7a8
---- /dev/null
-+++ b/include/asm-i386/synch_bitops.h
-@@ -0,0 +1,143 @@
-+#ifndef __ASM_SYNCH_BITOPS_H__
-+#define __ASM_SYNCH_BITOPS_H__
-+
-+/*
-+ * Copyright 1992, Linus Torvalds.
-+ * Heavily modified to provide guaranteed strong synchronisation
-+ * when communicating with Xen or other guest OSes running on other CPUs.
-+ */
-+
-+#include <linux/config.h>
-+
-+#define ADDR (*(volatile long *) addr)
-+
-+static __inline__ void synch_set_bit(int nr, volatile void * addr)
-+{
-+ __asm__ __volatile__ (
-+ "lock btsl %1,%0"
-+ : "+m" (ADDR) : "Ir" (nr) : "memory" );
-+}
-+
-+static __inline__ void synch_clear_bit(int nr, volatile void * addr)
-+{
-+ __asm__ __volatile__ (
-+ "lock btrl %1,%0"
-+ : "+m" (ADDR) : "Ir" (nr) : "memory" );
-+}
-+
-+static __inline__ void synch_change_bit(int nr, volatile void * addr)
-+{
-+ __asm__ __volatile__ (
-+ "lock btcl %1,%0"
-+ : "+m" (ADDR) : "Ir" (nr) : "memory" );
-+}
-+
-+static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
-+{
-+ int oldbit;
-+ __asm__ __volatile__ (
-+ "lock btsl %2,%1\n\tsbbl %0,%0"
-+ : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
-+ return oldbit;
-+}
-+
-+static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
-+{
-+ int oldbit;
-+ __asm__ __volatile__ (
-+ "lock btrl %2,%1\n\tsbbl %0,%0"
-+ : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
-+ return oldbit;
-+}
-+
-+static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
-+{
-+ int oldbit;
-+
-+ __asm__ __volatile__ (
-+ "lock btcl %2,%1\n\tsbbl %0,%0"
-+ : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
-+ return oldbit;
-+}
-+
-+struct __synch_xchg_dummy { unsigned long a[100]; };
-+#define __synch_xg(x) ((struct __synch_xchg_dummy *)(x))
-+
-+#define synch_cmpxchg(ptr, old, new) \
-+((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\
-+ (unsigned long)(old), \
-+ (unsigned long)(new), \
-+ sizeof(*(ptr))))
-+
-+static inline unsigned long __synch_cmpxchg(volatile void *ptr,
-+ unsigned long old,
-+ unsigned long new, int size)
-+{
-+ unsigned long prev;
-+ switch (size) {
-+ case 1:
-+ __asm__ __volatile__("lock cmpxchgb %b1,%2"
-+ : "=a"(prev)
-+ : "q"(new), "m"(*__synch_xg(ptr)),
-+ "0"(old)
-+ : "memory");
-+ return prev;
-+ case 2:
-+ __asm__ __volatile__("lock cmpxchgw %w1,%2"
-+ : "=a"(prev)
-+ : "r"(new), "m"(*__synch_xg(ptr)),
-+ "0"(old)
-+ : "memory");
-+ return prev;
-+#if defined(__i386__)
-+ case 4:
-+ __asm__ __volatile__("lock cmpxchgl %1,%2"
-+ : "=a"(prev)
-+ : "r"(new), "m"(*__synch_xg(ptr)),
-+ "0"(old)
-+ : "memory");
-+ return prev;
-+#elif defined(__x86_64__)
-+ case 4:
-+ __asm__ __volatile__("lock cmpxchgl %k1,%2"
-+ : "=a"(prev)
-+ : "r"(new), "m"(*__synch_xg(ptr)),
-+ "0"(old)
-+ : "memory");
-+ return prev;
-+ case 8:
-+ __asm__ __volatile__("lock cmpxchgq %1,%2"
-+ : "=a"(prev)
-+ : "r"(new), "m"(*__synch_xg(ptr)),
-+ "0"(old)
-+ : "memory");
-+ return prev;
-+#else
-+#error sync_bitops not defined for arch
-+#endif
-+ }
-+ return old;
-+}
-+
-+static __always_inline int synch_const_test_bit(int nr,
-+ const volatile void * addr)
-+{
-+ return ((1U << (nr & 31)) &
-+ (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
-+}
-+
-+static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
-+{
-+ int oldbit;
-+ __asm__ __volatile__ (
-+ "btl %2,%1\n\tsbbl %0,%0"
-+ : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
-+ return oldbit;
-+}
-+
-+#define synch_test_bit(nr,addr) \
-+(__builtin_constant_p(nr) ? \
-+ synch_const_test_bit((nr),(addr)) : \
-+ synch_var_test_bit((nr),(addr)))
-+
-+#endif /* __ASM_SYNCH_BITOPS_H__ */
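The point of this whole file is in its header comment: these bit operations keep the lock prefix unconditionally, even on uniprocessor kernels, because the peer on the other side of the shared page is Xen or another domain rather than another local CPU. The same guarantee expressed with a compiler builtin (a modern equivalent, not the patch's code):

    #include <stdio.h>

    static int synch_test_and_set_bit_builtin(int nr, volatile void *addr)
    {
        volatile unsigned int *word = (volatile unsigned int *)addr + (nr >> 5);
        unsigned int mask = 1u << (nr & 31);
        /* __sync_fetch_and_or emits a locked instruction on x86 */
        return (__sync_fetch_and_or(word, mask) & mask) != 0;
    }

    int main(void)
    {
        unsigned int flags = 0;
        printf("%d\n", synch_test_and_set_bit_builtin(3, &flags));  /* 0 */
        printf("%d\n", synch_test_and_set_bit_builtin(3, &flags));  /* 1 */
        return 0;
    }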
-diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
-index 399145a..1e99ff8 100644
---- a/include/asm-i386/system.h
-+++ b/include/asm-i386/system.h
-@@ -5,7 +5,7 @@
- #include <linux/kernel.h>
- #include <asm/segment.h>
- #include <asm/cpufeature.h>
--#include <linux/bitops.h> /* for LOCK_PREFIX */
-+#include <asm/smp_alt.h>
-
- #ifdef __KERNEL__
-
-@@ -83,64 +83,6 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t"
- #define savesegment(seg, value) \
- asm volatile("mov %%" #seg ",%0":"=rm" (value))
-
--/*
-- * Clear and set 'TS' bit respectively
-- */
--#define clts() __asm__ __volatile__ ("clts")
--#define read_cr0() ({ \
-- unsigned int __dummy; \
-- __asm__ __volatile__( \
-- "movl %%cr0,%0\n\t" \
-- :"=r" (__dummy)); \
-- __dummy; \
--})
--#define write_cr0(x) \
-- __asm__ __volatile__("movl %0,%%cr0": :"r" (x));
--
--#define read_cr2() ({ \
-- unsigned int __dummy; \
-- __asm__ __volatile__( \
-- "movl %%cr2,%0\n\t" \
-- :"=r" (__dummy)); \
-- __dummy; \
--})
--#define write_cr2(x) \
-- __asm__ __volatile__("movl %0,%%cr2": :"r" (x));
--
--#define read_cr3() ({ \
-- unsigned int __dummy; \
-- __asm__ ( \
-- "movl %%cr3,%0\n\t" \
-- :"=r" (__dummy)); \
-- __dummy; \
--})
--#define write_cr3(x) \
-- __asm__ __volatile__("movl %0,%%cr3": :"r" (x));
--
--#define read_cr4() ({ \
-- unsigned int __dummy; \
-- __asm__( \
-- "movl %%cr4,%0\n\t" \
-- :"=r" (__dummy)); \
-- __dummy; \
--})
--
--#define read_cr4_safe() ({ \
-- unsigned int __dummy; \
-- /* This could fault if %cr4 does not exist */ \
-- __asm__("1: movl %%cr4, %0 \n" \
-- "2: \n" \
-- ".section __ex_table,\"a\" \n" \
-- ".long 1b,2b \n" \
-- ".previous \n" \
-- : "=r" (__dummy): "0" (0)); \
-- __dummy; \
--})
--
--#define write_cr4(x) \
-- __asm__ __volatile__("movl %0,%%cr4": :"r" (x));
--#define stts() write_cr0(8 | read_cr0())
--
- #endif /* __KERNEL__ */
-
- #define wbinvd() \
-@@ -271,19 +213,19 @@ static inline unsigned long __cmpxchg(vo
- unsigned long prev;
- switch (size) {
- case 1:
-- __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
-+ __asm__ __volatile__(LOCK "cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 2:
-- __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
-+ __asm__ __volatile__(LOCK "cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 4:
-- __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
-+ __asm__ __volatile__(LOCK "cmpxchgl %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
-@@ -336,7 +278,7 @@ static inline unsigned long long __cmpxc
- unsigned long long new)
- {
- unsigned long long prev;
-- __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
-+ __asm__ __volatile__(LOCK "cmpxchg8b %3"
- : "=A"(prev)
- : "b"((unsigned long)new),
- "c"((unsigned long)(new >> 32)),
-@@ -503,11 +445,55 @@ struct alt_instr {
- #endif
-
- #ifdef CONFIG_SMP
-+#if defined(CONFIG_SMP_ALTERNATIVES) && !defined(MODULE)
-+#define smp_alt_mb(instr) \
-+__asm__ __volatile__("6667:\nnop\nnop\nnop\nnop\nnop\nnop\n6668:\n" \
-+ ".section __smp_alternatives,\"a\"\n" \
-+ ".long 6667b\n" \
-+ ".long 6673f\n" \
-+ ".previous\n" \
-+ ".section __smp_replacements,\"a\"\n" \
-+ "6673:.byte 6668b-6667b\n" \
-+ ".byte 6670f-6669f\n" \
-+ ".byte 6671f-6670f\n" \
-+ ".byte 0\n" \
-+ ".byte %c0\n" \
-+ "6669:lock;addl $0,0(%%esp)\n" \
-+ "6670:" instr "\n" \
-+ "6671:\n" \
-+ ".previous\n" \
-+ : \
-+ : "i" (X86_FEATURE_XMM2) \
-+ : "memory")
-+#define smp_mb() smp_alt_mb("mfence")
-+#define smp_rmb() smp_alt_mb("lfence")
-+#define set_mb(var, value) do { \
-+unsigned long __set_mb_temp; \
-+__asm__ __volatile__("6667:movl %1, %0\n6668:\n" \
-+ ".section __smp_alternatives,\"a\"\n" \
-+ ".long 6667b\n" \
-+ ".long 6673f\n" \
-+ ".previous\n" \
-+ ".section __smp_replacements,\"a\"\n" \
-+ "6673: .byte 6668b-6667b\n" \
-+ ".byte 6670f-6669f\n" \
-+ ".byte 0\n" \
-+ ".byte 6671f-6670f\n" \
-+ ".byte -1\n" \
-+ "6669: xchg %1, %0\n" \
-+ "6670:movl %1, %0\n" \
-+ "6671:\n" \
-+ ".previous\n" \
-+ : "=m" (var), "=r" (__set_mb_temp) \
-+ : "1" (value) \
-+ : "memory"); } while (0)
-+#else
- #define smp_mb() mb()
- #define smp_rmb() rmb()
-+#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-+#endif
- #define smp_wmb() wmb()
- #define smp_read_barrier_depends() read_barrier_depends()
--#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
- #else
- #define smp_mb() barrier()
- #define smp_rmb() barrier()
-@@ -518,25 +504,7 @@ struct alt_instr {
-
- #define set_wmb(var, value) do { var = value; wmb(); } while (0)
-
--/* interrupt control.. */
--#define local_save_flags(x) do { typecheck(unsigned long,x); __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */); } while (0)
--#define local_irq_restore(x) do { typecheck(unsigned long,x); __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc"); } while (0)
--#define local_irq_disable() __asm__ __volatile__("cli": : :"memory")
--#define local_irq_enable() __asm__ __volatile__("sti": : :"memory")
--/* used in the idle loop; sti takes one instruction cycle to complete */
--#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
--/* used when interrupts are already enabled or to shutdown the processor */
--#define halt() __asm__ __volatile__("hlt": : :"memory")
--
--#define irqs_disabled() \
--({ \
-- unsigned long flags; \
-- local_save_flags(flags); \
-- !(flags & (1<<9)); \
--})
--
--/* For spinlocks etc */
--#define local_irq_save(x) __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
-+#include <mach_system.h>
-
- /*
- * disable hlt during certain critical i/o operations
-diff --git a/include/asm-i386/tlbflush.h b/include/asm-i386/tlbflush.h
-index ab216e1..98b7876 100644
---- a/include/asm-i386/tlbflush.h
-+++ b/include/asm-i386/tlbflush.h
-@@ -5,64 +5,11 @@
- #include <linux/mm.h>
- #include <asm/processor.h>
-
--#define __flush_tlb() \
-- do { \
-- unsigned int tmpreg; \
-- \
-- __asm__ __volatile__( \
-- "movl %%cr3, %0; \n" \
-- "movl %0, %%cr3; # flush TLB \n" \
-- : "=r" (tmpreg) \
-- :: "memory"); \
-- } while (0)
--
--/*
-- * Global pages have to be flushed a bit differently. Not a real
-- * performance problem because this does not happen often.
-- */
--#define __flush_tlb_global() \
-- do { \
-- unsigned int tmpreg, cr4, cr4_orig; \
-- \
-- __asm__ __volatile__( \
-- "movl %%cr4, %2; # turn off PGE \n" \
-- "movl %2, %1; \n" \
-- "andl %3, %1; \n" \
-- "movl %1, %%cr4; \n" \
-- "movl %%cr3, %0; \n" \
-- "movl %0, %%cr3; # flush TLB \n" \
-- "movl %2, %%cr4; # turn PGE back on \n" \
-- : "=&r" (tmpreg), "=&r" (cr4), "=&r" (cr4_orig) \
-- : "i" (~X86_CR4_PGE) \
-- : "memory"); \
-- } while (0)
--
- extern unsigned long pgkern_mask;
-
--# define __flush_tlb_all() \
-- do { \
-- if (cpu_has_pge) \
-- __flush_tlb_global(); \
-- else \
-- __flush_tlb(); \
-- } while (0)
--
- #define cpu_has_invlpg (boot_cpu_data.x86 > 3)
-
--#define __flush_tlb_single(addr) \
-- __asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
--
--#ifdef CONFIG_X86_INVLPG
--# define __flush_tlb_one(addr) __flush_tlb_single(addr)
--#else
--# define __flush_tlb_one(addr) \
-- do { \
-- if (cpu_has_invlpg) \
-- __flush_tlb_single(addr); \
-- else \
-- __flush_tlb(); \
-- } while (0)
--#endif
-+#include <mach_tlbflush.h>
-
- /*
- * TLB flushing:
-diff --git a/include/asm-i386/vga.h b/include/asm-i386/vga.h
-index ef0c0e5..4de2ca6 100644
---- a/include/asm-i386/vga.h
-+++ b/include/asm-i386/vga.h
-@@ -12,7 +12,7 @@
- * access the videoram directly without any black magic.
- */
-
--#define VGA_MAP_MEM(x) (unsigned long)phys_to_virt(x)
-+#define VGA_MAP_MEM(x) ((unsigned long)isa_bus_to_virt(x))
-
- #define vga_readb(x) (*(x))
- #define vga_writeb(x,y) (*(y) = (x))
-diff --git a/include/asm-ia64/fixmap.h b/include/asm-ia64/fixmap.h
-new file mode 100644
-index 0000000..4b32a82
---- /dev/null
-+++ b/include/asm-ia64/fixmap.h
-@@ -0,0 +1,2 @@
-+#define clear_fixmap(x) do {} while (0)
-+#define set_fixmap(x,y) do {} while (0)
-diff --git a/include/asm-ia64/gcc_intrin.h b/include/asm-ia64/gcc_intrin.h
-index 4fb4e43..b23a280 100644
---- a/include/asm-ia64/gcc_intrin.h
-+++ b/include/asm-ia64/gcc_intrin.h
-@@ -26,7 +26,7 @@ extern void ia64_bad_param_for_getreg (v
-
- register unsigned long ia64_r13 asm ("r13") __attribute_used__;
-
--#define ia64_setreg(regnum, val) \
-+#define __ia64_setreg(regnum, val) \
- ({ \
- switch (regnum) { \
- case _IA64_REG_PSR_L: \
-@@ -55,7 +55,7 @@ register unsigned long ia64_r13 asm ("r1
- } \
- })
-
--#define ia64_getreg(regnum) \
-+#define __ia64_getreg(regnum) \
- ({ \
- __u64 ia64_intri_res; \
- \
-@@ -92,7 +92,7 @@ register unsigned long ia64_r13 asm ("r1
-
- #define ia64_hint_pause 0
-
--#define ia64_hint(mode) \
-+#define __ia64_hint(mode) \
- ({ \
- switch (mode) { \
- case ia64_hint_pause: \
-@@ -374,7 +374,7 @@ register unsigned long ia64_r13 asm ("r1
-
- #define ia64_invala() asm volatile ("invala" ::: "memory")
-
--#define ia64_thash(addr) \
-+#define __ia64_thash(addr) \
- ({ \
- __u64 ia64_intri_res; \
- asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
-@@ -394,18 +394,18 @@ register unsigned long ia64_r13 asm ("r1
-
- #define ia64_nop(x) asm volatile ("nop %0"::"i"(x));
-
--#define ia64_itci(addr) asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")
-+#define __ia64_itci(addr) asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")
-
--#define ia64_itcd(addr) asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")
-+#define __ia64_itcd(addr) asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")
-
-
--#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1" \
-+#define __ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1" \
- :: "r"(trnum), "r"(addr) : "memory")
-
--#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1" \
-+#define __ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1" \
- :: "r"(trnum), "r"(addr) : "memory")
-
--#define ia64_tpa(addr) \
-+#define __ia64_tpa(addr) \
- ({ \
- __u64 ia64_pa; \
- asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory"); \
-@@ -415,22 +415,22 @@ register unsigned long ia64_r13 asm ("r1
- #define __ia64_set_dbr(index, val) \
- asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")
-
--#define ia64_set_ibr(index, val) \
-+#define __ia64_set_ibr(index, val) \
- asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")
-
--#define ia64_set_pkr(index, val) \
-+#define __ia64_set_pkr(index, val) \
- asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")
-
--#define ia64_set_pmc(index, val) \
-+#define __ia64_set_pmc(index, val) \
- asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")
-
--#define ia64_set_pmd(index, val) \
-+#define __ia64_set_pmd(index, val) \
- asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")
-
--#define ia64_set_rr(index, val) \
-+#define __ia64_set_rr(index, val) \
- asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory");
-
--#define ia64_get_cpuid(index) \
-+#define __ia64_get_cpuid(index) \
- ({ \
- __u64 ia64_intri_res; \
- asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \
-@@ -444,21 +444,21 @@ register unsigned long ia64_r13 asm ("r1
- ia64_intri_res; \
- })
-
--#define ia64_get_ibr(index) \
-+#define __ia64_get_ibr(index) \
- ({ \
- __u64 ia64_intri_res; \
- asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
- ia64_intri_res; \
- })
-
--#define ia64_get_pkr(index) \
-+#define __ia64_get_pkr(index) \
- ({ \
- __u64 ia64_intri_res; \
- asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
- ia64_intri_res; \
- })
-
--#define ia64_get_pmc(index) \
-+#define __ia64_get_pmc(index) \
- ({ \
- __u64 ia64_intri_res; \
- asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
-@@ -466,48 +466,48 @@ register unsigned long ia64_r13 asm ("r1
- })
-
-
--#define ia64_get_pmd(index) \
-+#define __ia64_get_pmd(index) \
- ({ \
- __u64 ia64_intri_res; \
- asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
- ia64_intri_res; \
- })
-
--#define ia64_get_rr(index) \
-+#define __ia64_get_rr(index) \
- ({ \
- __u64 ia64_intri_res; \
- asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index)); \
- ia64_intri_res; \
- })
-
--#define ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory")
-+#define __ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory")
-
-
- #define ia64_sync_i() asm volatile (";; sync.i" ::: "memory")
-
--#define ia64_ssm(mask) asm volatile ("ssm %0":: "i"((mask)) : "memory")
--#define ia64_rsm(mask) asm volatile ("rsm %0":: "i"((mask)) : "memory")
-+#define __ia64_ssm(mask) asm volatile ("ssm %0":: "i"((mask)) : "memory")
-+#define __ia64_rsm(mask) asm volatile ("rsm %0":: "i"((mask)) : "memory")
- #define ia64_sum(mask) asm volatile ("sum %0":: "i"((mask)) : "memory")
- #define ia64_rum(mask) asm volatile ("rum %0":: "i"((mask)) : "memory")
-
--#define ia64_ptce(addr) asm volatile ("ptc.e %0" :: "r"(addr))
-+#define __ia64_ptce(addr) asm volatile ("ptc.e %0" :: "r"(addr))
-
--#define ia64_ptcga(addr, size) \
-+#define __ia64_ptcga(addr, size) \
- do { \
- asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory"); \
- ia64_dv_serialize_data(); \
- } while (0)
-
--#define ia64_ptcl(addr, size) \
-+#define __ia64_ptcl(addr, size) \
- do { \
- asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory"); \
- ia64_dv_serialize_data(); \
- } while (0)
-
--#define ia64_ptri(addr, size) \
-+#define __ia64_ptri(addr, size) \
- asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")
-
--#define ia64_ptrd(addr, size) \
-+#define __ia64_ptrd(addr, size) \
- asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")
-
- /* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */
-@@ -589,7 +589,7 @@ do { \
- } \
- })
-
--#define ia64_intrin_local_irq_restore(x) \
-+#define __ia64_intrin_local_irq_restore(x) \
- do { \
- asm volatile (";; cmp.ne p6,p7=%0,r0;;" \
- "(p6) ssm psr.i;" \
-@@ -598,4 +598,6 @@ do { \
- :: "r"((x)) : "p6", "p7", "memory"); \
- } while (0)
-
-+#define __ia64_get_psr_i() (__ia64_getreg(_IA64_REG_PSR) & 0x4000UL)
-+
- #endif /* _ASM_IA64_GCC_INTRIN_H */
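
Both intrinsics headers close by deriving psr.i from a full PSR read: bit 14 of the processor status register is the interrupt-enable bit, so masking with 0x4000UL yields IA64_PSR_I when interrupts are enabled and 0 otherwise. A minimal user-space model of that extraction — the mask is the only real constant here; the register read is stubbed out:

    #include <assert.h>

    #define IA64_PSR_I 0x4000UL                  /* psr.i is bit 14 of the PSR */

    static unsigned long fake_psr;               /* stands in for __ia64_getreg(_IA64_REG_PSR) */
    #define get_psr_i() (fake_psr & IA64_PSR_I)

    int main(void)
    {
        fake_psr = 0;          assert(get_psr_i() == 0);
        fake_psr = 1UL << 14;  assert(get_psr_i() == IA64_PSR_I);
        return 0;
    }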
-diff --git a/include/asm-ia64/hypercall.h b/include/asm-ia64/hypercall.h
-new file mode 100644
-index 0000000..fd57868
---- /dev/null
-+++ b/include/asm-ia64/hypercall.h
-@@ -0,0 +1,253 @@
-+/******************************************************************************
-+ * hypercall.h
-+ *
-+ * Linux-specific hypervisor handling.
-+ *
-+ * Copyright (c) 2002-2004, K A Fraser
-+ *
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __HYPERCALL_H__
-+#define __HYPERCALL_H__
-+
-+#include <xen/interface/xen.h>
-+#include <xen/interface/sched.h>
-+
-+/* FIXME: temporary place to hold these page-related macros */
-+#include <asm/page.h>
-+#define virt_to_machine(v) __pa(v)
-+#define machine_to_virt(m) __va(m)
-+#define virt_to_mfn(v) ((__pa(v)) >> PAGE_SHIFT)
-+#define mfn_to_virt(m) (__va((m) << PAGE_SHIFT))
-+
-+/*
-+ * Assembler stubs for hyper-calls.
-+ */
-+
-+#define _hypercall0(type, name) \
-+({ \
-+ long __res; \
-+ __asm__ __volatile__ (";;\n" \
-+ "mov r2=%1\n" \
-+ "break 0x1000 ;;\n" \
-+ "mov %0=r8 ;;\n" \
-+ : "=r" (__res) \
-+ : "i" (__HYPERVISOR_##name) \
-+ : "r2","r8", \
-+ "memory" ); \
-+ (type)__res; \
-+})
-+
-+#define _hypercall1(type, name, a1) \
-+({ \
-+ long __res; \
-+ __asm__ __volatile__ (";;\n" \
-+ "mov r14=%2\n" \
-+ "mov r2=%1\n" \
-+ "break 0x1000 ;;\n" \
-+ "mov %0=r8 ;;\n" \
-+ : "=r" (__res) \
-+ : "i" (__HYPERVISOR_##name), \
-+ "r" ((unsigned long)(a1)) \
-+ : "r14","r2","r8", \
-+ "memory" ); \
-+ (type)__res; \
-+})
-+
-+#define _hypercall2(type, name, a1, a2) \
-+({ \
-+ long __res; \
-+ __asm__ __volatile__ (";;\n" \
-+ "mov r14=%2\n" \
-+ "mov r15=%3\n" \
-+ "mov r2=%1\n" \
-+ "break 0x1000 ;;\n" \
-+ "mov %0=r8 ;;\n" \
-+ : "=r" (__res) \
-+ : "i" (__HYPERVISOR_##name), \
-+ "r" ((unsigned long)(a1)), \
-+ "r" ((unsigned long)(a2)) \
-+ : "r14","r15","r2","r8", \
-+ "memory" ); \
-+ (type)__res; \
-+})
-+
-+#define _hypercall3(type, name, a1, a2, a3) \
-+({ \
-+ long __res; \
-+ __asm__ __volatile__ (";;\n" \
-+ "mov r14=%2\n" \
-+ "mov r15=%3\n" \
-+ "mov r16=%4\n" \
-+ "mov r2=%1\n" \
-+ "break 0x1000 ;;\n" \
-+ "mov %0=r8 ;;\n" \
-+ : "=r" (__res) \
-+ : "i" (__HYPERVISOR_##name), \
-+ "r" ((unsigned long)(a1)), \
-+ "r" ((unsigned long)(a2)), \
-+ "r" ((unsigned long)(a3)) \
-+ : "r14","r15","r16","r2","r8", \
-+ "memory" ); \
-+ (type)__res; \
-+})
-+
-+#define _hypercall4(type, name, a1, a2, a3, a4) \
-+({ \
-+ long __res; \
-+ __asm__ __volatile__ (";;\n" \
-+ "mov r14=%2\n" \
-+ "mov r15=%3\n" \
-+ "mov r16=%4\n" \
-+ "mov r17=%5\n" \
-+ "mov r2=%1\n" \
-+ "break 0x1000 ;;\n" \
-+ "mov %0=r8 ;;\n" \
-+ : "=r" (__res) \
-+ : "i" (__HYPERVISOR_##name), \
-+ "r" ((unsigned long)(a1)), \
-+ "r" ((unsigned long)(a2)), \
-+ "r" ((unsigned long)(a3)), \
-+ "r" ((unsigned long)(a4)) \
-+ : "r14","r15","r16","r2","r8", \
-+ "r17","memory" ); \
-+ (type)__res; \
-+})
-+
-+#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
-+({ \
-+ long __res; \
-+ __asm__ __volatile__ (";;\n" \
-+ "mov r14=%2\n" \
-+ "mov r15=%3\n" \
-+ "mov r16=%4\n" \
-+ "mov r17=%5\n" \
-+ "mov r18=%6\n" \
-+ "mov r2=%1\n" \
-+ "break 0x1000 ;;\n" \
-+ "mov %0=r8 ;;\n" \
-+ : "=r" (__res) \
-+ : "i" (__HYPERVISOR_##name), \
-+ "r" ((unsigned long)(a1)), \
-+ "r" ((unsigned long)(a2)), \
-+ "r" ((unsigned long)(a3)), \
-+ "r" ((unsigned long)(a4)), \
-+ "r" ((unsigned long)(a5)) \
-+ : "r14","r15","r16","r2","r8", \
-+ "r17","r18","memory" ); \
-+ (type)__res; \
-+})
-+
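The calling convention encoded in these ia64 stubs is readable straight from the asm constraints: the hypercall number is a compile-time immediate ("i" (__HYPERVISOR_##name)) loaded into r2, up to five arguments are passed in r14-r18, "break 0x1000" transfers control to Xen, and the result comes back in r8. Adding a guest-side wrapper is then a one-liner; "example_op" below is hypothetical (there is no __HYPERVISOR_example_op) and only illustrates the pattern used by the HYPERVISOR_* inlines that follow:

    /* Hypothetical wrapper sketch -- example_op is not a real hypercall;
     * it only shows how the _hypercallN stubs are meant to be used. */
    static inline int
    HYPERVISOR_example_op(unsigned int cmd, void *arg)
    {
        return _hypercall2(int, example_op, cmd, arg);
    }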
-+static inline int
-+HYPERVISOR_sched_op(
-+ int cmd, unsigned long arg)
-+{
-+ return _hypercall2(int, sched_op, cmd, arg);
-+}
-+
-+static inline long
-+HYPERVISOR_set_timer_op(
-+ u64 timeout)
-+{
-+ unsigned long timeout_hi = (unsigned long)(timeout>>32);
-+ unsigned long timeout_lo = (unsigned long)timeout;
-+ return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
-+}
-+
-+static inline int
-+HYPERVISOR_dom0_op(
-+ dom0_op_t *dom0_op)
-+{
-+ dom0_op->interface_version = DOM0_INTERFACE_VERSION;
-+ return _hypercall1(int, dom0_op, dom0_op);
-+}
-+
-+static inline int
-+HYPERVISOR_multicall(
-+ void *call_list, int nr_calls)
-+{
-+ return _hypercall2(int, multicall, call_list, nr_calls);
-+}
-+
-+static inline int
-+HYPERVISOR_memory_op(
-+ unsigned int cmd, void *arg)
-+{
-+ return _hypercall2(int, memory_op, cmd, arg);
-+}
-+
-+static inline int
-+HYPERVISOR_event_channel_op(
-+ void *op)
-+{
-+ return _hypercall1(int, event_channel_op, op);
-+}
-+
-+static inline int
-+HYPERVISOR_xen_version(
-+ int cmd, void *arg)
-+{
-+ return _hypercall2(int, xen_version, cmd, arg);
-+}
-+
-+static inline int
-+HYPERVISOR_console_io(
-+ int cmd, int count, char *str)
-+{
-+ return _hypercall3(int, console_io, cmd, count, str);
-+}
-+
-+static inline int
-+HYPERVISOR_physdev_op(
-+ void *physdev_op)
-+{
-+ return _hypercall1(int, physdev_op, physdev_op);
-+}
-+
-+static inline int
-+HYPERVISOR_grant_table_op(
-+ unsigned int cmd, void *uop, unsigned int count)
-+{
-+ return _hypercall3(int, grant_table_op, cmd, uop, count);
-+}
-+
-+static inline int
-+HYPERVISOR_vcpu_op(
-+ int cmd, int vcpuid, void *extra_args)
-+{
-+ return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
-+}
-+
-+static inline int
-+HYPERVISOR_suspend(
-+ unsigned long srec)
-+{
-+ return _hypercall3(int, sched_op, SCHEDOP_shutdown,
-+ SHUTDOWN_suspend, srec);
-+}
-+
-+extern fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs);
-+static inline void exit_idle(void) {}
-+#define do_IRQ(irq, regs) __do_IRQ((irq), (regs))
-+
-+#endif /* __HYPERCALL_H__ */
-diff --git a/include/asm-ia64/hypervisor.h b/include/asm-ia64/hypervisor.h
-new file mode 100644
-index 0000000..4771111
---- /dev/null
-+++ b/include/asm-ia64/hypervisor.h
-@@ -0,0 +1,70 @@
-+/******************************************************************************
-+ * hypervisor.h
-+ *
-+ * Linux-specific hypervisor handling.
-+ *
-+ * Copyright (c) 2002-2004, K A Fraser
-+ *
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __HYPERVISOR_H__
-+#define __HYPERVISOR_H__
-+
-+#include <linux/config.h>
-+#include <linux/types.h>
-+#include <linux/kernel.h>
-+#include <linux/version.h>
-+#include <xen/interface/xen.h>
-+#include <xen/interface/dom0_ops.h>
-+#include <asm/ptrace.h>
-+#include <asm/page.h>
-+
-+extern shared_info_t *HYPERVISOR_shared_info;
-+extern start_info_t *xen_start_info;
-+
-+void force_evtchn_callback(void);
-+
-+#include <asm/hypercall.h>
-+
-+// for drivers/xen/privcmd/privcmd.c
-+#define direct_remap_pfn_range(a,b,c,d,e,f) remap_pfn_range(a,b,c,d,e)
-+#define pfn_to_mfn(x) (x)
-+#define mfn_to_pfn(x) (x)
-+#define machine_to_phys_mapping 0
-+
-+// for drivers/xen/balloon/balloon.c
-+#ifdef CONFIG_XEN_SCRUB_PAGES
-+#define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
-+#else
-+#define scrub_pages(_p,_n) ((void)0)
-+#endif
-+#define pte_mfn(_x) pte_pfn(_x)
-+#define INVALID_P2M_ENTRY (~0UL)
-+#define __pte_ma(_x) ((pte_t) {(_x)})
-+#define phys_to_machine_mapping_valid(_x) (1)
-+#define kmap_flush_unused() do {} while (0)
-+#define set_phys_to_machine(_x,_y) do {} while (0)
-+#define xen_machphys_update(_x,_y) do {} while (0)
-+#define pfn_pte_ma(_x,_y) __pte_ma(0)
-+
-+#endif /* __HYPERVISOR_H__ */
-diff --git a/include/asm-ia64/intel_intrin.h b/include/asm-ia64/intel_intrin.h
-index a7122d8..d4e5471 100644
---- a/include/asm-ia64/intel_intrin.h
-+++ b/include/asm-ia64/intel_intrin.h
-@@ -119,10 +119,10 @@ __s64 _m64_popcnt(__s64 a);
- * intrinsic
- */
-
--#define ia64_getreg __getReg
--#define ia64_setreg __setReg
-+#define __ia64_getreg __getReg
-+#define __ia64_setreg __setReg
-
--#define ia64_hint(x)
-+#define __ia64_hint(x)
-
- #define ia64_mux1_brcst 0
- #define ia64_mux1_mix 8
-@@ -135,16 +135,16 @@ __s64 _m64_popcnt(__s64 a);
- #define ia64_getf_exp __getf_exp
- #define ia64_shrp _m64_shrp
-
--#define ia64_tpa __tpa
-+#define __ia64_tpa __tpa
- #define ia64_invala __invala
- #define ia64_invala_gr __invala_gr
- #define ia64_invala_fr __invala_fr
- #define ia64_nop __nop
- #define ia64_sum __sum
--#define ia64_ssm __ssm
-+#define __ia64_ssm __ssm
- #define ia64_rum __rum
--#define ia64_rsm __rsm
--#define ia64_fc __fc
-+#define __ia64_rsm __rsm
-+#define __ia64_fc __fc
-
- #define ia64_ldfs __ldfs
- #define ia64_ldfd __ldfd
-@@ -182,24 +182,24 @@ __s64 _m64_popcnt(__s64 a);
-
- #define __ia64_set_dbr(index, val) \
- __setIndReg(_IA64_REG_INDR_DBR, index, val)
--#define ia64_set_ibr(index, val) \
-+#define __ia64_set_ibr(index, val) \
- __setIndReg(_IA64_REG_INDR_IBR, index, val)
--#define ia64_set_pkr(index, val) \
-+#define __ia64_set_pkr(index, val) \
- __setIndReg(_IA64_REG_INDR_PKR, index, val)
--#define ia64_set_pmc(index, val) \
-+#define __ia64_set_pmc(index, val) \
- __setIndReg(_IA64_REG_INDR_PMC, index, val)
--#define ia64_set_pmd(index, val) \
-+#define __ia64_set_pmd(index, val) \
- __setIndReg(_IA64_REG_INDR_PMD, index, val)
--#define ia64_set_rr(index, val) \
-+#define __ia64_set_rr(index, val) \
- __setIndReg(_IA64_REG_INDR_RR, index, val)
-
--#define ia64_get_cpuid(index) __getIndReg(_IA64_REG_INDR_CPUID, index)
-+#define __ia64_get_cpuid(index) __getIndReg(_IA64_REG_INDR_CPUID, index)
- #define __ia64_get_dbr(index) __getIndReg(_IA64_REG_INDR_DBR, index)
--#define ia64_get_ibr(index) __getIndReg(_IA64_REG_INDR_IBR, index)
--#define ia64_get_pkr(index) __getIndReg(_IA64_REG_INDR_PKR, index)
--#define ia64_get_pmc(index) __getIndReg(_IA64_REG_INDR_PMC, index)
--#define ia64_get_pmd(index) __getIndReg(_IA64_REG_INDR_PMD, index)
--#define ia64_get_rr(index) __getIndReg(_IA64_REG_INDR_RR, index)
-+#define __ia64_get_ibr(index) __getIndReg(_IA64_REG_INDR_IBR, index)
-+#define __ia64_get_pkr(index) __getIndReg(_IA64_REG_INDR_PKR, index)
-+#define __ia64_get_pmc(index) __getIndReg(_IA64_REG_INDR_PMC, index)
-+#define __ia64_get_pmd(index) __getIndReg(_IA64_REG_INDR_PMD, index)
-+#define __ia64_get_rr(index) __getIndReg(_IA64_REG_INDR_RR, index)
-
- #define ia64_srlz_d __dsrlz
- #define ia64_srlz_i __isrlz
-@@ -218,18 +218,18 @@ __s64 _m64_popcnt(__s64 a);
- #define ia64_ld8_acq __ld8_acq
-
- #define ia64_sync_i __synci
--#define ia64_thash __thash
--#define ia64_ttag __ttag
--#define ia64_itcd __itcd
--#define ia64_itci __itci
--#define ia64_itrd __itrd
--#define ia64_itri __itri
--#define ia64_ptce __ptce
--#define ia64_ptcl __ptcl
--#define ia64_ptcg __ptcg
--#define ia64_ptcga __ptcga
--#define ia64_ptri __ptri
--#define ia64_ptrd __ptrd
-+#define __ia64_thash __thash
-+#define __ia64_ttag __ttag
-+#define __ia64_itcd __itcd
-+#define __ia64_itci __itci
-+#define __ia64_itrd __itrd
-+#define __ia64_itri __itri
-+#define __ia64_ptce __ptce
-+#define __ia64_ptcl __ptcl
-+#define __ia64_ptcg __ptcg
-+#define __ia64_ptcga __ptcga
-+#define __ia64_ptri __ptri
-+#define __ia64_ptrd __ptrd
- #define ia64_dep_mi _m64_dep_mi
-
- /* Values for lfhint in __lfetch and __lfetch_fault */
-@@ -244,14 +244,16 @@ __s64 _m64_popcnt(__s64 a);
- #define ia64_lfetch_fault __lfetch_fault
- #define ia64_lfetch_fault_excl __lfetch_fault_excl
-
--#define ia64_intrin_local_irq_restore(x) \
-+#define __ia64_intrin_local_irq_restore(x) \
- do { \
- if ((x) != 0) { \
-- ia64_ssm(IA64_PSR_I); \
-+ __ia64_ssm(IA64_PSR_I); \
- ia64_srlz_d(); \
- } else { \
-- ia64_rsm(IA64_PSR_I); \
-+ __ia64_rsm(IA64_PSR_I); \
- } \
- } while (0)
-
-+#define __ia64_get_psr_i() (__ia64_getreg(_IA64_REG_PSR) & 0x4000UL)
-+
- #endif /* _ASM_IA64_INTEL_INTRIN_H */
-diff --git a/include/asm-ia64/meminit.h b/include/asm-ia64/meminit.h
-index 46501b0..9a4afc3 100644
---- a/include/asm-ia64/meminit.h
-+++ b/include/asm-ia64/meminit.h
-@@ -17,10 +17,15 @@
- * - command line string
- * - kernel code & data
- * - Kernel memory map built from EFI memory map
-+ * - xen start info
- *
- * More could be added if necessary
- */
-+#ifndef CONFIG_XEN
- #define IA64_MAX_RSVD_REGIONS 6
-+#else
-+#define IA64_MAX_RSVD_REGIONS 7
-+#endif
-
- struct rsvd_region {
- unsigned long start; /* virtual address of beginning of element */
-diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h
-index 7708ec6..e476c94 100644
---- a/include/asm-ia64/pal.h
-+++ b/include/asm-ia64/pal.h
-@@ -81,6 +81,7 @@
- #ifndef __ASSEMBLY__
-
- #include <linux/types.h>
-+#include <asm/processor.h>
- #include <asm/fpu.h>
-
- /*
-diff --git a/include/asm-ia64/privop.h b/include/asm-ia64/privop.h
-new file mode 100644
-index 0000000..2a3f783
---- /dev/null
-+++ b/include/asm-ia64/privop.h
-@@ -0,0 +1,59 @@
-+#ifndef _ASM_IA64_PRIVOP_H
-+#define _ASM_IA64_PRIVOP_H
-+
-+/*
-+ * Copyright (C) 2005 Hewlett-Packard Co
-+ * Dan Magenheimer <dan.magenheimer@hp.com>
-+ *
-+ */
-+
-+#include <linux/config.h>
-+#ifdef CONFIG_XEN
-+#include <asm/xen/privop.h>
-+#endif
-+
-+#ifndef __ASSEMBLY__
-+
-+#ifndef IA64_PARAVIRTUALIZED
-+
-+#define ia64_getreg __ia64_getreg
-+#define ia64_setreg __ia64_setreg
-+#define ia64_hint __ia64_hint
-+#define ia64_thash __ia64_thash
-+#define ia64_itci __ia64_itci
-+#define ia64_itcd __ia64_itcd
-+#define ia64_itri __ia64_itri
-+#define ia64_itrd __ia64_itrd
-+#define ia64_tpa __ia64_tpa
-+#define ia64_set_ibr __ia64_set_ibr
-+#define ia64_set_pkr __ia64_set_pkr
-+#define ia64_set_pmc __ia64_set_pmc
-+#define ia64_set_pmd __ia64_set_pmd
-+#define ia64_set_rr __ia64_set_rr
-+#define ia64_get_cpuid __ia64_get_cpuid
-+#define ia64_get_ibr __ia64_get_ibr
-+#define ia64_get_pkr __ia64_get_pkr
-+#define ia64_get_pmc __ia64_get_pmc
-+#define ia64_get_pmd __ia64_get_pmd
-+#define ia64_get_rr __ia64_get_rr
-+#define ia64_fc __ia64_fc
-+#define ia64_ssm __ia64_ssm
-+#define ia64_rsm __ia64_rsm
-+#define ia64_ptce __ia64_ptce
-+#define ia64_ptcga __ia64_ptcga
-+#define ia64_ptcl __ia64_ptcl
-+#define ia64_ptri __ia64_ptri
-+#define ia64_ptrd __ia64_ptrd
-+#define ia64_get_psr_i __ia64_get_psr_i
-+#define ia64_intrin_local_irq_restore __ia64_intrin_local_irq_restore
-+#define ia64_pal_halt_light __ia64_pal_halt_light
-+#define ia64_leave_kernel __ia64_leave_kernel
-+#define ia64_leave_syscall __ia64_leave_syscall
-+#define ia64_switch_to __ia64_switch_to
-+#define ia64_pal_call_static __ia64_pal_call_static
-+
-+#endif /* !IA64_PARAVIRTUALIZED */
-+
-+#endif /* !__ASSEMBLY__ */
-+
-+#endif /* _ASM_IA64_PRIVOP_H */
-diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
-index 23c8e1b..99a1de4 100644
---- a/include/asm-ia64/processor.h
-+++ b/include/asm-ia64/processor.h
-@@ -19,6 +19,7 @@
- #include <asm/kregs.h>
- #include <asm/ptrace.h>
- #include <asm/ustack.h>
-+#include <asm/privop.h>
-
- #define IA64_NUM_DBG_REGS 8
- /*
-diff --git a/include/asm-ia64/synch_bitops.h b/include/asm-ia64/synch_bitops.h
-new file mode 100644
-index 0000000..ee13306
---- /dev/null
-+++ b/include/asm-ia64/synch_bitops.h
-@@ -0,0 +1,61 @@
-+#ifndef __XEN_SYNCH_BITOPS_H__
-+#define __XEN_SYNCH_BITOPS_H__
-+
-+/*
-+ * Copyright 1992, Linus Torvalds.
-+ * Heavily modified to provide guaranteed strong synchronisation
-+ * when communicating with Xen or other guest OSes running on other CPUs.
-+ */
-+
-+#include <linux/config.h>
-+
-+#define ADDR (*(volatile long *) addr)
-+
-+static __inline__ void synch_set_bit(int nr, volatile void * addr)
-+{
-+ set_bit(nr, addr);
-+}
-+
-+static __inline__ void synch_clear_bit(int nr, volatile void * addr)
-+{
-+ clear_bit(nr, addr);
-+}
-+
-+static __inline__ void synch_change_bit(int nr, volatile void * addr)
-+{
-+ change_bit(nr, addr);
-+}
-+
-+static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
-+{
-+ return test_and_set_bit(nr, addr);
-+}
-+
-+static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
-+{
-+ return test_and_clear_bit(nr, addr);
-+}
-+
-+static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
-+{
-+ return test_and_change_bit(nr, addr);
-+}
-+
-+static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
-+{
-+ return test_bit(nr, addr);
-+}
-+
-+static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
-+{
-+ return test_bit(nr, addr);
-+}
-+
-+#define synch_cmpxchg ia64_cmpxchg4_acq
-+
-+#define synch_test_bit(nr,addr) \
-+(__builtin_constant_p(nr) ? \
-+ synch_const_test_bit((nr),(addr)) : \
-+ synch_var_test_bit((nr),(addr)))
-+
-+#endif /* __XEN_SYNCH_BITOPS_H__ */
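On ia64 the synch_* operations can simply delegate to the regular atomic bitops, whose acquire semantics are already strong enough for communication with the hypervisor; the only arch-specific binding is synch_cmpxchg, mapped to the 4-byte acquire compare-and-exchange. The __builtin_constant_p dispatch at the end of the header is a standard kernel idiom, modeled here in isolation (all names are local to this sketch; requires GCC):

    #include <assert.h>

    static int const_test_bit(int nr, const unsigned long *w) { return (int)((*w >> nr) & 1); }
    static int var_test_bit(int nr, const unsigned long *w)   { return (int)((*w >> nr) & 1); }

    /* constant bit numbers take the compile-time-foldable path */
    #define test_bit_dispatch(nr, addr)          \
        (__builtin_constant_p(nr) ?              \
            const_test_bit((nr), (addr)) :       \
            var_test_bit((nr), (addr)))

    int main(void)
    {
        unsigned long w = 0x5;
        assert(test_bit_dispatch(0, &w) == 1);
        assert(test_bit_dispatch(1, &w) == 0);
        return 0;
    }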
-diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
-index 0625387..349c8d1 100644
---- a/include/asm-ia64/system.h
-+++ b/include/asm-ia64/system.h
-@@ -125,7 +125,7 @@ extern struct ia64_boot_param {
- #define __local_irq_save(x) \
- do { \
- ia64_stop(); \
-- (x) = ia64_getreg(_IA64_REG_PSR); \
-+ (x) = ia64_get_psr_i(); \
- ia64_stop(); \
- ia64_rsm(IA64_PSR_I); \
- } while (0)
-@@ -173,7 +173,7 @@ do { \
- #endif /* !CONFIG_IA64_DEBUG_IRQ */
-
- #define local_irq_enable() ({ ia64_stop(); ia64_ssm(IA64_PSR_I); ia64_srlz_d(); })
--#define local_save_flags(flags) ({ ia64_stop(); (flags) = ia64_getreg(_IA64_REG_PSR); })
-+#define local_save_flags(flags) ({ ia64_stop(); (flags) = ia64_get_psr_i(); })
-
- #define irqs_disabled() \
- ({ \
-diff --git a/include/asm-ia64/xen/privop.h b/include/asm-ia64/xen/privop.h
-new file mode 100644
-index 0000000..235197b
---- /dev/null
-+++ b/include/asm-ia64/xen/privop.h
-@@ -0,0 +1,272 @@
-+#ifndef _ASM_IA64_XEN_PRIVOP_H
-+#define _ASM_IA64_XEN_PRIVOP_H
-+
-+/*
-+ * Copyright (C) 2005 Hewlett-Packard Co
-+ * Dan Magenheimer <dan.magenheimer@hp.com>
-+ *
-+ * Paravirtualizations of privileged operations for Xen/ia64
-+ *
-+ */
-+
-+
-+#include <asm/xen/asm-xsi-offsets.h>
-+
-+#define IA64_PARAVIRTUALIZED
-+
-+#ifdef __ASSEMBLY__
-+#define XEN_HYPER_RFI break 0x1
-+#define XEN_HYPER_RSM_PSR_DT break 0x2
-+#define XEN_HYPER_SSM_PSR_DT break 0x3
-+#define XEN_HYPER_COVER break 0x4
-+#define XEN_HYPER_ITC_D break 0x5
-+#define XEN_HYPER_ITC_I break 0x6
-+#define XEN_HYPER_SSM_I break 0x7
-+#define XEN_HYPER_GET_IVR break 0x8
-+#define XEN_HYPER_GET_TPR break 0x9
-+#define XEN_HYPER_SET_TPR break 0xa
-+#define XEN_HYPER_EOI break 0xb
-+#define XEN_HYPER_SET_ITM break 0xc
-+#define XEN_HYPER_THASH break 0xd
-+#define XEN_HYPER_PTC_GA break 0xe
-+#define XEN_HYPER_ITR_D break 0xf
-+#define XEN_HYPER_GET_RR break 0x10
-+#define XEN_HYPER_SET_RR break 0x11
-+#define XEN_HYPER_SET_KR break 0x12
-+#endif
-+
-+#ifndef __ASSEMBLY__
-+#ifdef MODULE
-+extern int is_running_on_xen(void);
-+#define running_on_xen (is_running_on_xen())
-+#else
-+extern int running_on_xen;
-+#endif
-+
-+#define XEN_HYPER_SSM_I asm("break 0x7");
-+#define XEN_HYPER_GET_IVR asm("break 0x8");
-+
-+/************************************************/
-+/* Instructions paravirtualized for correctness */
-+/************************************************/
-+
-+/* "fc" and "thash" are privilege-sensitive instructions, meaning they
-+ * may have different semantics depending on whether they are executed
-+ * at PL0 vs PL!=0. When paravirtualized, these instructions mustn't
-+ * be allowed to execute directly, lest incorrect semantics result. */
-+extern unsigned long xen_fc(unsigned long addr);
-+#define ia64_fc(addr) xen_fc((unsigned long)(addr))
-+extern unsigned long xen_thash(unsigned long addr);
-+#define ia64_thash(addr) xen_thash((unsigned long)(addr))
-+/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
-+ * is not currently used (though it may be in a long-format VHPT system!)
-+ * and the semantics of cover only change if psr.ic is off, which is very
-+ * rare (and currently non-existent outside of assembly code). */
-+
-+/* There are also privilege-sensitive registers. These registers are
-+ * readable at any privilege level but only writable at PL0. */
-+extern unsigned long xen_get_cpuid(int index);
-+#define ia64_get_cpuid(i) xen_get_cpuid(i)
-+extern unsigned long xen_get_pmd(int index);
-+#define ia64_get_pmd(i) xen_get_pmd(i)
-+extern unsigned long xen_get_eflag(void); /* see xen_ia64_getreg */
-+extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */
-+
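The distinction drawn above matters: a privileged instruction executed at PL!=0 faults, so Xen can trap and emulate it transparently, whereas a privilege-sensitive instruction such as "fc" or "thash" silently behaves differently, so every call site must be rebound at compile time. A stand-alone model of the rename-and-rebind trick that privop.h applies throughout (all names and values are illustrative):

    #include <stdio.h>

    #define __raw_op(x) ((x) + 1)        /* direct-hardware version */
    #define xen_op(x)   ((x) + 1000)     /* hypervisor-mediated version */

    #ifdef PARAVIRT
    #define op(x) xen_op(x)              /* every call site rebound at compile time */
    #else
    #define op(x) __raw_op(x)
    #endif

    int main(void)
    {
        printf("%d\n", op(1));           /* 2 natively; 1001 when built with -DPARAVIRT */
        return 0;
    }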
-+/************************************************/
-+/* Instructions paravirtualized for performance */
-+/************************************************/
-+
-+/* Xen uses memory-mapped virtual privileged registers for access to many
-+ * performance-sensitive privileged registers. Some, like the processor
-+ * status register (psr), are broken up into multiple memory locations.
-+ * Others, like "pend", are abstractions based on privileged registers.
-+ * "Pend" is guaranteed to be set if reading cr.ivr would return a
-+ * (non-spurious) interrupt. */
-+#define xen_get_virtual_psr_i() (*(int *)(XSI_PSR_I))
-+#define xen_set_virtual_psr_i(_val) ({ *(int *)(XSI_PSR_I) = _val ? 1:0; })
-+#define xen_set_virtual_psr_ic(_val) ({ *(int *)(XSI_PSR_IC) = _val ? 1:0; })
-+#define xen_get_virtual_pend() (*(int *)(XSI_PEND))
-+
-+/* Hyperprivops are "break" instructions with a well-defined API.
-+ * In particular, the virtual psr.ic bit must be off; in this way
-+ * it is guaranteed never to conflict with a Linux break instruction.
-+ * Normally, this is done in a Xen stub, but this one is frequent enough
-+ * that we inline it. */
-+#define xen_hyper_ssm_i() \
-+({ \
-+ xen_set_virtual_psr_i(0); \
-+ xen_set_virtual_psr_ic(0); \
-+ XEN_HYPER_SSM_I; \
-+})
-+
-+/* Turning off interrupts can be paravirtualized simply by writing
-+ * to a memory-mapped virtual psr.i bit (implemented as a 16-bit bool) */
-+#define xen_rsm_i() xen_set_virtual_psr_i(0)
-+
-+/* Turning on interrupts is a bit more complicated: write to the
-+ * memory-mapped virtual psr.i bit first (to avoid a race condition),
-+ * then if any interrupts were pending, we have to execute a hyperprivop
-+ * to ensure the pending interrupt gets delivered; else we're done! */
-+#define xen_ssm_i() \
-+({ \
-+ int old = xen_get_virtual_psr_i(); \
-+ xen_set_virtual_psr_i(1); \
-+ if (!old && xen_get_virtual_pend()) xen_hyper_ssm_i(); \
-+})
-+
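Note the ordering inside xen_ssm_i(): the shared virtual psr.i bit is written before the pending check, and the SSM_I hyperprivop is only paid when an interrupt was actually pending while the bit was off. A user-space model of that control flow — the shared-page fields become plain ints, and deliver_pending() stands in for the XEN_HYPER_SSM_I break:

    #include <stdio.h>

    static int virt_psr_i;   /* models *(int *)XSI_PSR_I */
    static int virt_pend;    /* models *(int *)XSI_PEND  */

    static void deliver_pending(void) { puts("hyperprivop: deliver IRQ"); }

    static void rsm_i(void) { virt_psr_i = 0; }   /* disable: a single store */

    static void ssm_i(void)                        /* enable: store, then check */
    {
        int old = virt_psr_i;
        virt_psr_i = 1;
        if (!old && virt_pend)
            deliver_pending();
    }

    int main(void)
    {
        rsm_i();
        virt_pend = 1;   /* an interrupt arrives while masked */
        ssm_i();         /* prints: hyperprivop: deliver IRQ */
        return 0;
    }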
-+#define xen_ia64_intrin_local_irq_restore(x) \
-+{ \
-+ if (running_on_xen) { \
-+ if ((x) & IA64_PSR_I) { xen_ssm_i(); } \
-+ else { xen_rsm_i(); } \
-+ } \
-+ else __ia64_intrin_local_irq_restore((x)); \
-+}
-+
-+#define xen_get_psr_i() \
-+( \
-+ (running_on_xen) ? \
-+ (xen_get_virtual_psr_i() ? IA64_PSR_I : 0) \
-+ : __ia64_get_psr_i() \
-+)
-+
-+#define xen_ia64_ssm(mask) \
-+{ \
-+ if ((mask)==IA64_PSR_I) { \
-+ if (running_on_xen) { xen_ssm_i(); } \
-+ else { __ia64_ssm(mask); } \
-+ } \
-+ else { __ia64_ssm(mask); } \
-+}
-+
-+#define xen_ia64_rsm(mask) \
-+{ \
-+ if ((mask)==IA64_PSR_I) { \
-+ if (running_on_xen) { xen_rsm_i(); } \
-+ else { __ia64_rsm(mask); } \
-+ } \
-+ else { __ia64_rsm(mask); } \
-+}
-+
-+
-+/* Although all privileged operations can be left to trap and will
-+ * be properly handled by Xen, some are frequent enough that we use
-+ * hyperprivops for performance. */
-+
-+extern unsigned long xen_get_ivr(void);
-+extern unsigned long xen_get_tpr(void);
-+extern void xen_set_itm(unsigned long);
-+extern void xen_set_tpr(unsigned long);
-+extern void xen_eoi(void);
-+extern void xen_set_rr(unsigned long index, unsigned long val);
-+extern unsigned long xen_get_rr(unsigned long index);
-+extern void xen_set_kr(unsigned long index, unsigned long val);
-+
-+/* Note: It may look wrong to test for running_on_xen in each case.
-+ * However regnum is always a constant so, as written, the compiler
-+ * eliminates the switch statement, whereas running_on_xen must be
-+ * tested dynamically. */
-+#define xen_ia64_getreg(regnum) \
-+({ \
-+ __u64 ia64_intri_res; \
-+ \
-+ switch(regnum) { \
-+ case _IA64_REG_CR_IVR: \
-+ ia64_intri_res = (running_on_xen) ? \
-+ xen_get_ivr() : \
-+ __ia64_getreg(regnum); \
-+ break; \
-+ case _IA64_REG_CR_TPR: \
-+ ia64_intri_res = (running_on_xen) ? \
-+ xen_get_tpr() : \
-+ __ia64_getreg(regnum); \
-+ break; \
-+ case _IA64_REG_AR_EFLAG: \
-+ ia64_intri_res = (running_on_xen) ? \
-+ xen_get_eflag() : \
-+ __ia64_getreg(regnum); \
-+ break; \
-+ default: \
-+ ia64_intri_res = __ia64_getreg(regnum); \
-+ break; \
-+ } \
-+ ia64_intri_res; \
-+})
-+
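As the note above says, regnum is always a compile-time constant at the call sites, so the switch folds away entirely and only the dynamic running_on_xen test survives. A stand-alone model of that claim (GCC statement expressions, as in the original; all names and values illustrative — build with -O2 and inspect the assembly to see the switch disappear):

    #include <stdio.h>

    enum { REG_IVR = 1, REG_TPR = 2 };

    static int running_on_xen = 1;
    static unsigned long xen_get_tpr(void)      { return 0x55; }
    static unsigned long raw_getreg(int regnum) { return 0xaa0 + regnum; }

    #define get_reg(regnum)                                              \
    ({                                                                   \
        unsigned long v;                                                 \
        switch (regnum) {                                                \
        case REG_TPR:                                                    \
            v = running_on_xen ? xen_get_tpr() : raw_getreg(regnum);     \
            break;                                                       \
        default:                                                         \
            v = raw_getreg(regnum);                                      \
            break;                                                       \
        }                                                                \
        v;                                                               \
    })

    int main(void)
    {
        printf("%#lx\n", get_reg(REG_TPR));  /* 0x55: the Xen path */
        return 0;
    }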
-+#define xen_ia64_setreg(regnum,val) \
-+({ \
-+ switch(regnum) { \
-+ case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7: \
-+ (running_on_xen) ? \
-+ xen_set_kr((regnum-_IA64_REG_AR_KR0), val) : \
-+ __ia64_setreg(regnum,val); \
-+ break; \
-+ case _IA64_REG_CR_ITM: \
-+ (running_on_xen) ? \
-+ xen_set_itm(val) : \
-+ __ia64_setreg(regnum,val); \
-+ break; \
-+ case _IA64_REG_CR_TPR: \
-+ (running_on_xen) ? \
-+ xen_set_tpr(val) : \
-+ __ia64_setreg(regnum,val); \
-+ break; \
-+ case _IA64_REG_CR_EOI: \
-+ (running_on_xen) ? \
-+ xen_eoi() : \
-+ __ia64_setreg(regnum,val); \
-+ break; \
-+ case _IA64_REG_AR_EFLAG: \
-+ (running_on_xen) ? \
-+ xen_set_eflag(val) : \
-+ __ia64_setreg(regnum,val); \
-+ break; \
-+ default: \
-+ __ia64_setreg(regnum,val); \
-+ break; \
-+ } \
-+})
-+
-+#define ia64_ssm xen_ia64_ssm
-+#define ia64_rsm xen_ia64_rsm
-+#define ia64_intrin_local_irq_restore xen_ia64_intrin_local_irq_restore
-+#define ia64_ptcga xen_ptcga
-+#define ia64_set_rr(index,val) xen_set_rr(index,val)
-+#define ia64_get_rr(index) xen_get_rr(index)
-+#define ia64_getreg xen_ia64_getreg
-+#define ia64_setreg xen_ia64_setreg
-+#define ia64_get_psr_i xen_get_psr_i
-+
-+/* The remainder of these are not performance-sensitive, so it's
-+ * OK to not paravirtualize and just take a privop trap and emulate */
-+#define ia64_hint __ia64_hint
-+#define ia64_set_pmd __ia64_set_pmd
-+#define ia64_itci __ia64_itci
-+#define ia64_itcd __ia64_itcd
-+#define ia64_itri __ia64_itri
-+#define ia64_itrd __ia64_itrd
-+#define ia64_tpa __ia64_tpa
-+#define ia64_set_ibr __ia64_set_ibr
-+#define ia64_set_pkr __ia64_set_pkr
-+#define ia64_set_pmc __ia64_set_pmc
-+#define ia64_get_ibr __ia64_get_ibr
-+#define ia64_get_pkr __ia64_get_pkr
-+#define ia64_get_pmc __ia64_get_pmc
-+#define ia64_ptce __ia64_ptce
-+#define ia64_ptcl __ia64_ptcl
-+#define ia64_ptri __ia64_ptri
-+#define ia64_ptrd __ia64_ptrd
-+
-+#endif /* !__ASSEMBLY__ */
-+
-+/* These routines utilize privilege-sensitive or performance-sensitive
-+ * privileged instructions, so the code must be replaced with
-+ * paravirtualized versions */
-+#define ia64_pal_halt_light xen_pal_halt_light
-+#define ia64_leave_kernel xen_leave_kernel
-+#define ia64_leave_syscall xen_leave_syscall
-+#define ia64_trace_syscall xen_trace_syscall
-+#define ia64_switch_to xen_switch_to
-+#define ia64_pal_call_static xen_pal_call_static
-+
-+#endif /* _ASM_IA64_XEN_PRIVOP_H */
-diff --git a/include/asm-um/page.h b/include/asm-um/page.h
-index 0229814..ee9bac8 100644
---- a/include/asm-um/page.h
-+++ b/include/asm-um/page.h
-@@ -118,7 +118,7 @@ extern unsigned long uml_physmem;
- extern struct page *arch_validate(struct page *page, gfp_t mask, int order);
- #define HAVE_ARCH_VALIDATE
-
--extern void arch_free_page(struct page *page, int order);
-+extern int arch_free_page(struct page *page, int order);
- #define HAVE_ARCH_FREE_PAGE
-
- #include <asm-generic/page.h>
-diff --git a/include/asm-x86_64/apic.h b/include/asm-x86_64/apic.h
-index bdbd893..79db75f 100644
---- a/include/asm-x86_64/apic.h
-+++ b/include/asm-x86_64/apic.h
-@@ -105,11 +105,13 @@ extern int disable_timer_pin_1;
-
- extern void setup_threshold_lvt(unsigned long lvt_off);
-
-+#ifndef CONFIG_XEN
- void smp_send_timer_broadcast_ipi(void);
- void switch_APIC_timer_to_ipi(void *cpumask);
- void switch_ipi_to_APIC_timer(void *cpumask);
-
- #define ARCH_APICTIMER_STOPS_ON_C3 1
-+#endif
-
- #endif /* CONFIG_X86_LOCAL_APIC */
-
-diff --git a/include/asm-x86_64/arch_hooks.h b/include/asm-x86_64/arch_hooks.h
-new file mode 100644
-index 0000000..b0647d6
---- /dev/null
-+++ b/include/asm-x86_64/arch_hooks.h
-@@ -0,0 +1,31 @@
-+#ifndef _ASM_ARCH_HOOKS_H
-+#define _ASM_ARCH_HOOKS_H
-+
-+#include <linux/interrupt.h>
-+
-+/*
-+ * linux/include/asm/arch_hooks.h
-+ *
-+ * define the architecture specific hooks
-+ */
-+
-+#ifdef CONFIG_XEN
-+
-+/* these aren't arch hooks, they are generic routines
-+ * that can be used by the hooks */
-+extern void init_ISA_irqs(void);
-+extern void apic_intr_init(void);
-+extern void smp_intr_init(void);
-+extern irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs);
-+
-+/* these are the defined hooks */
-+extern void intr_init_hook(void);
-+extern void pre_intr_init_hook(void);
-+extern void pre_setup_arch_hook(void);
-+extern void trap_init_hook(void);
-+extern void time_init_hook(void);
-+extern void mca_nmi_hook(void);
-+
-+#endif
-+
-+#endif
-diff --git a/include/asm-x86_64/bootsetup.h b/include/asm-x86_64/bootsetup.h
-index b829f7b..d1c3fb8 100644
---- a/include/asm-x86_64/bootsetup.h
-+++ b/include/asm-x86_64/bootsetup.h
-@@ -2,6 +2,8 @@
- #ifndef _X86_64_BOOTSETUP_H
- #define _X86_64_BOOTSETUP_H 1
-
-+#ifndef CONFIG_XEN
-+
- #define BOOT_PARAM_SIZE 4096
- extern char x86_boot_params[BOOT_PARAM_SIZE];
-
-@@ -31,6 +33,14 @@ extern char x86_boot_params[BOOT_PARAM_S
- #define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
- #define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
- #define EDD_BUF ((struct edd_info *) (PARAM+EDDBUF))
-+
-+#else
-+
-+#define INITRD_START (__pa(xen_start_info->mod_start))
-+#define INITRD_SIZE (xen_start_info->mod_len)
-+
-+#endif
-+
- #define COMMAND_LINE saved_command_line
-
- #define RAMDISK_IMAGE_START_MASK 0x07FF
-diff --git a/include/asm-x86_64/desc.h b/include/asm-x86_64/desc.h
-index eb7723a..0e1c0a4 100644
---- a/include/asm-x86_64/desc.h
-+++ b/include/asm-x86_64/desc.h
-@@ -67,16 +67,36 @@ struct desc_ptr {
- unsigned long address;
- } __attribute__((packed)) ;
-
-+#ifndef CONFIG_XEN
-+
- #define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8))
- #define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8))
- #define clear_LDT() asm volatile("lldt %w0"::"r" (0))
-
-+#else
-+
-+static inline void clear_LDT(void)
-+{
-+ get_cpu();
-+
-+ /*
-+ * NB. We load the default_ldt for lcall7/27 handling on demand, as
-+	 * it slows down context switching. No one uses it anyway.
-+ */
-+ xen_set_ldt(0UL, 0);
-+ put_cpu();
-+}
-+
-+#endif
-+
- /*
- * This is the ldt that every process will get unless we need
- * something other than this.
- */
- extern struct desc_struct default_ldt[];
-+#ifndef CONFIG_X86_NO_IDT
- extern struct gate_struct idt_table[];
-+#endif
- extern struct desc_ptr cpu_gdt_descr[];
-
- /* the cpu gdt accessor */
-@@ -99,6 +119,7 @@ static inline void _set_gate(void *adr,
- memcpy(adr, &s, 16);
- }
-
-+#ifndef CONFIG_X86_NO_IDT
- static inline void set_intr_gate(int nr, void *func)
- {
- BUG_ON((unsigned)nr > 0xFF);
-@@ -117,10 +138,13 @@ static inline void set_system_gate(int n
- _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, 0);
- }
-
-+#ifndef CONFIG_XEN
-+
- static inline void set_system_gate_ist(int nr, void *func, unsigned ist)
- {
- _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, ist);
- }
-+#endif
-
- static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned type,
- unsigned size)
-@@ -138,6 +162,7 @@ static inline void set_tssldt_descriptor
- memcpy(ptr, &d, 16);
- }
-
-+#ifndef CONFIG_X86_NO_TSS
- static inline void set_tss_desc(unsigned cpu, void *addr)
- {
- /*
-@@ -151,6 +176,7 @@ static inline void set_tss_desc(unsigned
- (unsigned long)addr, DESC_TSS,
- IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1);
- }
-+#endif
-
- static inline void set_ldt_desc(unsigned cpu, void *addr, int size)
- {
-@@ -168,6 +194,8 @@ static inline void set_seg_base(unsigned
- d->base2 = (addr >> 24) & 0xff;
- }
-
-+#endif
-+
- #define LDT_entry_a(info) \
- ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
- /* Don't allow setting of the lm bit. It is useless anyways because
-@@ -202,10 +230,18 @@ static inline void set_seg_base(unsigned
-
- static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
- {
-+#ifndef CONFIG_XEN
- u64 *gdt = (u64 *)(cpu_gdt(cpu) + GDT_ENTRY_TLS_MIN);
- gdt[0] = t->tls_array[0];
- gdt[1] = t->tls_array[1];
- gdt[2] = t->tls_array[2];
-+#else
-+#define C(i) \
-+ HYPERVISOR_update_descriptor(virt_to_machine(&cpu_gdt(cpu)[GDT_ENTRY_TLS_MIN + i]), t->tls_array[i])
-+
-+ C(0); C(1); C(2);
-+#undef C
-+#endif
- }
-
- /*
-@@ -219,9 +255,13 @@ static inline void load_LDT_nolock (mm_c
- clear_LDT();
- return;
- }
--
-+
-+#ifndef CONFIG_XEN
- set_ldt_desc(cpu, pc->ldt, count);
- load_LDT_desc();
-+#else
-+ xen_set_ldt((unsigned long)pc->ldt, count);
-+#endif
- }
-
- static inline void load_LDT(mm_context_t *pc)
-diff --git a/include/asm-x86_64/dma-mapping.h b/include/asm-x86_64/dma-mapping.h
-index 49a81a6..4e47ec0 100644
---- a/include/asm-x86_64/dma-mapping.h
-+++ b/include/asm-x86_64/dma-mapping.h
-@@ -8,6 +8,11 @@
-
- #include <linux/config.h>
-
-+#ifdef CONFIG_XEN
-+#include <linux/mm.h>
-+#include <asm/cache.h>
-+#endif
-+
- #include <asm/scatterlist.h>
- #include <asm/io.h>
- #include <asm/swiotlb.h>
-@@ -56,6 +61,8 @@ extern dma_addr_t bad_dma_address;
- extern struct dma_mapping_ops* dma_ops;
- extern int iommu_merge;
-
-+#ifndef CONFIG_XEN
-+
- static inline int dma_mapping_error(dma_addr_t dma_addr)
- {
- if (dma_ops->mapping_error)
-@@ -64,11 +71,40 @@ static inline int dma_mapping_error(dma_
- return (dma_addr == bad_dma_address);
- }
-
-+#else
-+
-+extern int dma_mapping_error(dma_addr_t dma_addr);
-+
-+static inline int
-+address_needs_mapping(struct device *hwdev, dma_addr_t addr)
-+{
-+ dma_addr_t mask = 0xffffffff;
-+ /* If the device has a mask, use it, otherwise default to 32 bits */
-+ if (hwdev && hwdev->dma_mask)
-+ mask = *hwdev->dma_mask;
-+ return (addr & ~mask) != 0;
-+}
-+
-+static inline int
-+range_straddles_page_boundary(void *p, size_t size)
-+{
-+ extern unsigned long *contiguous_bitmap;
-+ return (((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
-+ !test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap));
-+}
-+
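This test flags a buffer that crosses a page boundary and whose starting page is not recorded as machine-contiguous in contiguous_bitmap: under Xen, consecutive pseudo-physical pages need not be adjacent in machine memory, so such a buffer cannot be handed to a device as a single DMA segment. A user-space model of the arithmetic, assuming 4 KiB pages and collapsing the bitmap lookup to a flag:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    static int straddles(uintptr_t p, size_t size, int first_page_contiguous)
    {
        /* crosses a page boundary AND the run is not machine-contiguous */
        return ((((p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
                !first_page_contiguous);
    }

    int main(void)
    {
        assert(!straddles(0x1000, 16, 0));   /* fits in one page */
        assert( straddles(0x1ff8, 16, 0));   /* crosses, not contiguous */
        assert(!straddles(0x1ff8, 16, 1));   /* crosses, but contiguous */
        return 0;
    }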
-+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-+
-+#endif
-+
- extern void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp);
- extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle);
-
-+#ifndef CONFIG_XEN
-+
- static inline dma_addr_t
- dma_map_single(struct device *hwdev, void *ptr, size_t size,
- int direction)
-@@ -83,11 +119,33 @@ dma_unmap_single(struct device *dev, dma
- dma_ops->unmap_single(dev, addr, size, direction);
- }
-
-+#else
-+
-+extern dma_addr_t dma_map_single(struct device *hwdev, void *ptr, size_t size,
-+ enum dma_data_direction direction);
-+extern void dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
-+ enum dma_data_direction direction);
-+
-+#endif
-+
-+#ifndef CONFIG_XEN
-+
- #define dma_map_page(dev,page,offset,size,dir) \
- dma_map_single((dev), page_address(page)+(offset), (size), (dir))
-
- #define dma_unmap_page dma_unmap_single
-
-+#else
-+
-+extern dma_addr_t
-+dma_map_page(struct device *hwdev, struct page *page, unsigned long offset,
-+ size_t size, enum dma_data_direction direction);
-+extern void
-+dma_unmap_page(struct device *hwdev, dma_addr_t dma_address, size_t size,
-+ enum dma_data_direction direction);
-+
-+#endif
-+
- static inline void
- dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
- size_t size, int direction)
-@@ -150,19 +208,31 @@ dma_sync_sg_for_device(struct device *hw
- flush_write_buffers();
- }
-
-+#ifndef CONFIG_XEN
-+
- static inline int
--dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction)
-+dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-+ enum dma_data_direction direction)
- {
- return dma_ops->map_sg(hwdev, sg, nents, direction);
- }
-
- static inline void
- dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-- int direction)
-+ enum dma_data_direction direction)
- {
- dma_ops->unmap_sg(hwdev, sg, nents, direction);
- }
-
-+#else
-+
-+extern int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
-+ int nents, enum dma_data_direction direction);
-+extern void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg,
-+ int nents, enum dma_data_direction direction);
-+
-+#endif
-+
- extern int dma_supported(struct device *hwdev, u64 mask);
-
- /* same for gart, swiotlb, and nommu */
-@@ -173,7 +243,17 @@ static inline int dma_get_cache_alignmen
-
- #define dma_is_consistent(h) 1
-
-+#ifndef CONFIG_XEN
- extern int dma_set_mask(struct device *dev, u64 mask);
-+#else
-+static inline int dma_set_mask(struct device *dev, u64 mask)
-+{
-+ if (!dev->dma_mask || !dma_supported(dev, mask))
-+ return -EIO;
-+ *dev->dma_mask = mask;
-+ return 0;
-+}
-+#endif
-
- static inline void
- dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
-@@ -184,4 +264,20 @@ dma_cache_sync(void *vaddr, size_t size,
- extern struct device fallback_dev;
- extern int panic_on_overflow;
-
-+#ifdef CONFIG_XEN
-+
-+#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
-+extern int
-+dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
-+ dma_addr_t device_addr, size_t size, int flags);
-+
-+extern void
-+dma_release_declared_memory(struct device *dev);
-+
-+extern void *
-+dma_mark_declared_memory_occupied(struct device *dev,
-+ dma_addr_t device_addr, size_t size);
-+
-+#endif
-+
- #endif /* _X8664_DMA_MAPPING_H */
-diff --git a/include/asm-x86_64/fixmap.h b/include/asm-x86_64/fixmap.h
-index 7b286bd..beaf264 100644
---- a/include/asm-x86_64/fixmap.h
-+++ b/include/asm-x86_64/fixmap.h
-@@ -17,6 +17,10 @@
- #include <asm/page.h>
- #include <asm/vsyscall.h>
- #include <asm/vsyscall32.h>
-+#include <asm/acpi.h>
-+#ifdef CONFIG_XEN
-+#include <xen/gnttab.h>
-+#endif
-
- /*
- * Here we define all the compile-time 'special' virtual
-@@ -45,6 +49,18 @@ enum fixed_addresses {
- FIX_IO_APIC_BASE_0,
- FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
- #endif
-+#ifdef CONFIG_XEN
-+#ifdef CONFIG_ACPI
-+ FIX_ACPI_BEGIN,
-+ FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
-+#endif
-+ FIX_SHARED_INFO,
-+ FIX_GNTTAB_BEGIN,
-+ FIX_GNTTAB_END = FIX_GNTTAB_BEGIN + NR_GRANT_FRAMES - 1,
-+#define NR_FIX_ISAMAPS 256
-+ FIX_ISAMAP_END,
-+ FIX_ISAMAP_BEGIN = FIX_ISAMAP_END + NR_FIX_ISAMAPS - 1,
-+#endif
- __end_of_fixed_addresses
- };
-
-@@ -59,6 +75,9 @@ extern void __set_fixmap (enum fixed_add
- #define set_fixmap_nocache(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
-
-+#define clear_fixmap(idx) \
-+ __set_fixmap(idx, 0, __pgprot(0))
-+
- #define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE)
- #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
- #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
-diff --git a/include/asm-x86_64/floppy.h b/include/asm-x86_64/floppy.h
-index af7ded6..a494286 100644
---- a/include/asm-x86_64/floppy.h
-+++ b/include/asm-x86_64/floppy.h
-@@ -6,13 +6,15 @@
- * for more details.
- *
- * Copyright (C) 1995
-+ *
-+ * Modifications for Xen are Copyright (c) 2004, Keir Fraser.
- */
- #ifndef __ASM_X86_64_FLOPPY_H
- #define __ASM_X86_64_FLOPPY_H
-
-+#ifndef CONFIG_XEN
- #include <linux/vmalloc.h>
-
--
- /*
- * The DMA channel used by the floppy controller cannot access data at
- * addresses >= 16MB
-@@ -25,7 +27,13 @@
- (!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
-
- #define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1)
--
-+#else
-+/* XEN: Hit DMA paths on the head. This trick is from asm-m68k/floppy.h. */
-+#include <asm/dma.h>
-+#undef MAX_DMA_ADDRESS
-+#define MAX_DMA_ADDRESS 0
-+#define CROSS_64KB(a,s) (0)
-+#endif
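Redefining MAX_DMA_ADDRESS as 0 makes every buffer look unreachable by ISA DMA, pushing the floppy driver onto its virtual-DMA path, and CROSS_64KB is hard-wired to 0 because the 64 KiB-crossing restriction only exists for real ISA DMA controllers. The check being stubbed out, modeled in user space (the native version additionally short-circuits when use_virtual_dma is set):

    #include <assert.h>

    #define K_64 0x10000UL
    /* a transfer crosses a 64 KiB boundary when its first and last bytes
     * fall in different 64 KiB windows */
    #define CROSS_64KB(a, s) \
        (((unsigned long)(a) / K_64) != (((unsigned long)(a) + (s) - 1) / K_64))

    int main(void)
    {
        assert(!CROSS_64KB(0xfff0UL, 0x10));  /* ends at 0xffff: same window */
        assert( CROSS_64KB(0xfff0UL, 0x11));  /* ends at 0x10000: crosses */
        return 0;
    }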
-
- #define SW fd_routine[use_virtual_dma&1]
- #define CSW fd_routine[can_use_virtual_dma & 1]
-@@ -43,7 +51,9 @@
- #define fd_dma_mem_alloc(size) SW._dma_mem_alloc(size)
- #define fd_dma_setup(addr, size, mode, io) SW._dma_setup(addr, size, mode, io)
-
-+#ifndef CONFIG_XEN
- #define FLOPPY_CAN_FALLBACK_ON_NODMA
-+#endif
-
- static int virtual_dma_count;
- static int virtual_dma_residue;
-@@ -153,11 +163,14 @@ static int fd_request_irq(void)
-
- }
-
-+#ifndef CONFIG_XEN
-+
- static unsigned long dma_mem_alloc(unsigned long size)
- {
- return __get_dma_pages(GFP_KERNEL,get_order(size));
- }
-
-+#endif
-
- static unsigned long vdma_mem_alloc(unsigned long size)
- {
-@@ -165,6 +178,8 @@ static unsigned long vdma_mem_alloc(unsi
-
- }
-
-+#ifndef CONFIG_XEN
-+
- #define nodma_mem_alloc(size) vdma_mem_alloc(size)
-
- static void _fd_dma_mem_free(unsigned long addr, unsigned long size)
-@@ -193,6 +208,11 @@ static void _fd_chose_dma_mode(char *add
-
- #define fd_chose_dma_mode(addr, size) _fd_chose_dma_mode(addr, size)
-
-+#else
-+
-+#define fd_dma_mem_free(addr, size) free_pages(addr, get_order(size))
-+
-+#endif
-
- static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
- {
-@@ -205,6 +225,7 @@ static int vdma_dma_setup(char *addr, un
- return 0;
- }
-
-+#ifndef CONFIG_XEN
- static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
- {
- #ifdef FLOPPY_SANITY_CHECK
-@@ -222,6 +243,7 @@ static int hard_dma_setup(char *addr, un
- enable_dma(FLOPPY_DMA);
- return 0;
- }
-+#endif
-
- static struct fd_routine_l {
- int (*_request_dma)(unsigned int dmanr, const char * device_id);
-@@ -231,11 +253,13 @@ static struct fd_routine_l {
- int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
- } fd_routine[] = {
- {
-+#ifndef CONFIG_XEN
- request_dma,
- free_dma,
- get_dma_residue,
- dma_mem_alloc,
- hard_dma_setup
-+#endif
- },
- {
- vdma_request_dma,
-@@ -247,7 +271,18 @@ static struct fd_routine_l {
- };
-
-
-+#ifndef CONFIG_XEN
- static int FDC1 = 0x3f0;
-+#else
-+/* XEN: This trick to force 'virtual DMA' is from include/asm-m68k/floppy.h. */
-+static int xen_floppy_init(void)
-+{
-+ use_virtual_dma = 1;
-+ can_use_virtual_dma = 1;
-+ return 0x3f0;
-+}
-+#define FDC1 xen_floppy_init()
-+#endif
- static int FDC2 = -1;
-
- /*
-@@ -278,7 +313,9 @@ static int FDC2 = -1;
-
- #define FLOPPY_MOTOR_MASK 0xf0
-
-+#ifndef CONFIG_XEN
- #define AUTO_DMA
-+#endif
-
- #define EXTRA_FLOPPY_PARAMS
-
-diff --git a/include/asm-x86_64/hw_irq.h b/include/asm-x86_64/hw_irq.h
-index 0df1715..d3f6028 100644
---- a/include/asm-x86_64/hw_irq.h
-+++ b/include/asm-x86_64/hw_irq.h
-@@ -33,6 +33,10 @@ struct hw_interrupt_type;
- */
- #define FIRST_EXTERNAL_VECTOR 0x20
-
-+#ifdef CONFIG_XEN
-+#define SYSCALL_VECTOR 0x80
-+#endif
-+
- #define IA32_SYSCALL_VECTOR 0x80
-
-
-@@ -49,6 +53,7 @@ struct hw_interrupt_type;
- */
- #define SPURIOUS_APIC_VECTOR 0xff
- #define ERROR_APIC_VECTOR 0xfe
-+#ifndef CONFIG_XEN
- #define RESCHEDULE_VECTOR 0xfd
- #define CALL_FUNCTION_VECTOR 0xfc
- /* fb free - please don't readd KDB here because it's useless
-@@ -60,6 +65,11 @@ struct hw_interrupt_type;
- #define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */
-
- #define NUM_INVALIDATE_TLB_VECTORS 8
-+#else
-+#define RESCHEDULE_VECTOR 0
-+#define CALL_FUNCTION_VECTOR 1
-+#define NR_IPIS 2
-+#endif
-
- /*
- * Local APIC timer IRQ vector is on a different priority level,
-@@ -130,7 +140,9 @@ __asm__( \
- "push $" #nr "-256 ; " \
- "jmp common_interrupt");
-
--#if defined(CONFIG_X86_IO_APIC)
-+#if defined(CONFIG_XEN)
-+extern void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i);
-+#elif defined(CONFIG_X86_IO_APIC)
- static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {
- if (IO_APIC_IRQ(i))
- send_IPI_self(IO_APIC_VECTOR(i));
-diff --git a/include/asm-x86_64/hypercall.h b/include/asm-x86_64/hypercall.h
-new file mode 100644
-index 0000000..87bbfb4
---- /dev/null
-+++ b/include/asm-x86_64/hypercall.h
-@@ -0,0 +1,323 @@
-+/******************************************************************************
-+ * hypercall.h
-+ *
-+ * Linux-specific hypervisor handling.
-+ *
-+ * Copyright (c) 2002-2004, K A Fraser
-+ *
-+ * 64-bit updates:
-+ * Benjamin Liu <benjamin.liu@intel.com>
-+ * Jun Nakajima <jun.nakajima@intel.com>
-+ *
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __HYPERCALL_H__
-+#define __HYPERCALL_H__
-+
-+#include <xen/interface/xen.h>
-+#include <xen/interface/sched.h>
-+
-+#define __STR(x) #x
-+#define STR(x) __STR(x)
-+
-+#define _hypercall0(type, name) \
-+({ \
-+ long __res; \
-+ asm volatile ( \
-+ "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-+ : "=a" (__res) \
-+ : \
-+ : "memory" ); \
-+ (type)__res; \
-+})
-+
-+#define _hypercall1(type, name, a1) \
-+({ \
-+ long __res, __ign1; \
-+ asm volatile ( \
-+ "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-+ : "=a" (__res), "=D" (__ign1) \
-+ : "1" ((long)(a1)) \
-+ : "memory" ); \
-+ (type)__res; \
-+})
-+
-+#define _hypercall2(type, name, a1, a2) \
-+({ \
-+ long __res, __ign1, __ign2; \
-+ asm volatile ( \
-+ "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-+ : "=a" (__res), "=D" (__ign1), "=S" (__ign2) \
-+ : "1" ((long)(a1)), "2" ((long)(a2)) \
-+ : "memory" ); \
-+ (type)__res; \
-+})
-+
-+#define _hypercall3(type, name, a1, a2, a3) \
-+({ \
-+ long __res, __ign1, __ign2, __ign3; \
-+ asm volatile ( \
-+ "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-+ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \
-+ "=d" (__ign3) \
-+ : "1" ((long)(a1)), "2" ((long)(a2)), \
-+ "3" ((long)(a3)) \
-+ : "memory" ); \
-+ (type)__res; \
-+})
-+
-+#define _hypercall4(type, name, a1, a2, a3, a4) \
-+({ \
-+ long __res, __ign1, __ign2, __ign3; \
-+ asm volatile ( \
-+ "movq %7,%%r10; " \
-+ "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-+ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \
-+ "=d" (__ign3) \
-+ : "1" ((long)(a1)), "2" ((long)(a2)), \
-+ "3" ((long)(a3)), "g" ((long)(a4)) \
-+ : "memory", "r10" ); \
-+ (type)__res; \
-+})
-+
-+#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
-+({ \
-+ long __res, __ign1, __ign2, __ign3; \
-+ asm volatile ( \
-+ "movq %7,%%r10; movq %8,%%r8; " \
-+ "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-+ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \
-+ "=d" (__ign3) \
-+ : "1" ((long)(a1)), "2" ((long)(a2)), \
-+ "3" ((long)(a3)), "g" ((long)(a4)), \
-+ "g" ((long)(a5)) \
-+ : "memory", "r10", "r8" ); \
-+ (type)__res; \
-+})
-+
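Unlike the ia64 "break 0x1000" stubs earlier in this patch, the x86_64 versions call through a hypercall transfer page: hypercall N lives in a 32-byte stub at hypercall_page + N * 32, arguments travel in rdi, rsi, rdx, r10 and r8 (the kernel's syscall convention, with r10 standing in for rcx), and the result returns in rax. A sketch of the slot arithmetic that the STR()-pasted call target encodes at assembly time:

    #include <stdint.h>

    extern char hypercall_page[];  /* populated by the hypervisor at boot */

    /* resolve hypercall N's stub; the asm above computes the same address
     * as an assembly-time constant via STR(__HYPERVISOR_##name) */
    static inline void *hypercall_slot(unsigned int nr)
    {
        return hypercall_page + nr * 32;  /* one 32-byte stub per hypercall */
    }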
-+static inline int
-+HYPERVISOR_set_trap_table(
-+ trap_info_t *table)
-+{
-+ return _hypercall1(int, set_trap_table, table);
-+}
-+
-+static inline int
-+HYPERVISOR_mmu_update(
-+ mmu_update_t *req, int count, int *success_count, domid_t domid)
-+{
-+ return _hypercall4(int, mmu_update, req, count, success_count, domid);
-+}
-+
-+static inline int
-+HYPERVISOR_mmuext_op(
-+ struct mmuext_op *op, int count, int *success_count, domid_t domid)
-+{
-+ return _hypercall4(int, mmuext_op, op, count, success_count, domid);
-+}
-+
-+static inline int
-+HYPERVISOR_set_gdt(
-+ unsigned long *frame_list, int entries)
-+{
-+ return _hypercall2(int, set_gdt, frame_list, entries);
-+}
-+
-+static inline int
-+HYPERVISOR_stack_switch(
-+ unsigned long ss, unsigned long esp)
-+{
-+ return _hypercall2(int, stack_switch, ss, esp);
-+}
-+
-+static inline int
-+HYPERVISOR_set_callbacks(
-+ unsigned long event_address, unsigned long failsafe_address,
-+ unsigned long syscall_address)
-+{
-+ return _hypercall3(int, set_callbacks,
-+ event_address, failsafe_address, syscall_address);
-+}
-+
-+static inline int
-+HYPERVISOR_fpu_taskswitch(
-+ int set)
-+{
-+ return _hypercall1(int, fpu_taskswitch, set);
-+}
-+
-+static inline int
-+HYPERVISOR_sched_op(
-+ int cmd, unsigned long arg)
-+{
-+ return _hypercall2(int, sched_op, cmd, arg);
-+}
-+
-+static inline long
-+HYPERVISOR_set_timer_op(
-+ u64 timeout)
-+{
-+ return _hypercall1(long, set_timer_op, timeout);
-+}
-+
-+static inline int
-+HYPERVISOR_dom0_op(
-+ dom0_op_t *dom0_op)
-+{
-+ dom0_op->interface_version = DOM0_INTERFACE_VERSION;
-+ return _hypercall1(int, dom0_op, dom0_op);
-+}
-+
-+static inline int
-+HYPERVISOR_set_debugreg(
-+ int reg, unsigned long value)
-+{
-+ return _hypercall2(int, set_debugreg, reg, value);
-+}
-+
-+static inline unsigned long
-+HYPERVISOR_get_debugreg(
-+ int reg)
-+{
-+ return _hypercall1(unsigned long, get_debugreg, reg);
-+}
-+
-+static inline int
-+HYPERVISOR_update_descriptor(
-+ unsigned long ma, unsigned long word)
-+{
-+ return _hypercall2(int, update_descriptor, ma, word);
-+}
-+
-+static inline int
-+HYPERVISOR_memory_op(
-+ unsigned int cmd, void *arg)
-+{
-+ return _hypercall2(int, memory_op, cmd, arg);
-+}
-+
-+static inline int
-+HYPERVISOR_multicall(
-+ void *call_list, int nr_calls)
-+{
-+ return _hypercall2(int, multicall, call_list, nr_calls);
-+}
-+
-+static inline int
-+HYPERVISOR_update_va_mapping(
-+ unsigned long va, pte_t new_val, unsigned long flags)
-+{
-+ return _hypercall3(int, update_va_mapping, va, new_val.pte, flags);
-+}
-+
-+static inline int
-+HYPERVISOR_event_channel_op(
-+ void *op)
-+{
-+ return _hypercall1(int, event_channel_op, op);
-+}
-+
-+static inline int
-+HYPERVISOR_xen_version(
-+ int cmd, void *arg)
-+{
-+ return _hypercall2(int, xen_version, cmd, arg);
-+}
-+
-+static inline int
-+HYPERVISOR_console_io(
-+ int cmd, int count, char *str)
-+{
-+ return _hypercall3(int, console_io, cmd, count, str);
-+}
-+
-+static inline int
-+HYPERVISOR_physdev_op(
-+ void *physdev_op)
-+{
-+ return _hypercall1(int, physdev_op, physdev_op);
-+}
-+
-+static inline int
-+HYPERVISOR_grant_table_op(
-+ unsigned int cmd, void *uop, unsigned int count)
-+{
-+ return _hypercall3(int, grant_table_op, cmd, uop, count);
-+}
-+
-+static inline int
-+HYPERVISOR_update_va_mapping_otherdomain(
-+ unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
-+{
-+ return _hypercall4(int, update_va_mapping_otherdomain, va,
-+ new_val.pte, flags, domid);
-+}
-+
-+static inline int
-+HYPERVISOR_vm_assist(
-+ unsigned int cmd, unsigned int type)
-+{
-+ return _hypercall2(int, vm_assist, cmd, type);
-+}
-+
-+static inline int
-+HYPERVISOR_vcpu_op(
-+ int cmd, int vcpuid, void *extra_args)
-+{
-+ return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
-+}
-+
-+static inline int
-+HYPERVISOR_set_segment_base(
-+ int reg, unsigned long value)
-+{
-+ return _hypercall2(int, set_segment_base, reg, value);
-+}
-+
-+static inline int
-+HYPERVISOR_suspend(
-+ unsigned long srec)
-+{
-+ return _hypercall3(int, sched_op, SCHEDOP_shutdown,
-+ SHUTDOWN_suspend, srec);
-+}
-+
-+static inline int
-+HYPERVISOR_nmi_op(
-+ unsigned long op,
-+ unsigned long arg)
-+{
-+ return _hypercall2(int, nmi_op, op, arg);
-+}
-+
-+#endif /* __HYPERCALL_H__ */
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/include/asm-x86_64/hypervisor.h b/include/asm-x86_64/hypervisor.h
-new file mode 100644
-index 0000000..dc3e5e5
---- /dev/null
-+++ b/include/asm-x86_64/hypervisor.h
-@@ -0,0 +1,2 @@
-+
-+#include <asm-i386/hypervisor.h>
-diff --git a/include/asm-x86_64/i387.h b/include/asm-x86_64/i387.h
-index 876eb9a..c58262d 100644
---- a/include/asm-x86_64/i387.h
-+++ b/include/asm-x86_64/i387.h
-@@ -170,10 +170,15 @@ static inline void kernel_fpu_end(void)
- preempt_enable();
- }
-
--static inline void save_init_fpu(struct task_struct *tsk)
-+static inline void __save_init_fpu(struct task_struct *tsk)
- {
- __fxsave_clear(tsk);
- task_thread_info(tsk)->status &= ~TS_USEDFPU;
-+}
-+
-+static inline void save_init_fpu(struct task_struct *tsk)
-+{
-+ __save_init_fpu(tsk);
- stts();
- }
-
-diff --git a/include/asm-x86_64/io.h b/include/asm-x86_64/io.h
-index 9dac18d..78a85cd 100644
---- a/include/asm-x86_64/io.h
-+++ b/include/asm-x86_64/io.h
-@@ -2,6 +2,9 @@
- #define _ASM_IO_H
-
- #include <linux/config.h>
-+#ifdef CONFIG_XEN
-+#include <asm/fixmap.h>
-+#endif
-
- /*
- * This file contains the definitions for the x86 IO instructions
-@@ -119,12 +122,33 @@ static inline void * phys_to_virt(unsign
- {
- return __va(address);
- }
-+
-+#ifdef CONFIG_XEN
-+#define virt_to_bus(_x) phys_to_machine(__pa(_x))
-+#define bus_to_virt(_x) __va(machine_to_phys(_x))
-+#endif
-+
- #endif
-
- /*
- * Change "struct page" to physical address.
- */
-+#ifndef CONFIG_XEN
- #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-+#else
-+#define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-+#define page_to_phys(page) (phys_to_machine(page_to_pseudophys(page)))
-+
-+#define bio_to_pseudophys(bio) (page_to_pseudophys(bio_page((bio))) + \
-+ (unsigned long) bio_offset((bio)))
-+#define bvec_to_pseudophys(bv) (page_to_pseudophys((bv)->bv_page) + \
-+ (unsigned long) (bv)->bv_offset)
-+
-+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
-+ (((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
-+ ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
-+ bvec_to_pseudophys((vec2))))
-+#endif
-
- #include <asm-generic/iomap.h>
-
-@@ -151,9 +175,15 @@ extern void iounmap(volatile void __iome
- /*
- * ISA I/O bus memory addresses are 1:1 with the physical address.
- */
-+#ifndef CONFIG_XEN
- #define isa_virt_to_bus virt_to_phys
- #define isa_page_to_bus page_to_phys
- #define isa_bus_to_virt phys_to_virt
-+#else
-+#define isa_virt_to_bus(_x) isa_virt_to_bus_is_UNSUPPORTED->x
-+#define isa_page_to_bus(_x) isa_page_to_bus_is_UNSUPPORTED->x
-+#define isa_bus_to_virt(_x) (void *)(__fix_to_virt(FIX_ISAMAP_BEGIN) + (_x))
-+#endif
-
- /*
- * However PCI ones are not necessarily 1:1 and therefore these interfaces
-@@ -161,8 +191,13 @@ extern void iounmap(volatile void __iome
- *
- * Allow them on x86 for legacy drivers, though.
- */
-+#ifndef CONFIG_XEN
- #define virt_to_bus virt_to_phys
- #define bus_to_virt phys_to_virt
-+#else
-+#define virt_to_bus(_x) phys_to_machine(__pa(_x))
-+#define bus_to_virt(_x) __va(machine_to_phys(_x))
-+#endif
-
- /*
- * readX/writeX() are used to access memory mapped devices. On some
-@@ -267,7 +302,11 @@ void memset_io(volatile void __iomem *a,
- * used as the IO-area pointer (it can be iounmapped as well, so the
- * analogy with PCI is quite large):
- */
-+#ifndef CONFIG_XEN
- #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
-+#else
-+#define __ISA_IO_base ((char __iomem *)(fix_to_virt(FIX_ISAMAP_BEGIN)))
-+#endif
-
- #define isa_readb(a) readb(__ISA_IO_base + (a))
- #define isa_readw(a) readw(__ISA_IO_base + (a))
-@@ -338,4 +377,8 @@ extern int iommu_bio_merge;
-
- #endif /* __KERNEL__ */
-
-+#ifdef CONFIG_XEN
-+#define ARCH_HAS_DEV_MEM
-+#endif
-+
- #endif
-diff --git a/include/asm-x86_64/ipi.h b/include/asm-x86_64/ipi.h
-index 2a5c162..e61f2b2 100644
---- a/include/asm-x86_64/ipi.h
-+++ b/include/asm-x86_64/ipi.h
-@@ -23,6 +23,8 @@
- #include <asm/apicdef.h>
- #include <asm/genapic.h>
-
-+#ifndef CONFIG_XEN
-+
- /*
- * the following functions deal with sending IPIs between CPUs.
- *
-@@ -114,4 +116,10 @@ static inline void send_IPI_mask_sequenc
- local_irq_restore(flags);
- }
-
-+#else
-+
-+extern void __send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest);
-+
-+#endif
-+
- #endif /* __ASM_IPI_H */
-diff --git a/include/asm-x86_64/irq.h b/include/asm-x86_64/irq.h
-index 9db5a1b..232a5e3 100644
---- a/include/asm-x86_64/irq.h
-+++ b/include/asm-x86_64/irq.h
-@@ -10,7 +10,7 @@
- * <tomsoft@informatik.tu-chemnitz.de>
- */
-
--#define TIMER_IRQ 0
-+#include <linux/config.h>
-
- /*
- * 16 8259A IRQ's, 208 potential APIC interrupt sources.
-@@ -31,6 +31,10 @@
-
- #define FIRST_SYSTEM_VECTOR 0xef /* duplicated in hw_irq.h */
-
-+#ifndef CONFIG_XEN
-+
-+#define TIMER_IRQ 0
-+
- #ifdef CONFIG_PCI_MSI
- #define NR_IRQS FIRST_SYSTEM_VECTOR
- #define NR_IRQ_VECTORS NR_IRQS
-@@ -39,6 +43,35 @@
- #define NR_IRQ_VECTORS (32 * NR_CPUS)
- #endif
-
-+#else /* CONFIG_XEN */
-+
-+/*
-+ * The flat IRQ space is divided into two regions:
-+ * 1. A one-to-one mapping of real physical IRQs. This space is only used
-+ * if we have physical device-access privilege. This region is at the
-+ * start of the IRQ space so that existing device drivers do not need
-+ * to be modified to translate physical IRQ numbers into our IRQ space.
-+ * 2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
-+ * are bound using the provided bind/unbind functions.
-+ */
-+
-+#define PIRQ_BASE 0
-+#define NR_PIRQS 256
-+
-+#define DYNIRQ_BASE (PIRQ_BASE + NR_PIRQS)
-+#define NR_DYNIRQS 256
-+
-+#define NR_IRQS (NR_PIRQS + NR_DYNIRQS)
-+#define NR_IRQ_VECTORS NR_IRQS
-+
-+#define pirq_to_irq(_x) ((_x) + PIRQ_BASE)
-+#define irq_to_pirq(_x) ((_x) - PIRQ_BASE)
-+
-+#define dynirq_to_irq(_x) ((_x) + DYNIRQ_BASE)
-+#define irq_to_dynirq(_x) ((_x) - DYNIRQ_BASE)
-+
-+#endif /* CONFIG_XEN */
-+
- static __inline__ int irq_canonicalize(int irq)
- {
- return ((irq == 2) ? 9 : irq);
-@@ -48,6 +81,10 @@ static __inline__ int irq_canonicalize(i
- #define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */
- #endif
-
-+#ifdef CONFIG_XEN
-+#define irq_ctx_init(cpu) do { } while (0)
-+#endif
-+
- #ifdef CONFIG_HOTPLUG_CPU
- #include <linux/cpumask.h>
- extern void fixup_irqs(cpumask_t map);
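
The PIRQ/DYNIRQ conversion macros above are plain offsets into the flat IRQ space, so the layout can be checked by hand: pirq_to_irq(14) == 14 (physical IRQs keep their native numbers), dynirq_to_irq(5) == 261, and irq_to_dynirq(261) == 5. A hypothetical helper built on the same constants (not part of the patch, shown only to illustrate the partitioning):

static inline int irq_is_dynamic(int irq)
{
        /* True iff irq falls in the dynamically-bound region. */
        return irq >= DYNIRQ_BASE && irq < DYNIRQ_BASE + NR_DYNIRQS;
}
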
-diff --git a/include/asm-x86_64/mach-xen/asm/nmi.h b/include/asm-x86_64/mach-xen/asm/nmi.h
-new file mode 100644
-index 0000000..545a192
---- /dev/null
-+++ b/include/asm-x86_64/mach-xen/asm/nmi.h
-@@ -0,0 +1,75 @@
-+/*
-+ * linux/include/asm-i386/nmi.h
-+ */
-+#ifndef ASM_NMI_H
-+#define ASM_NMI_H
-+
-+#include <linux/pm.h>
-+
-+#include <xen/interface/nmi.h>
-+
-+struct pt_regs;
-+
-+typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu);
-+
-+/**
-+ * set_nmi_callback
-+ *
-+ * Set a handler for an NMI. Only one handler may be
-+ * set. Return 1 if the NMI was handled.
-+ */
-+void set_nmi_callback(nmi_callback_t callback);
-+
-+/**
-+ * unset_nmi_callback
-+ *
-+ * Remove the handler previously set.
-+ */
-+void unset_nmi_callback(void);
-+
-+#ifdef CONFIG_PM
-+
-+/** Replace the PM callback routine for NMI. */
-+struct pm_dev * set_nmi_pm_callback(pm_callback callback);
-+
-+/** Unset the PM callback routine back to the default. */
-+void unset_nmi_pm_callback(struct pm_dev * dev);
-+
-+#else
-+
-+static inline struct pm_dev * set_nmi_pm_callback(pm_callback callback)
-+{
-+ return 0;
-+}
-+
-+static inline void unset_nmi_pm_callback(struct pm_dev * dev)
-+{
-+}
-+
-+#endif /* CONFIG_PM */
-+
-+extern void default_do_nmi(struct pt_regs *);
-+extern void die_nmi(char *str, struct pt_regs *regs);
-+
-+static inline unsigned char get_nmi_reason(void)
-+{
-+ shared_info_t *s = HYPERVISOR_shared_info;
-+ unsigned char reason = 0;
-+
-+ /* construct a value which looks like it came from
-+ * port 0x61.
-+ */
-+ if (test_bit(_XEN_NMIREASON_io_error, &s->arch.nmi_reason))
-+ reason |= 0x40;
-+ if (test_bit(_XEN_NMIREASON_parity_error, &s->arch.nmi_reason))
-+ reason |= 0x80;
-+
-+ return reason;
-+}
-+
-+extern int panic_on_timeout;
-+extern int unknown_nmi_panic;
-+
-+extern int check_nmi_watchdog(void);
-+
-+#endif /* ASM_NMI_H */
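
The 0x40 and 0x80 bits chosen by get_nmi_reason() above mirror the I/O-check and parity-error bits of the real system control port, so the native NMI dispatch logic can consume the synthesized value unchanged. A sketch of the consumer side, with handler names assumed from the native x86-64 implementation:

unsigned char reason = get_nmi_reason();

if (reason & 0x80)              /* parity error, as if read from port 0x61 */
        mem_parity_error(reason, regs);
if (reason & 0x40)              /* I/O check error */
        io_check_error(reason, regs);
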
-diff --git a/include/asm-x86_64/mmu.h b/include/asm-x86_64/mmu.h
-index 5dc6ed7..5a5547c 100644
---- a/include/asm-x86_64/mmu.h
-+++ b/include/asm-x86_64/mmu.h
-@@ -15,6 +15,19 @@ typedef struct {
- rwlock_t ldtlock;
- int size;
- struct semaphore sem;
-+#ifdef CONFIG_XEN
-+ unsigned pinned:1;
-+ struct list_head unpinned;
-+#endif
- } mm_context_t;
-
-+#ifdef CONFIG_XEN
-+extern struct list_head mm_unpinned;
-+extern spinlock_t mm_unpinned_lock;
-+
-+/* mm/memory.c:exit_mmap hook */
-+extern void _arch_exit_mmap(struct mm_struct *mm);
-+#define arch_exit_mmap(_mm) _arch_exit_mmap(_mm)
-+#endif
-+
- #endif
-diff --git a/include/asm-x86_64/mmu_context.h b/include/asm-x86_64/mmu_context.h
-index 16e4be4..ee83836 100644
---- a/include/asm-x86_64/mmu_context.h
-+++ b/include/asm-x86_64/mmu_context.h
-@@ -17,35 +17,117 @@ void destroy_context(struct mm_struct *m
-
- static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
- {
--#ifdef CONFIG_SMP
-+#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
- if (read_pda(mmu_state) == TLBSTATE_OK)
- write_pda(mmu_state, TLBSTATE_LAZY);
- #endif
- }
-
-+#ifndef CONFIG_XEN
-+
- static inline void load_cr3(pgd_t *pgd)
- {
- asm volatile("movq %0,%%cr3" :: "r" (__pa(pgd)) : "memory");
- }
-
-+#define __SWITCH_DECLS
-+#define __switch_pgd(next, cpu) load_cr3((next)->pgd)
-+#define __switch_ldt(next, cpu) load_LDT_nolock(&(next)->context, cpu);
-+#define __switch_commit() 0
-+
-+#else
-+
-+static inline void load_cr3(pgd_t *pgd)
-+{
-+ asm volatile("movq %0,%%cr3" :: "r" (phys_to_machine(__pa(pgd))) :
-+ "memory");
-+}
-+
-+#define prepare_arch_switch(next) __prepare_arch_switch()
-+
-+static inline void __prepare_arch_switch(void)
-+{
-+ /*
-+ * Save away %es, %ds, %fs and %gs. Must happen before reload
-+ * of cr3/ldt (i.e., not in __switch_to).
-+ */
-+ __asm__ __volatile__ (
-+ "mov %%es,%0 ; mov %%ds,%1 ; mov %%fs,%2 ; mov %%gs,%3"
-+ : "=m" (current->thread.es),
-+ "=m" (current->thread.ds),
-+ "=m" (current->thread.fsindex),
-+ "=m" (current->thread.gsindex) );
-+
-+ if (current->thread.ds)
-+ __asm__ __volatile__ ( "movl %0,%%ds" : : "r" (0) );
-+
-+ if (current->thread.es)
-+ __asm__ __volatile__ ( "movl %0,%%es" : : "r" (0) );
-+
-+ if (current->thread.fsindex) {
-+ __asm__ __volatile__ ( "movl %0,%%fs" : : "r" (0) );
-+ current->thread.fs = 0;
-+ }
-+
-+ if (current->thread.gsindex) {
-+ load_gs_index(0);
-+ current->thread.gs = 0;
-+ }
-+}
-+
-+extern void mm_pin(struct mm_struct *mm);
-+extern void mm_unpin(struct mm_struct *mm);
-+void mm_pin_all(void);
-+
-+#define __SWITCH_DECLS struct mmuext_op ops[3], *op = ops
-+
-+static inline struct mmuext_op *__switch_pgd(struct mm_struct *next, int cpu, struct mmuext_op *op)
-+{
-+ if (!next->context.pinned)
-+ mm_pin(next);
-+ op->cmd = MMUEXT_NEW_BASEPTR;
-+ op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
-+ ++op;
-+ /* xen_new_user_pt(__pa(__user_pgd(next->pgd))) */
-+ op->cmd = MMUEXT_NEW_USER_BASEPTR;
-+ op->arg1.mfn = pfn_to_mfn(__pa(__user_pgd(next->pgd)) >> PAGE_SHIFT);
-+ return op + 1;
-+}
-+#define __switch_pgd(next, cpu) ((void)(op = __switch_pgd(next, cpu, op)))
-+
-+static inline struct mmuext_op *__switch_ldt(struct mm_struct *next, int cpu, struct mmuext_op *op)
-+{
-+ op->cmd = MMUEXT_SET_LDT;
-+ op->arg1.linear_addr = (unsigned long)next->context.ldt;
-+ op->arg2.nr_ents = next->context.size;
-+ return op + 1;
-+}
-+#define __switch_ldt(next, cpu) ((void)(op = __switch_ldt(next, cpu, op)))
-+
-+#define __switch_commit() HYPERVISOR_mmuext_op(ops, op - ops, NULL, DOMID_SELF)
-+
-+#endif
-+
- static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
- struct task_struct *tsk)
- {
- unsigned cpu = smp_processor_id();
-+ __SWITCH_DECLS;
-+
- if (likely(prev != next)) {
- /* stop flush ipis for the previous mm */
- clear_bit(cpu, &prev->cpu_vm_mask);
--#ifdef CONFIG_SMP
-+#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
- write_pda(mmu_state, TLBSTATE_OK);
- write_pda(active_mm, next);
- #endif
- set_bit(cpu, &next->cpu_vm_mask);
-- load_cr3(next->pgd);
-+ __switch_pgd(next, cpu);
-
- if (unlikely(next->context.ldt != prev->context.ldt))
-- load_LDT_nolock(&next->context, cpu);
-+ __switch_ldt(next, cpu);
- }
--#ifdef CONFIG_SMP
-+#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
- else {
- write_pda(mmu_state, TLBSTATE_OK);
- if (read_pda(active_mm) != next)
-@@ -55,13 +137,19 @@ static inline void switch_mm(struct mm_s
- * tlb flush IPI delivery. We must reload CR3
- * to make sure to use no freed page tables.
- */
-- load_cr3(next->pgd);
-- load_LDT_nolock(&next->context, cpu);
-+ __switch_pgd(next, cpu);
-+ __switch_ldt(next, cpu);
- }
- }
- #endif
-+ BUG_ON(__switch_commit());
- }
-
-+#undef __switch_commit
-+#undef __switch_ldt
-+#undef __switch_pgd
-+#undef __SWITCH_DECLS
-+
- #define deactivate_mm(tsk,mm) do { \
- load_gs_index(0); \
- asm volatile("movl %0,%%fs"::"r"(0)); \
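
Taken together, the Xen variants of __switch_pgd() and __switch_ldt() above turn the prev != next path of switch_mm() into a single batched hypercall rather than three separate traps. Expanding the macros by hand, the submitted sequence is roughly (the LDT entry is queued only when the LDT actually changes):

struct mmuext_op ops[3], *op = ops;

op->cmd = MMUEXT_NEW_BASEPTR;           /* switch the kernel page table */
op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
op++;
op->cmd = MMUEXT_NEW_USER_BASEPTR;      /* and its user counterpart     */
op->arg1.mfn = pfn_to_mfn(__pa(__user_pgd(next->pgd)) >> PAGE_SHIFT);
op++;
op->cmd = MMUEXT_SET_LDT;               /* only if the LDT differs      */
op->arg1.linear_addr = (unsigned long)next->context.ldt;
op->arg2.nr_ents = next->context.size;
op++;

BUG_ON(HYPERVISOR_mmuext_op(ops, op - ops, NULL, DOMID_SELF));
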
-diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
-index 615e3e4..e62161d 100644
---- a/include/asm-x86_64/page.h
-+++ b/include/asm-x86_64/page.h
-@@ -3,6 +3,32 @@
-
- #include <linux/config.h>
-
-+#ifdef CONFIG_XEN
-+
-+#ifndef __ASSEMBLY__
-+#include <linux/types.h>
-+#include <asm/bug.h>
-+#include <xen/features.h>
-+#endif
-+#include <xen/interface/xen.h>
-+#include <xen/foreign_page.h>
-+
-+#define arch_free_page(_page,_order) \
-+({ int foreign = PageForeign(_page); \
-+ if (foreign) \
-+ (PageForeignDestructor(_page))(_page); \
-+ foreign; \
-+})
-+#define HAVE_ARCH_FREE_PAGE
-+
-+#ifdef CONFIG_XEN_SCRUB_PAGES
-+#define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
-+#else
-+#define scrub_pages(_p,_n) ((void)0)
-+#endif
-+
-+#endif
-+
- /* PAGE_SHIFT determines the page size */
- #define PAGE_SHIFT 12
- #ifdef __ASSEMBLY__
-@@ -54,6 +80,119 @@ void copy_page(void *, void *);
-
- #define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
- #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
-+
-+#ifdef CONFIG_XEN
-+
-+/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
-+#define INVALID_P2M_ENTRY (~0UL)
-+#define FOREIGN_FRAME_BIT (1UL<<63)
-+#define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT)
-+
-+extern unsigned long *phys_to_machine_mapping;
-+
-+static inline unsigned long pfn_to_mfn(unsigned long pfn)
-+{
-+ if (xen_feature(XENFEAT_auto_translated_physmap))
-+ return pfn;
-+ return phys_to_machine_mapping[(unsigned int)(pfn)] &
-+ ~FOREIGN_FRAME_BIT;
-+}
-+
-+static inline int phys_to_machine_mapping_valid(unsigned long pfn)
-+{
-+ if (xen_feature(XENFEAT_auto_translated_physmap))
-+ return 1;
-+ return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY);
-+}
-+
-+static inline unsigned long mfn_to_pfn(unsigned long mfn)
-+{
-+ unsigned long pfn;
-+
-+ if (xen_feature(XENFEAT_auto_translated_physmap))
-+ return mfn;
-+
-+ /*
-+ * The array access can fail (e.g., device space beyond end of RAM).
-+ * In such cases it doesn't matter what we return (we return garbage),
-+ * but we must handle the fault without crashing!
-+ */
-+ asm (
-+ "1: movq %1,%0\n"
-+ "2:\n"
-+ ".section __ex_table,\"a\"\n"
-+ " .align 8\n"
-+ " .quad 1b,2b\n"
-+ ".previous"
-+ : "=r" (pfn) : "m" (machine_to_phys_mapping[mfn]) );
-+
-+ return pfn;
-+}
-+
-+/*
-+ * We detect special mappings in one of two ways:
-+ * 1. If the MFN is an I/O page then Xen will set the m2p entry
-+ * to be outside our maximum possible pseudophys range.
-+ * 2. If the MFN belongs to a different domain then we will certainly
-+ * not have MFN in our p2m table. Conversely, if the page is ours,
-+ * then we'll have p2m(m2p(MFN))==MFN.
-+ * If we detect a special mapping then it doesn't have a 'struct page'.
-+ * We force !pfn_valid() by returning an out-of-range pointer.
-+ *
-+ * NB. These checks require that, for any MFN that is not in our reservation,
-+ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
-+ * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
-+ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
-+ *
-+ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
-+ * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
-+ * require. In all the cases we care about, the FOREIGN_FRAME bit is
-+ * masked (e.g., pfn_to_mfn()) so behaviour there is correct.
-+ */
-+static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
-+{
-+ unsigned long pfn = mfn_to_pfn(mfn);
-+ if ((pfn < end_pfn)
-+ && !xen_feature(XENFEAT_auto_translated_physmap)
-+ && (phys_to_machine_mapping[pfn] != mfn))
-+ return end_pfn; /* force !pfn_valid() */
-+ return pfn;
-+}
-+
-+static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-+{
-+ if (xen_feature(XENFEAT_auto_translated_physmap)) {
-+ BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
-+ return;
-+ }
-+ phys_to_machine_mapping[pfn] = mfn;
-+}
-+
-+/* Definitions for machine and pseudophysical addresses. */
-+typedef unsigned long paddr_t;
-+typedef unsigned long maddr_t;
-+
-+static inline maddr_t phys_to_machine(paddr_t phys)
-+{
-+ maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
-+ machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
-+ return machine;
-+}
-+
-+static inline paddr_t machine_to_phys(maddr_t machine)
-+{
-+ paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
-+ phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
-+ return phys;
-+}
-+
-+/* VIRT <-> MACHINE conversion */
-+#define virt_to_machine(v) (phys_to_machine(__pa(v)))
-+#define virt_to_mfn(v) (pfn_to_mfn(__pa(v) >> PAGE_SHIFT))
-+#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
-+
-+#endif
-+
- /*
- * These are used to make use of C type-checking..
- */
-@@ -65,16 +204,78 @@ typedef struct { unsigned long pgd; } pg
-
- typedef struct { unsigned long pgprot; } pgprot_t;
-
-+#ifndef CONFIG_XEN
- #define pte_val(x) ((x).pte)
- #define pmd_val(x) ((x).pmd)
- #define pud_val(x) ((x).pud)
- #define pgd_val(x) ((x).pgd)
-+#else
-+static inline unsigned long pte_val(pte_t x)
-+{
-+ unsigned long ret = x.pte;
-+ if (ret & 1) ret = machine_to_phys(ret);
-+ return ret;
-+}
-+
-+#define pte_val_ma(x) ((x).pte)
-+
-+static inline unsigned long pmd_val(pmd_t x)
-+{
-+ unsigned long ret = x.pmd;
-+ if (ret) ret = machine_to_phys(ret);
-+ return ret;
-+}
-+
-+static inline unsigned long pud_val(pud_t x)
-+{
-+ unsigned long ret = x.pud;
-+ if (ret) ret = machine_to_phys(ret);
-+ return ret;
-+}
-+
-+static inline unsigned long pgd_val(pgd_t x)
-+{
-+ unsigned long ret = x.pgd;
-+ if (ret) ret = machine_to_phys(ret);
-+ return ret;
-+}
-+#endif
-+
- #define pgprot_val(x) ((x).pgprot)
-
-+#ifndef CONFIG_XEN
- #define __pte(x) ((pte_t) { (x) } )
- #define __pmd(x) ((pmd_t) { (x) } )
- #define __pud(x) ((pud_t) { (x) } )
- #define __pgd(x) ((pgd_t) { (x) } )
-+#else
-+static inline pte_t __pte(unsigned long x)
-+{
-+ if (x & 1) x = phys_to_machine(x);
-+ return ((pte_t) { (x) });
-+}
-+
-+#define __pte_ma(x) ((pte_t) { (x) } )
-+
-+static inline pmd_t __pmd(unsigned long x)
-+{
-+ if ((x & 1)) x = phys_to_machine(x);
-+ return ((pmd_t) { (x) });
-+}
-+
-+static inline pud_t __pud(unsigned long x)
-+{
-+ if ((x & 1)) x = phys_to_machine(x);
-+ return ((pud_t) { (x) });
-+}
-+
-+static inline pgd_t __pgd(unsigned long x)
-+{
-+ if ((x & 1)) x = phys_to_machine(x);
-+ return ((pgd_t) { (x) });
-+}
-+#endif
-+
- #define __pgprot(x) ((pgprot_t) { (x) } )
-
- #define __PHYSICAL_START ((unsigned long)CONFIG_PHYSICAL_START)
-@@ -89,6 +290,13 @@ typedef struct { unsigned long pgprot; }
- #define __PAGE_OFFSET 0xffff810000000000
- #endif /* !__ASSEMBLY__ */
-
-+#ifdef CONFIG_XEN
-+#undef __PAGE_OFFSET
-+#define __PAGE_OFFSET HYPERVISOR_VIRT_END
-+#undef LOAD_OFFSET
-+#define LOAD_OFFSET 0
-+#endif
-+
- /* to align the pointer to the (next) page boundary */
- #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
-
-@@ -101,12 +309,6 @@ typedef struct { unsigned long pgprot; }
- #define KERNEL_TEXT_SIZE (40UL*1024*1024)
- #define KERNEL_TEXT_START 0xffffffff80000000UL
-
--#ifndef __ASSEMBLY__
--
--#include <asm/bug.h>
--
--#endif /* __ASSEMBLY__ */
--
- #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
-
- /* Note: __pa(&symbol_visible_to_c) should be always replaced with __pa_symbol.
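
phys_to_machine() and machine_to_phys() above translate only the frame number and carry the in-page offset through untouched. A worked example with invented table contents (suppose phys_to_machine_mapping[0x1234] == 0x5678 and the m2p table agrees):

paddr_t phys    = (0x1234UL << PAGE_SHIFT) | 0xabc; /* pfn 0x1234, offset 0xabc */
maddr_t machine = phys_to_machine(phys);            /* (0x5678 << 12) | 0xabc   */
paddr_t back    = machine_to_phys(machine);         /* == phys, round trip      */
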
-diff --git a/include/asm-x86_64/pci.h b/include/asm-x86_64/pci.h
-index 8a05af2..1a9904f 100644
---- a/include/asm-x86_64/pci.h
-+++ b/include/asm-x86_64/pci.h
-@@ -76,6 +76,23 @@ extern int iommu_sac_force;
- #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
- (((PTR)->LEN_NAME) = (VAL))
-
-+#elif defined(CONFIG_XEN) && defined(CONFIG_SWIOTLB)
-+
-+#define pci_dac_dma_supported(pci_dev, mask) 1
-+
-+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
-+ dma_addr_t ADDR_NAME;
-+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
-+ __u32 LEN_NAME;
-+#define pci_unmap_addr(PTR, ADDR_NAME) \
-+ ((PTR)->ADDR_NAME)
-+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
-+ (((PTR)->ADDR_NAME) = (VAL))
-+#define pci_unmap_len(PTR, LEN_NAME) \
-+ ((PTR)->LEN_NAME)
-+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
-+ (((PTR)->LEN_NAME) = (VAL))
-+
- #else
- /* No IOMMU */
-
-@@ -147,4 +164,13 @@ static inline void pcibios_add_platform_
- #include <asm-generic/pci.h>
- #endif
-
-+#ifdef CONFIG_XEN
-+/* On Xen we have to scan all functions since Xen hides bridges from
-+ * us. If a bridge is at fn=0 and that slot has a multifunction
-+ * device, we won't find the additional devices without scanning all
-+ * functions. */
-+#undef pcibios_scan_all_fns
-+#define pcibios_scan_all_fns(a, b) 1
-+#endif
-+
- #endif /* __x8664_PCI_H */
-diff --git a/include/asm-x86_64/pda.h b/include/asm-x86_64/pda.h
-index c7ab38a..64dcbc5 100644
---- a/include/asm-x86_64/pda.h
-+++ b/include/asm-x86_64/pda.h
-@@ -2,6 +2,7 @@
- #define X86_64_PDA_H
-
- #ifndef __ASSEMBLY__
-+#include <linux/config.h>
- #include <linux/stddef.h>
- #include <linux/types.h>
- #include <linux/cache.h>
-@@ -22,8 +23,10 @@ struct x8664_pda {
- int nodenumber; /* number of current node */
- unsigned int __softirq_pending;
- unsigned int __nmi_count; /* number of NMI on this CPUs */
-+#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
- struct mm_struct *active_mm;
-- int mmu_state;
-+ int mmu_state;
-+#endif
- unsigned apic_timer_irqs;
- } ____cacheline_aligned_in_smp;
-
-diff --git a/include/asm-x86_64/pgalloc.h b/include/asm-x86_64/pgalloc.h
-index 08cad24..ada72f7 100644
---- a/include/asm-x86_64/pgalloc.h
-+++ b/include/asm-x86_64/pgalloc.h
-@@ -6,26 +6,90 @@
- #include <linux/threads.h>
- #include <linux/mm.h>
-
-+#ifdef CONFIG_XEN
-+#include <asm/io.h> /* for phys_to_virt and page_to_pseudophys */
-+
-+#include <xen/features.h>
-+void make_page_readonly(void *va, unsigned int feature);
-+void make_page_writable(void *va, unsigned int feature);
-+void make_pages_readonly(void *va, unsigned int nr, unsigned int feature);
-+void make_pages_writable(void *va, unsigned int nr, unsigned int feature);
-+
-+#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)
-+#endif
-+
- #define pmd_populate_kernel(mm, pmd, pte) \
- set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
--#define pud_populate(mm, pud, pmd) \
-- set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)))
--#define pgd_populate(mm, pgd, pud) \
-- set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)))
-
- static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
- {
-+#ifndef CONFIG_XEN
- set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
-+#else
-+ if (unlikely((mm)->context.pinned)) {
-+ BUG_ON(HYPERVISOR_update_va_mapping(
-+ (unsigned long)__va(page_to_pfn(pte) << PAGE_SHIFT),
-+ pfn_pte(page_to_pfn(pte), PAGE_KERNEL_RO), 0));
-+ set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
-+ } else {
-+ *(pmd) = __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT));
-+ }
-+#endif
- }
-
--static inline pmd_t *get_pmd(void)
-+#ifndef CONFIG_XEN
-+#define pud_populate(mm, pud, pmd) \
-+ set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)))
-+#else
-+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
- {
-- return (pmd_t *)get_zeroed_page(GFP_KERNEL);
-+ if (unlikely((mm)->context.pinned)) {
-+ BUG_ON(HYPERVISOR_update_va_mapping(
-+ (unsigned long)pmd,
-+ pfn_pte(virt_to_phys(pmd)>>PAGE_SHIFT,
-+ PAGE_KERNEL_RO), 0));
-+ set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
-+ } else {
-+ *(pud) = __pud(_PAGE_TABLE | __pa(pmd));
-+ }
- }
-+#endif
-+
-+#ifndef CONFIG_XEN
-+#define pgd_populate(mm, pgd, pud) \
-+ set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)))
-+#else
-+/*
-+ * We need to use the batch mode here, but pgd_populate() won't
-+ * be called frequently.
-+ */
-+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
-+{
-+ if (unlikely((mm)->context.pinned)) {
-+ BUG_ON(HYPERVISOR_update_va_mapping(
-+ (unsigned long)pud,
-+ pfn_pte(virt_to_phys(pud)>>PAGE_SHIFT,
-+ PAGE_KERNEL_RO), 0));
-+ set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
-+ set_pgd(__user_pgd(pgd), __pgd(_PAGE_TABLE | __pa(pud)));
-+ } else {
-+ *(pgd) = __pgd(_PAGE_TABLE | __pa(pud));
-+ *(__user_pgd(pgd)) = *(pgd);
-+ }
-+}
-+#endif
-
- static inline void pmd_free(pmd_t *pmd)
- {
- BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
-+#ifdef CONFIG_XEN
-+ if (!pte_write(*virt_to_ptep(pmd))) {
-+ BUG_ON(HYPERVISOR_update_va_mapping(
-+ (unsigned long)pmd,
-+ pfn_pte(virt_to_phys(pmd)>>PAGE_SHIFT, PAGE_KERNEL),
-+ 0));
-+ }
-+#endif
- free_page((unsigned long)pmd);
- }
-
-@@ -42,13 +106,28 @@ static inline pud_t *pud_alloc_one(struc
- static inline void pud_free (pud_t *pud)
- {
- BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
-+#ifdef CONFIG_XEN
-+ if (!pte_write(*virt_to_ptep(pud))) {
-+ BUG_ON(HYPERVISOR_update_va_mapping(
-+ (unsigned long)pud,
-+ pfn_pte(virt_to_phys(pud)>>PAGE_SHIFT, PAGE_KERNEL),
-+ 0));
-+ }
-+#endif
- free_page((unsigned long)pud);
- }
-
- static inline pgd_t *pgd_alloc(struct mm_struct *mm)
- {
- unsigned boundary;
-+#ifndef CONFIG_XEN
- pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-+#else
-+ /*
-+ * We allocate two contiguous pages for kernel and user.
-+ */
-+ pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 1);
-+#endif
- if (!pgd)
- return NULL;
- /*
-@@ -61,26 +140,56 @@ static inline pgd_t *pgd_alloc(struct mm
- memcpy(pgd + boundary,
- init_level4_pgt + boundary,
- (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
-+#ifdef CONFIG_XEN
-+ memset(__user_pgd(pgd), 0, PAGE_SIZE); /* clean up user pgd */
-+ /*
-+ * Set level3_user_pgt for vsyscall area.
-+ */
-+ set_pgd(__user_pgd(pgd) + pgd_index(VSYSCALL_START),
-+ mk_kernel_pgd(__pa_symbol(level3_user_pgt)));
-+#endif
- return pgd;
- }
-
- static inline void pgd_free(pgd_t *pgd)
- {
- BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
-+#ifndef CONFIG_XEN
- free_page((unsigned long)pgd);
-+#else
-+ if (!pte_write(*virt_to_ptep(pgd))) {
-+ xen_pgd_unpin(__pa(pgd));
-+ BUG_ON(HYPERVISOR_update_va_mapping(
-+ (unsigned long)pgd,
-+ pfn_pte(virt_to_phys(pgd)>>PAGE_SHIFT, PAGE_KERNEL),
-+ 0));
-+ }
-+ if (!pte_write(*virt_to_ptep(__user_pgd(pgd)))) {
-+ xen_pgd_unpin(__pa(__user_pgd(pgd)));
-+ BUG_ON(HYPERVISOR_update_va_mapping(
-+ (unsigned long)__user_pgd(pgd),
-+ pfn_pte(virt_to_phys(__user_pgd(pgd))>>PAGE_SHIFT,
-+ PAGE_KERNEL),
-+ 0));
-+ }
-+ free_pages((unsigned long)pgd, 1);
-+#endif
- }
-
- static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
- {
-- return (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
-+ pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
-+
-+#ifdef CONFIG_XEN
-+ if (pte)
-+ make_page_readonly(pte, XENFEAT_writable_page_tables);
-+#endif
-+ return pte;
- }
-
- static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
- {
-- void *p = (void *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
-- if (!p)
-- return NULL;
-- return virt_to_page(p);
-+ return alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
- }
-
- /* Should really implement gc for free page table pages. This could be
-@@ -89,17 +198,33 @@ static inline struct page *pte_alloc_one
- static inline void pte_free_kernel(pte_t *pte)
- {
- BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
-+#ifdef CONFIG_XEN
-+ xen_pte_unpin(__pa(pte));
-+ make_page_writable(pte, XENFEAT_writable_page_tables);
-+#endif
- free_page((unsigned long)pte);
- }
-
- static inline void pte_free(struct page *pte)
- {
-+#ifdef CONFIG_XEN
-+ unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT);
-+
-+ if (!pte_write(*virt_to_ptep(va)))
-+ BUG_ON(HYPERVISOR_update_va_mapping(
-+ va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0));
-+#endif
- __free_page(pte);
- }
-
-+#ifndef CONFIG_XEN
- #define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
--
- #define __pmd_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
- #define __pud_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
-+#else
-+#define __pte_free_tlb(tlb,x) pte_free((x))
-+#define __pmd_free_tlb(tlb,x) pmd_free((x))
-+#define __pud_free_tlb(tlb,x) pud_free((x))
-+#endif
-
- #endif /* _X86_64_PGALLOC_H */
-diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
-index 715fd94..11dce27 100644
---- a/include/asm-x86_64/pgtable.h
-+++ b/include/asm-x86_64/pgtable.h
-@@ -10,6 +10,28 @@
- #include <asm/bitops.h>
- #include <linux/threads.h>
- #include <asm/pda.h>
-+#ifdef CONFIG_XEN
-+#include <asm/hypervisor.h>
-+
-+extern pud_t level3_user_pgt[512];
-+extern pud_t init_level4_user_pgt[];
-+
-+extern void xen_init_pt(void);
-+
-+#define virt_to_ptep(__va) \
-+({ \
-+ pgd_t *__pgd = pgd_offset_k((unsigned long)(__va)); \
-+ pud_t *__pud = pud_offset(__pgd, (unsigned long)(__va)); \
-+ pmd_t *__pmd = pmd_offset(__pud, (unsigned long)(__va)); \
-+ pte_offset_kernel(__pmd, (unsigned long)(__va)); \
-+})
-+
-+#define arbitrary_virt_to_machine(__va) \
-+({ \
-+ maddr_t m = (maddr_t)pte_mfn(*virt_to_ptep(__va)) << PAGE_SHIFT;\
-+ m | ((unsigned long)(__va) & (PAGE_SIZE-1)); \
-+})
-+#endif
-
- extern pud_t level3_kernel_pgt[512];
- extern pud_t level3_physmem_pgt[512];
-@@ -70,10 +92,20 @@ extern unsigned long empty_zero_page[PAG
- #define pgd_none(x) (!pgd_val(x))
- #define pud_none(x) (!pud_val(x))
-
-+#ifndef CONFIG_XEN
-+#define MAYBE_PAGE_USER 0
-+#define __pte_val(x) pte_val(x)
-+#else
-+#define MAYBE_PAGE_USER _PAGE_USER
-+#define __pte_val(x) (x).pte
-+#endif
-+
- static inline void set_pte(pte_t *dst, pte_t val)
- {
-- pte_val(*dst) = pte_val(val);
-+ __pte_val(*dst) = __pte_val(val);
- }
-+
-+#ifndef CONFIG_XEN
- #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
-
- static inline void set_pmd(pmd_t *dst, pmd_t val)
-@@ -85,26 +117,49 @@ static inline void set_pud(pud_t *dst, p
- {
- pud_val(*dst) = pud_val(val);
- }
-+#else
-+#define set_pte_batched(pteptr, pteval) \
-+ queue_l1_entry_update(pteptr, (pteval))
-+#define set_pte_at(_mm,addr,ptep,pteval) do { \
-+ if (((_mm) != current->mm && (_mm) != &init_mm) || \
-+ HYPERVISOR_update_va_mapping((addr), (pteval), 0)) \
-+ set_pte((ptep), (pteval)); \
-+} while (0)
-+#define set_pmd(pmdptr, pmdval) xen_l2_entry_update(pmdptr, (pmdval))
-+#define set_pud(pudptr, pudval) xen_l3_entry_update(pudptr, (pudval))
-+#endif
-
- static inline void pud_clear (pud_t *pud)
- {
- set_pud(pud, __pud(0));
- }
-
-+#ifndef CONFIG_XEN
- static inline void set_pgd(pgd_t *dst, pgd_t val)
- {
- pgd_val(*dst) = pgd_val(val);
- }
-+#else
-+#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)
-+#define set_pgd(pgdptr, pgdval) xen_l4_entry_update(pgdptr, (pgdval))
-+#endif
-
- static inline void pgd_clear (pgd_t * pgd)
- {
- set_pgd(pgd, __pgd(0));
-+#ifdef CONFIG_XEN
-+ set_pgd(__user_pgd(pgd), __pgd(0));
-+#endif
- }
-
- #define pud_page(pud) \
- ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK))
-
-+#ifndef CONFIG_XEN
- #define ptep_get_and_clear(mm,addr,xp) __pte(xchg(&(xp)->pte, 0))
-+#else
-+#define ptep_get_and_clear(mm,addr,xp) __pte_ma(xchg(&(xp)->pte, 0))
-+#endif
-
- struct mm_struct;
-
-@@ -168,7 +223,7 @@ static inline pte_t ptep_get_and_clear_f
- #define _PAGE_NX (1UL<<_PAGE_BIT_NX)
-
- #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
--#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
-+#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | MAYBE_PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
-
- #define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
-
-@@ -181,23 +236,30 @@ static inline pte_t ptep_get_and_clear_f
- #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
- #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
- #define __PAGE_KERNEL \
-- (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
-+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | MAYBE_PAGE_USER)
- #define __PAGE_KERNEL_EXEC \
-- (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
-+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | MAYBE_PAGE_USER)
- #define __PAGE_KERNEL_NOCACHE \
-- (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED | _PAGE_NX)
-+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED | _PAGE_NX | MAYBE_PAGE_USER)
- #define __PAGE_KERNEL_RO \
-- (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
-+ (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | MAYBE_PAGE_USER)
- #define __PAGE_KERNEL_VSYSCALL \
-- (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-+ (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | MAYBE_PAGE_USER)
- #define __PAGE_KERNEL_VSYSCALL_NOCACHE \
-- (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_PCD)
-+ (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_PCD | MAYBE_PAGE_USER)
- #define __PAGE_KERNEL_LARGE \
-- (__PAGE_KERNEL | _PAGE_PSE)
-+ (__PAGE_KERNEL | _PAGE_PSE | MAYBE_PAGE_USER)
- #define __PAGE_KERNEL_LARGE_EXEC \
-- (__PAGE_KERNEL_EXEC | _PAGE_PSE)
-+ (__PAGE_KERNEL_EXEC | _PAGE_PSE | MAYBE_PAGE_USER)
-
-+#ifndef CONFIG_XEN
- #define MAKE_GLOBAL(x) __pgprot((x) | _PAGE_GLOBAL)
-+#else
-+/*
-+ * We don't support GLOBAL pages in xenolinux64
-+ */
-+#define MAKE_GLOBAL(x) __pgprot((x))
-+#endif
-
- #define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL)
- #define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
-@@ -243,49 +305,62 @@ static inline unsigned long pud_bad(pud_
- return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
- }
-
--#define pte_none(x) (!pte_val(x))
--#define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
-+#define pte_none(x) (!__pte_val(x))
-+#define pte_present(x) (__pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
- #define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
-
- #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* FIXME: is this
- right? */
- #define pte_page(x) pfn_to_page(pte_pfn(x))
--#define pte_pfn(x) ((pte_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
-+#ifndef CONFIG_XEN
-+#define pte_pfn(x) ((__pte_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
-+#else
-+#define pte_mfn(x) ((__pte_val(x) & PTE_MASK) >> PAGE_SHIFT)
-+#define pte_pfn(_pte) mfn_to_local_pfn(pte_mfn(_pte))
-+#endif
-
- static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
- {
- pte_t pte;
-- pte_val(pte) = (page_nr << PAGE_SHIFT);
-- pte_val(pte) |= pgprot_val(pgprot);
-- pte_val(pte) &= __supported_pte_mask;
-+#ifndef CONFIG_XEN
-+ __pte_val(pte) = (page_nr << PAGE_SHIFT);
-+#else
-+ __pte_val(pte) = (pfn_to_mfn(page_nr) << PAGE_SHIFT);
-+#endif
-+ __pte_val(pte) |= pgprot_val(pgprot);
-+ __pte_val(pte) &= __supported_pte_mask;
- return pte;
- }
-
-+#ifdef CONFIG_XEN
-+#define pfn_pte_ma(pfn, prot) __pte_ma((((pfn) << PAGE_SHIFT) | pgprot_val(prot)) & __supported_pte_mask)
-+#endif
-+
- /*
- * The following only work if pte_present() is true.
- * Undefined behaviour if not..
- */
- #define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT)
--static inline int pte_user(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
--static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
--static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
--static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
--static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
--static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
--static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
--static inline int pte_huge(pte_t pte) { return (pte_val(pte) & __LARGE_PTE) == __LARGE_PTE; }
--
--static inline pte_t pte_rdprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
--static inline pte_t pte_exprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
--static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
--static inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
--static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; }
--static inline pte_t pte_mkread(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
--static inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
--static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
--static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
--static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; }
--static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | __LARGE_PTE)); return pte; }
-+static inline int pte_user(pte_t pte) { return __pte_val(pte) & _PAGE_USER; }
-+static inline int pte_read(pte_t pte) { return __pte_val(pte) & _PAGE_USER; }
-+static inline int pte_exec(pte_t pte) { return __pte_val(pte) & _PAGE_USER; }
-+static inline int pte_dirty(pte_t pte) { return __pte_val(pte) & _PAGE_DIRTY; }
-+static inline int pte_young(pte_t pte) { return __pte_val(pte) & _PAGE_ACCESSED; }
-+static inline int pte_write(pte_t pte) { return __pte_val(pte) & _PAGE_RW; }
-+static inline int pte_file(pte_t pte) { return __pte_val(pte) & _PAGE_FILE; }
-+static inline int pte_huge(pte_t pte) { return (__pte_val(pte) & __LARGE_PTE) == __LARGE_PTE; }
-+
-+static inline pte_t pte_rdprotect(pte_t pte) { __pte_val(pte) &= ~_PAGE_USER; return pte; }
-+static inline pte_t pte_exprotect(pte_t pte) { __pte_val(pte) &= ~_PAGE_USER; return pte; }
-+static inline pte_t pte_mkclean(pte_t pte) { __pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
-+static inline pte_t pte_mkold(pte_t pte) { __pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
-+static inline pte_t pte_wrprotect(pte_t pte) { __pte_val(pte) &= ~_PAGE_RW; return pte; }
-+static inline pte_t pte_mkread(pte_t pte) { __pte_val(pte) |= _PAGE_USER; return pte; }
-+static inline pte_t pte_mkexec(pte_t pte) { __pte_val(pte) |= _PAGE_USER; return pte; }
-+static inline pte_t pte_mkdirty(pte_t pte) { __pte_val(pte) |= _PAGE_DIRTY; return pte; }
-+static inline pte_t pte_mkyoung(pte_t pte) { __pte_val(pte) |= _PAGE_ACCESSED; return pte; }
-+static inline pte_t pte_mkwrite(pte_t pte) { __pte_val(pte) |= _PAGE_RW; return pte; }
-+static inline pte_t pte_mkhuge(pte_t pte) { __pte_val(pte) |= __LARGE_PTE; return pte; }
-
- struct vm_area_struct;
-
-@@ -331,20 +406,35 @@ static inline int pmd_large(pmd_t pte) {
- #define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
- #define pgd_offset_k(address) (init_level4_pgt + pgd_index(address))
- #define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT)
--#define mk_kernel_pgd(address) ((pgd_t){ (address) | _KERNPG_TABLE })
-+#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)
-
- /* PUD - Level3 access */
- /* to find an entry in a page-table-directory. */
- #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
- #define pud_offset(pgd, address) ((pud_t *) pgd_page(*(pgd)) + pud_index(address))
-+#ifndef CONFIG_XEN
- #define pud_offset_k(pgd, addr) pud_offset(pgd, addr)
- #define pud_present(pud) (pud_val(pud) & _PAGE_PRESENT)
-+#endif
-
- static inline pud_t *__pud_offset_k(pud_t *pud, unsigned long address)
- {
- return pud + pud_index(address);
- }
-
-+#ifdef CONFIG_XEN
-+static inline int pud_present(pud_t pud) { return !pud_none(pud); }
-+
-+/* Find correct pud via the hidden fourth level page level: */
-+
-+/* This accesses the reference page table of the boot cpu.
-+ Other CPUs get synced lazily via the page fault handler. */
-+static inline pud_t *pud_offset_k(pgd_t *pgd, unsigned long address)
-+{
-+ return pud_offset(pgd_offset_k(address), address);
-+}
-+#endif
-+
- /* PMD - Level 2 access */
- #define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
- #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
-@@ -353,9 +443,19 @@ static inline pud_t *__pud_offset_k(pud_
- #define pmd_offset(dir, address) ((pmd_t *) pud_page(*(dir)) + \
- pmd_index(address))
- #define pmd_none(x) (!pmd_val(x))
-+#ifndef CONFIG_XEN
- #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
-+#else
-+/* pmd_present doesn't just test the _PAGE_PRESENT bit since writable
-+ page tables (wr.p.t.) can temporarily clear it. */
-+#define pmd_present(x) (pmd_val(x))
-+#endif
- #define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
-+#ifndef CONFIG_XEN
- #define pmd_bad(x) ((pmd_val(x) & (~PTE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE )
-+#else
-+#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))
-+#endif
- #define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
- #define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
-
-@@ -373,16 +473,16 @@ static inline pud_t *__pud_offset_k(pud_
- static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
- {
- pte_t pte;
-- pte_val(pte) = physpage | pgprot_val(pgprot);
-+ __pte_val(pte) = physpage | pgprot_val(pgprot);
- return pte;
- }
-
- /* Change flags of a PTE */
- static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
- {
-- pte_val(pte) &= _PAGE_CHG_MASK;
-- pte_val(pte) |= pgprot_val(newprot);
-- pte_val(pte) &= __supported_pte_mask;
-+ __pte_val(pte) &= _PAGE_CHG_MASK;
-+ __pte_val(pte) |= pgprot_val(newprot);
-+ __pte_val(pte) &= __supported_pte_mask;
- return pte;
- }
-
-@@ -405,6 +505,7 @@ static inline pte_t pte_modify(pte_t pte
- * race with other CPU's that might be updating the dirty
- * bit at the same time. */
- #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-+#ifndef CONFIG_XEN
- #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
- do { \
- if (__dirty) { \
-@@ -412,6 +513,19 @@ static inline pte_t pte_modify(pte_t pte
- flush_tlb_page(__vma, __address); \
- } \
- } while (0)
-+#else
-+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-+ do { \
-+ if (__dirty) { \
-+ if ( likely((__vma)->vm_mm == current->mm) ) { \
-+ BUG_ON(HYPERVISOR_update_va_mapping((__address), (__entry), UVMF_INVLPG|UVMF_MULTI|(unsigned long)((__vma)->vm_mm->cpu_vm_mask.bits))); \
-+ } else { \
-+ xen_l1_entry_update((__ptep), (__entry)); \
-+ flush_tlb_page((__vma), (__address)); \
-+ } \
-+ } \
-+ } while (0)
-+#endif
-
- /* Encode and de-code a swap entry */
- #define __swp_type(x) (((x).val >> 1) & 0x3f)
-@@ -424,8 +538,36 @@ static inline pte_t pte_modify(pte_t pte
-
- extern int kern_addr_valid(unsigned long addr);
-
-+#ifndef CONFIG_XEN
- #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
- remap_pfn_range(vma, vaddr, pfn, size, prot)
-+#else
-+#define DOMID_LOCAL (0xFFFFU)
-+
-+int direct_remap_pfn_range(struct vm_area_struct *vma,
-+ unsigned long address,
-+ unsigned long mfn,
-+ unsigned long size,
-+ pgprot_t prot,
-+ domid_t domid);
-+
-+int direct_kernel_remap_pfn_range(unsigned long address,
-+ unsigned long mfn,
-+ unsigned long size,
-+ pgprot_t prot,
-+ domid_t domid);
-+
-+int create_lookup_pte_addr(struct mm_struct *mm,
-+ unsigned long address,
-+ uint64_t *ptep);
-+
-+int touch_pte_range(struct mm_struct *mm,
-+ unsigned long address,
-+ unsigned long size);
-+
-+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
-+ direct_remap_pfn_range(vma,vaddr,pfn,size,prot,DOMID_IO)
-+#endif
-
- #define MK_IOSPACE_PFN(space, pfn) (pfn)
- #define GET_IOSPACE(pfn) 0
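
The __pte_val()/pte_val() split introduced above is the heart of this file: a PTE always stores a machine frame number, pte_val() translates it back to a pseudophysical one, and __pte_val() exposes the raw word. A small sketch with an invented p2m entry (pfn 0x1234 -> mfn 0x5678):

pte_t pte = pfn_pte(0x1234, PAGE_KERNEL);   /* stores the mfn plus flags             */
unsigned long raw  = __pte_val(pte);        /* machine view: (0x5678 << 12) | flags  */
unsigned long phys = pte_val(pte);          /* translated:   (0x1234 << 12) | flags  */
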
-diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
-index 8c8d88c..78cd5a3 100644
---- a/include/asm-x86_64/processor.h
-+++ b/include/asm-x86_64/processor.h
-@@ -138,27 +138,51 @@ extern unsigned int init_intel_cacheinfo
- */
- extern unsigned long mmu_cr4_features;
-
-+#ifdef CONFIG_XEN
-+static inline void __unsupported_cr4(void)
-+{
-+ const char *msg = "Xen unsupported cr4 update\n";
-+
-+ (void)HYPERVISOR_console_io(
-+ CONSOLEIO_write, __builtin_strlen(msg), (char *)msg);
-+ BUG();
-+}
-+#endif
-+
- static inline void set_in_cr4 (unsigned long mask)
- {
- mmu_cr4_features |= mask;
-+#ifndef CONFIG_XEN
- __asm__("movq %%cr4,%%rax\n\t"
- "orq %0,%%rax\n\t"
- "movq %%rax,%%cr4\n"
- : : "irg" (mask)
- :"ax");
-+#else
-+ switch (mask) {
-+ case X86_CR4_OSFXSR:
-+ case X86_CR4_OSXMMEXCPT:
-+ break;
-+ default:
-+ __unsupported_cr4();
-+ }
-+#endif
- }
-
- static inline void clear_in_cr4 (unsigned long mask)
- {
- mmu_cr4_features &= ~mask;
-+#ifndef CONFIG_XEN
- __asm__("movq %%cr4,%%rax\n\t"
- "andq %0,%%rax\n\t"
- "movq %%rax,%%cr4\n"
- : : "irg" (~mask)
- :"ax");
-+#else
-+ __unsupported_cr4();
-+#endif
- }
-
--
- /*
- * User space process size. 47bits minus one guard page.
- */
-@@ -180,7 +204,9 @@ static inline void clear_in_cr4 (unsigne
- #define IO_BITMAP_BITS 65536
- #define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
- #define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
-+#ifndef CONFIG_X86_NO_TSS
- #define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
-+#endif
- #define INVALID_IO_BITMAP_OFFSET 0x8000
-
- struct i387_fxsave_struct {
-@@ -201,6 +227,7 @@ union i387_union {
- struct i387_fxsave_struct fxsave;
- };
-
-+#ifndef CONFIG_X86_NO_TSS
- struct tss_struct {
- u32 reserved1;
- u64 rsp0;
-@@ -224,8 +251,10 @@ struct tss_struct {
- unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
- } __attribute__((packed)) ____cacheline_aligned;
-
--extern struct cpuinfo_x86 boot_cpu_data;
- DECLARE_PER_CPU(struct tss_struct,init_tss);
-+#endif
-+
-+extern struct cpuinfo_x86 boot_cpu_data;
-
- #ifdef CONFIG_X86_VSMP
- #define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT)
-@@ -256,6 +285,9 @@ struct thread_struct {
- /* IO permissions. the bitmap could be moved into the GDT, that would make
- switch faster for a limited number of ioperm using tasks. -AK */
- int ioperm;
-+#ifdef CONFIG_XEN
-+ unsigned int iopl;
-+#endif
- unsigned long *io_bitmap_ptr;
- unsigned io_bitmap_max;
- /* cached TLS descriptors. */
-@@ -266,9 +298,11 @@ struct thread_struct {
- .rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
- }
-
-+#ifndef CONFIG_X86_NO_TSS
- #define INIT_TSS { \
- .rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
- }
-+#endif
-
- #define INIT_MMAP \
- { &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
-@@ -285,6 +319,7 @@ struct thread_struct {
- set_fs(USER_DS); \
- } while(0)
-
-+#ifndef CONFIG_XEN
- #define get_debugreg(var, register) \
- __asm__("movq %%db" #register ", %0" \
- :"=r" (var))
-@@ -292,6 +327,12 @@ struct thread_struct {
- __asm__("movq %0,%%db" #register \
- : /* no output */ \
- :"r" (value))
-+#else
-+#define get_debugreg(var, register) \
-+ var = HYPERVISOR_get_debugreg(register)
-+#define set_debugreg(value, register) \
-+ HYPERVISOR_set_debugreg(register, value)
-+#endif
-
- struct task_struct;
- struct mm_struct;
-@@ -479,4 +520,17 @@ extern int bootloader_type;
-
- #define HAVE_ARCH_PICK_MMAP_LAYOUT 1
-
-+static inline void modify_cpu_capabilities(struct cpuinfo_x86 *c)
-+{
-+#ifdef CONFIG_XEN
-+ clear_bit(X86_FEATURE_VME, c->x86_capability);
-+ clear_bit(X86_FEATURE_DE, c->x86_capability);
-+ clear_bit(X86_FEATURE_PSE, c->x86_capability);
-+ clear_bit(X86_FEATURE_PGE, c->x86_capability);
-+ clear_bit(X86_FEATURE_SEP, c->x86_capability);
-+ if (!(xen_start_info->flags & SIF_PRIVILEGED))
-+ clear_bit(X86_FEATURE_MTRR, c->x86_capability);
-+#endif
-+}
-+
- #endif /* __ASM_X86_64_PROCESSOR_H */
-diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
-index 9ccbb2c..c838964 100644
---- a/include/asm-x86_64/smp.h
-+++ b/include/asm-x86_64/smp.h
-@@ -34,7 +34,11 @@ struct pt_regs;
- extern cpumask_t cpu_present_mask;
- extern cpumask_t cpu_possible_map;
- extern cpumask_t cpu_online_map;
-+#ifndef CONFIG_XEN
- extern cpumask_t cpu_callout_map;
-+#else
-+#define cpu_callout_map cpu_possible_map
-+#endif
- extern cpumask_t cpu_initialized;
-
- /*
-@@ -72,11 +76,13 @@ static inline int num_booting_cpus(void)
-
- #define raw_smp_processor_id() read_pda(cpunumber)
-
-+#ifdef CONFIG_X86_LOCAL_APIC
- static inline int hard_smp_processor_id(void)
- {
- /* we don't want to mark this access volatile - bad code generation */
- return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
- }
-+#endif
-
- extern int safe_smp_processor_id(void);
- extern int __cpu_disable(void);
-@@ -100,6 +106,7 @@ extern u8 x86_cpu_to_apicid[NR_CPUS]; /*
- extern u8 x86_cpu_to_log_apicid[NR_CPUS];
- extern u8 bios_cpu_apicid[];
-
-+#ifdef CONFIG_X86_LOCAL_APIC
- static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
- {
- return cpus_addr(cpumask)[0];
-@@ -112,6 +119,7 @@ static inline int cpu_present_to_apicid(
- else
- return BAD_APICID;
- }
-+#endif
-
- #endif /* !ASSEMBLY */
-
-@@ -129,7 +137,7 @@ static inline int cpu_present_to_apicid(
- })
- #endif
-
--#ifndef __ASSEMBLY__
-+#if defined(CONFIG_X86_LOCAL_APIC) && !defined(__ASSEMBLY__)
- static __inline int logical_smp_processor_id(void)
- {
- /* we don't want to mark this access volatile - bad code generation */
-diff --git a/include/asm-x86_64/swiotlb.h b/include/asm-x86_64/swiotlb.h
-index 60757ef..ba0a592 100644
---- a/include/asm-x86_64/swiotlb.h
-+++ b/include/asm-x86_64/swiotlb.h
-@@ -1,5 +1,5 @@
- #ifndef _ASM_SWIOTLB_H
--#define _ASM_SWTIOLB_H 1
-+#define _ASM_SWIOTLB_H 1
-
- #include <linux/config.h>
-
-@@ -51,4 +51,20 @@ extern int swiotlb;
-
- extern void pci_swiotlb_init(void);
-
-+#ifdef CONFIG_XEN
-+
-+extern dma_addr_t swiotlb_map_page(struct device *hwdev, struct page *page,
-+ unsigned long offset, size_t size,
-+ enum dma_data_direction direction);
-+extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address,
-+ size_t size, enum dma_data_direction direction);
-+extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
-+
-+#define swiotlb_sync_single_range_for_cpu(dev, dma_handle, offset, size, direction) \
-+ swiotlb_sync_single_for_cpu(dev, (dma_handle) + (offset), size, direction)
-+#define swiotlb_sync_single_range_for_device(dev, dma_handle, offset, size, direction) \
-+ swiotlb_sync_single_for_device(dev, (dma_handle) + (offset), size, direction)
-+
-+#endif
-+
- #endif /* _ASM_SWTIOLB_H */
-diff --git a/include/asm-x86_64/synch_bitops.h b/include/asm-x86_64/synch_bitops.h
-new file mode 100644
-index 0000000..bc77cb1
---- /dev/null
-+++ b/include/asm-x86_64/synch_bitops.h
-@@ -0,0 +1 @@
-+#include <asm-i386/synch_bitops.h>
-diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
-index b7f6603..83203d5 100644
---- a/include/asm-x86_64/system.h
-+++ b/include/asm-x86_64/system.h
-@@ -4,10 +4,21 @@
- #include <linux/config.h>
- #include <linux/kernel.h>
- #include <asm/segment.h>
-+#ifdef CONFIG_XEN
-+#include <asm/synch_bitops.h>
-+#include <asm/hypervisor.h>
-+#include <xen/interface/arch-x86_64.h>
-+#endif
-
- #ifdef __KERNEL__
-
- #ifdef CONFIG_SMP
-+#define __vcpu_id smp_processor_id()
-+#else
-+#define __vcpu_id 0
-+#endif
-+
-+#ifdef CONFIG_SMP
- #define LOCK_PREFIX "lock ; "
- #else
- #define LOCK_PREFIX ""
-@@ -155,7 +166,13 @@ struct alt_instr {
- /*
- * Clear and set 'TS' bit respectively
- */
-+#ifndef CONFIG_XEN
- #define clts() __asm__ __volatile__ ("clts")
-+#define stts() write_cr0(8 | read_cr0())
-+#else
-+#define clts() (HYPERVISOR_fpu_taskswitch(0))
-+#define stts() (HYPERVISOR_fpu_taskswitch(1))
-+#endif
-
- static inline unsigned long read_cr0(void)
- {
-@@ -164,17 +181,27 @@ static inline unsigned long read_cr0(voi
- return cr0;
- }
-
-+#ifndef CONFIG_XEN
- static inline void write_cr0(unsigned long val)
- {
- asm volatile("movq %0,%%cr0" :: "r" (val));
- }
-+#endif
-
-+#ifndef CONFIG_XEN
- static inline unsigned long read_cr3(void)
- {
- unsigned long cr3;
- asm("movq %%cr3,%0" : "=r" (cr3));
- return cr3;
- }
-+#else
-+#define read_cr3() ({ \
-+ unsigned long __dummy; \
-+ asm("movq %%cr3,%0" : "=r" (__dummy)); \
-+ machine_to_phys(__dummy); \
-+})
-+#endif
-
- static inline unsigned long read_cr4(void)
- {
-@@ -183,12 +210,12 @@ static inline unsigned long read_cr4(voi
- return cr4;
- }
-
-+#ifndef CONFIG_XEN
- static inline void write_cr4(unsigned long val)
- {
- asm volatile("movq %0,%%cr4" :: "r" (val));
- }
--
--#define stts() write_cr0(8 | read_cr0())
-+#endif
-
- #define wbinvd() \
- __asm__ __volatile__ ("wbinvd": : :"memory");
-@@ -332,6 +359,8 @@ static inline unsigned long __cmpxchg(vo
-
- #define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
-
-+#ifndef CONFIG_XEN
-+
- /* interrupt control.. */
- #define local_save_flags(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
- #define local_irq_restore(x) __asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")
-@@ -370,6 +399,95 @@ static inline unsigned long __cmpxchg(vo
- /* used when interrupts are already enabled or to shutdown the processor */
- #define halt() __asm__ __volatile__("hlt": : :"memory")
-
-+#else
-+
-+/*
-+ * The use of 'barrier' in the following reflects their use as local-lock
-+ * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
-+ * critical operations are executed. All critical operations must complete
-+ * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
-+ * includes these barriers, for example.
-+ */
-+
-+#define __cli() \
-+do { \
-+ vcpu_info_t *_vcpu; \
-+ preempt_disable(); \
-+ _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
-+ _vcpu->evtchn_upcall_mask = 1; \
-+ preempt_enable_no_resched(); \
-+ barrier(); \
-+} while (0)
-+
-+#define __sti() \
-+do { \
-+ vcpu_info_t *_vcpu; \
-+ barrier(); \
-+ preempt_disable(); \
-+ _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
-+ _vcpu->evtchn_upcall_mask = 0; \
-+ barrier(); /* unmask then check (avoid races) */ \
-+ if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
-+ force_evtchn_callback(); \
-+ preempt_enable(); \
-+} while (0)
-+
-+#define __save_flags(x) \
-+do { \
-+ vcpu_info_t *_vcpu; \
-+ preempt_disable(); \
-+ _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
-+ (x) = _vcpu->evtchn_upcall_mask; \
-+ preempt_enable(); \
-+} while (0)
-+
-+#define __restore_flags(x) \
-+do { \
-+ vcpu_info_t *_vcpu; \
-+ barrier(); \
-+ preempt_disable(); \
-+ _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
-+ if ((_vcpu->evtchn_upcall_mask = (x)) == 0) { \
-+ barrier(); /* unmask then check (avoid races) */ \
-+ if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
-+ force_evtchn_callback(); \
-+ preempt_enable(); \
-+ } else \
-+ preempt_enable_no_resched(); \
-+} while (0)
-+
-+#define __save_and_cli(x) \
-+do { \
-+ vcpu_info_t *_vcpu; \
-+ preempt_disable(); \
-+ _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
-+ (x) = _vcpu->evtchn_upcall_mask; \
-+ _vcpu->evtchn_upcall_mask = 1; \
-+ preempt_enable_no_resched(); \
-+ barrier(); \
-+} while (0)
-+
-+#define local_irq_save(x) __save_and_cli(x)
-+#define local_irq_restore(x) __restore_flags(x)
-+#define local_save_flags(x) __save_flags(x)
-+#define local_irq_disable() __cli()
-+#define local_irq_enable() __sti()
-+
-+/* Cannot use preempt_enable() here as we would recurse in preempt_schedule(). */
-+#define irqs_disabled() \
-+({ int ___x; \
-+ vcpu_info_t *_vcpu; \
-+ preempt_disable(); \
-+ _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
-+ ___x = (_vcpu->evtchn_upcall_mask != 0); \
-+ preempt_enable_no_resched(); \
-+ ___x; })
-+
-+#define safe_halt() ((void)0)
-+#define halt() ((void)0)
-+
-+#endif
-+
- void cpu_idle_wait(void);
-
- extern unsigned long arch_align_stack(unsigned long sp);
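
Because __save_and_cli()/__restore_flags() keep the usual local_irq_save()/local_irq_restore() calling convention, existing critical sections need no source changes; only the mechanism moves from pushfq/popfq to the per-VCPU upcall mask. A usage sketch:

unsigned long flags;

local_irq_save(flags);          /* mask event-channel upcalls          */
/* ... critical section: no upcalls are delivered here ...             */
local_irq_restore(flags);       /* unmask; replay any pending upcall   */
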
-diff --git a/include/asm-x86_64/tlbflush.h b/include/asm-x86_64/tlbflush.h
-index 4a9c20e..c4519ee 100644
---- a/include/asm-x86_64/tlbflush.h
-+++ b/include/asm-x86_64/tlbflush.h
-@@ -5,6 +5,7 @@
- #include <linux/mm.h>
- #include <asm/processor.h>
-
-+#ifndef CONFIG_XEN
- #define __flush_tlb() \
- do { \
- unsigned long tmpreg; \
-@@ -15,11 +16,15 @@
- : "=r" (tmpreg) \
- :: "memory"); \
- } while (0)
-+#else
-+#define __flush_tlb() xen_tlb_flush()
-+#endif
-
- /*
- * Global pages have to be flushed a bit differently. Not a real
- * performance problem because this does not happen often.
- */
-+#ifndef CONFIG_XEN
- #define __flush_tlb_global() \
- do { \
- unsigned long tmpreg, cr4, cr4_orig; \
-@@ -36,13 +41,20 @@
- : "i" (~X86_CR4_PGE) \
- : "memory"); \
- } while (0)
-+#else
-+#define __flush_tlb_global() xen_tlb_flush()
-+#endif
-
- extern unsigned long pgkern_mask;
-
- #define __flush_tlb_all() __flush_tlb_global()
-
-+#ifndef CONFIG_XEN
- #define __flush_tlb_one(addr) \
-- __asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
-+ __asm__ __volatile__("invlpg %0": :"m" (*(char *)(addr)))
-+#else
-+#define __flush_tlb_one(addr) xen_invlpg((unsigned long)(addr))
-+#endif
-
-
- /*
-diff --git a/include/asm-x86_64/vga.h b/include/asm-x86_64/vga.h
-index ef0c0e5..4de2ca6 100644
---- a/include/asm-x86_64/vga.h
-+++ b/include/asm-x86_64/vga.h
-@@ -12,7 +12,7 @@
- * access the videoram directly without any black magic.
- */
-
--#define VGA_MAP_MEM(x) (unsigned long)phys_to_virt(x)
-+#define VGA_MAP_MEM(x) ((unsigned long)isa_bus_to_virt(x))
-
- #define vga_readb(x) (*(x))
- #define vga_writeb(x,y) (*(y) = (x))
-diff --git a/include/asm-x86_64/xor.h b/include/asm-x86_64/xor.h
-index f942fcc..cf90834 100644
---- a/include/asm-x86_64/xor.h
-+++ b/include/asm-x86_64/xor.h
-@@ -33,15 +33,31 @@
- * no advantages to be gotten from x86-64 here anyways.
- */
-
-+#include <linux/config.h>
-+
- typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t;
-
-+#ifndef CONFIG_XEN
-+
-+#define PHYS_CLTS(x) x
-+#define VIRT_CLTS(what)
-+#define COMMA ,
-+
-+#else
-+
-+#define PHYS_CLTS(x)
-+#define VIRT_CLTS(what) ((void)((current_thread_info()->status & TS_USEDFPU) ? : what()))
-+
-+#endif
-+
- /* Doesn't use gcc to save the XMM registers, because there is no easy way to
- tell it to do a clts before the register saving. */
- #define XMMS_SAVE do { \
- preempt_disable(); \
-+ VIRT_CLTS(clts); \
- asm volatile ( \
-- "movq %%cr0,%0 ;\n\t" \
-- "clts ;\n\t" \
-+ PHYS_CLTS("movq %%cr0,%0 ;\n\t") \
-+ PHYS_CLTS("clts ;\n\t") \
- "movups %%xmm0,(%1) ;\n\t" \
- "movups %%xmm1,0x10(%1) ;\n\t" \
- "movups %%xmm2,0x20(%1) ;\n\t" \
-@@ -58,10 +74,11 @@ typedef struct { unsigned long a,b; } __
- "movups 0x10(%1),%%xmm1 ;\n\t" \
- "movups 0x20(%1),%%xmm2 ;\n\t" \
- "movups 0x30(%1),%%xmm3 ;\n\t" \
-- "movq %0,%%cr0 ;\n\t" \
-+ PHYS_CLTS("movq %0,%%cr0 ;\n\t") \
- : \
- : "r" (cr0), "r" (xmm_save) \
- : "memory"); \
-+ VIRT_CLTS(stts); \
- preempt_enable(); \
- } while(0)
-
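A minimal sketch of the pattern XMMS_SAVE and its counterpart XMMS_RESTORE (whose tail is visible in the hunk above) serve in the xor routines; the caller shown here is assumed, not part of the patch. The PHYS_CLTS/VIRT_CLTS split matters because a deprivileged Xen guest cannot read or write %cr0 directly:

    static void xor_sse_2_sketch(unsigned long bytes,
                                 unsigned long *p1, unsigned long *p2)
    {
            unsigned long cr0;      /* consumed by PHYS_CLTS(); idle on Xen */
            xmm_store_t xmm_save[4] __attribute__((aligned(16)));

            XMMS_SAVE;      /* native: save %cr0, clts, spill xmm0-3;
                               Xen: the VIRT_CLTS(clts) paravirt path */
            /* ... SSE xor inner loop over p1/p2 ... */
            XMMS_RESTORE;   /* reload xmm0-3; native restores %cr0,
                               Xen issues VIRT_CLTS(stts) instead */
    }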
-diff --git a/include/linux/gfp.h b/include/linux/gfp.h
-index 7851e6b..cba61ae 100644
---- a/include/linux/gfp.h
-+++ b/include/linux/gfp.h
-@@ -98,7 +98,11 @@ static inline int gfp_zone(gfp_t gfp)
- */
-
- #ifndef HAVE_ARCH_FREE_PAGE
--static inline void arch_free_page(struct page *page, int order) { }
-+/*
-+ * If arch_free_page returns non-zero then the generic free_page code can
-+ * immediately bail: the arch-specific function has done all the work.
-+ */
-+static inline int arch_free_page(struct page *page, int order) { return 0; }
- #endif
-
- extern struct page *
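A hedged sketch of the caller-side change this new return value enables (the matching page_alloc.c hunk lives elsewhere in this patch; the exact code may differ):

    static void free_one_page_sketch(struct page *page, int order)
    {
            if (arch_free_page(page, order))
                    return;     /* e.g. a Xen foreign/ballooned page: the
                                   arch hook already disposed of it */
            /* ... normal buddy-allocator freeing continues ... */
    }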
-diff --git a/include/linux/highmem.h b/include/linux/highmem.h
-index 6bece92..42c6aa2 100644
---- a/include/linux/highmem.h
-+++ b/include/linux/highmem.h
-@@ -13,10 +13,16 @@
-
- /* declarations for linux/mm/highmem.c */
- unsigned int nr_free_highpages(void);
-+#ifdef CONFIG_XEN
-+void kmap_flush_unused(void);
-+#endif
-
- #else /* CONFIG_HIGHMEM */
-
- static inline unsigned int nr_free_highpages(void) { return 0; }
-+#ifdef CONFIG_XEN
-+static inline void kmap_flush_unused(void) { }
-+#endif
-
- static inline void *kmap(struct page *page)
- {
-diff --git a/include/linux/irq.h b/include/linux/irq.h
-index 6c5d4c8..4ffabba 100644
---- a/include/linux/irq.h
-+++ b/include/linux/irq.h
-@@ -94,6 +94,9 @@ irq_descp (int irq)
- #include <asm/hw_irq.h> /* the arch dependent stuff */
-
- extern int setup_irq(unsigned int irq, struct irqaction * new);
-+#ifdef CONFIG_XEN
-+extern int teardown_irq(unsigned int irq, struct irqaction * old);
-+#endif
-
- #ifdef CONFIG_GENERIC_HARDIRQS
- extern cpumask_t irq_affinity[NR_IRQS];
-diff --git a/include/linux/mm.h b/include/linux/mm.h
-index 498ff87..4a05b8d 100644
---- a/include/linux/mm.h
-+++ b/include/linux/mm.h
-@@ -166,6 +166,9 @@ extern unsigned int kobjsize(const void
- #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
- #define VM_MAPPED_COPY 0x01000000 /* T if mapped copy of data (nommu mmap) */
- #define VM_INSERTPAGE 0x02000000 /* The vma has had "vm_insert_page()" done on it */
-+#ifdef CONFIG_XEN
-+#define VM_FOREIGN 0x04000000 /* Has pages belonging to another VM */
-+#endif
-
- #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
- #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
-@@ -245,6 +248,9 @@ struct page {
- #if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
- spinlock_t ptl;
- #endif
-+#ifdef CONFIG_XEN
-+ struct list_head ballooned;
-+#endif
- };
- pgoff_t index; /* Our offset within mapping. */
- struct list_head lru; /* Pageout list, eg. active_list
-@@ -1013,6 +1019,13 @@ struct page *follow_page(struct vm_area_
- #define FOLL_GET 0x04 /* do get_page on page */
- #define FOLL_ANON 0x08 /* give ZERO_PAGE if no pgtable */
-
-+#ifdef CONFIG_XEN
-+typedef int (*pte_fn_t)(pte_t *pte, struct page *pte_page, unsigned long addr,
-+ void *data);
-+extern int generic_page_range(struct mm_struct *mm, unsigned long address,
-+ unsigned long size, pte_fn_t fn, void *data);
-+#endif
-+
- #ifdef CONFIG_PROC_FS
- void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
- #else
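A hedged sketch of generic_page_range() in use: the pte_fn_t callback runs once per PTE backing the range, and (presumably) a non-zero return aborts the walk. The pte_clear() signature is assumed from this kernel era:

    static int zap_pte_sketch(pte_t *pte, struct page *pte_page,
                              unsigned long addr, void *data)
    {
            pte_clear(NULL, addr, pte);  /* mm argument assumed unused here */
            return 0;
    }

    /* caller sketch: clear every PTE backing [vaddr, vaddr + size) */
    static int zap_range_sketch(struct mm_struct *mm, unsigned long vaddr,
                                unsigned long size)
    {
            return generic_page_range(mm, vaddr, size, zap_pte_sketch, NULL);
    }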
-diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
-index ad7cc22..0d20528 100644
---- a/include/linux/skbuff.h
-+++ b/include/linux/skbuff.h
-@@ -189,6 +189,8 @@ enum {
- * @local_df: allow local fragmentation
- * @cloned: Head may be cloned (check refcnt to be sure)
- * @nohdr: Payload reference only, must not modify header
-+ * @proto_csum_valid: Protocol csum validated since arriving at localhost
-+ * @proto_csum_blank: Protocol csum must be added before leaving localhost
- * @pkt_type: Packet class
- * @fclone: skbuff clone status
- * @ip_summed: Driver fed us an IP checksum
-@@ -265,7 +267,13 @@ struct sk_buff {
- nfctinfo:3;
- __u8 pkt_type:3,
- fclone:2,
-+#ifndef CONFIG_XEN
- ipvs_property:1;
-+#else
-+ ipvs_property:1,
-+ proto_csum_valid:1,
-+ proto_csum_blank:1;
-+#endif
- __be16 protocol;
-
- void (*destructor)(struct sk_buff *skb);
-@@ -321,7 +329,8 @@ static inline struct sk_buff *alloc_skb_
-
- extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
- unsigned int size,
-- gfp_t priority);
-+ gfp_t priority,
-+ int fclone);
- extern void kfree_skbmem(struct sk_buff *skb);
- extern struct sk_buff *skb_clone(struct sk_buff *skb,
- gfp_t priority);
-@@ -1051,7 +1060,7 @@ static inline struct sk_buff *__dev_allo
- return skb;
- }
- #else
--extern struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask);
-+extern struct sk_buff *__dev_alloc_skb(unsigned int length, gfp_t gfp_mask);
- #endif
-
- /**
-diff --git a/include/xen/balloon.h b/include/xen/balloon.h
-new file mode 100644
-index 0000000..7f2be02
---- /dev/null
-+++ b/include/xen/balloon.h
-@@ -0,0 +1,70 @@
-+/******************************************************************************
-+ * balloon.h
-+ *
-+ * Xen balloon driver - enables returning/claiming memory to/from Xen.
-+ *
-+ * Copyright (c) 2003, B Dragovic
-+ * Copyright (c) 2003-2004, M Williamson, K Fraser
-+ *
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __ASM_BALLOON_H__
-+#define __ASM_BALLOON_H__
-+
-+/*
-+ * Inform the balloon driver that it should allow some slop for device-driver
-+ * memory activities.
-+ */
-+extern void
-+balloon_update_driver_allowance(
-+ long delta);
-+
-+/* Allocate an empty low-memory page range. */
-+extern struct page *
-+balloon_alloc_empty_page_range(
-+ unsigned long nr_pages);
-+
-+/* Deallocate an empty page range, adding to the balloon. */
-+extern void
-+balloon_dealloc_empty_page_range(
-+ struct page *page, unsigned long nr_pages);
-+
-+/*
-+ * Prevent the balloon driver from changing the memory reservation during
-+ * a driver critical region.
-+ */
-+extern spinlock_t balloon_lock;
-+#define balloon_lock(__flags) spin_lock_irqsave(&balloon_lock, __flags)
-+#define balloon_unlock(__flags) spin_unlock_irqrestore(&balloon_lock, __flags)
-+
-+#endif /* __ASM_BALLOON_H__ */
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
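A hedged usage sketch of the reservation helpers declared above, as a backend driver might combine them; the sign convention of the allowance call is an assumption here, not taken from the patch:

    /* Borrow nr empty low-memory pages for device buffers, then return
     * them; the allowance calls tell the balloon driver to tolerate the
     * temporary slop. */
    struct page *pg = balloon_alloc_empty_page_range(nr);
    if (pg != NULL) {
            balloon_update_driver_allowance(nr);
            /* ... map foreign/machine frames into the empty range ... */
            balloon_dealloc_empty_page_range(pg, nr);
            balloon_update_driver_allowance(-nr);
    }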
-diff --git a/include/xen/driver_util.h b/include/xen/driver_util.h
-new file mode 100644
-index 0000000..fe45de5
---- /dev/null
-+++ b/include/xen/driver_util.h
-@@ -0,0 +1,26 @@
-+
-+#ifndef __ASM_XEN_DRIVER_UTIL_H__
-+#define __ASM_XEN_DRIVER_UTIL_H__
-+
-+#include <linux/config.h>
-+#include <linux/vmalloc.h>
-+
-+/* Allocate/destroy a 'vmalloc' VM area. */
-+extern struct vm_struct *alloc_vm_area(unsigned long size);
-+extern void free_vm_area(struct vm_struct *area);
-+
-+/* Lock an area so that PTEs are accessible in the current address space. */
-+extern void lock_vm_area(struct vm_struct *area);
-+extern void unlock_vm_area(struct vm_struct *area);
-+
-+#endif /* __ASM_XEN_DRIVER_UTIL_H__ */
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
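For illustration, a minimal sketch (error handling elided; not part of the patch) of the intended pairing of these helpers:

    struct vm_struct *area = alloc_vm_area(PAGE_SIZE); /* reserve VA space */
    lock_vm_area(area);     /* make the PTEs accessible for editing */
    /* ... map a grant or machine frame at area->addr here ... */
    unlock_vm_area(area);
    free_vm_area(area);     /* tear the reservation down again */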
-diff --git a/include/xen/evtchn.h b/include/xen/evtchn.h
-new file mode 100644
-index 0000000..594c1a6
---- /dev/null
-+++ b/include/xen/evtchn.h
-@@ -0,0 +1,123 @@
-+/******************************************************************************
-+ * evtchn.h
-+ *
-+ * Communication via Xen event channels.
-+ * Also definitions for the device that demuxes notifications to userspace.
-+ *
-+ * Copyright (c) 2004-2005, K A Fraser
-+ *
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __ASM_EVTCHN_H__
-+#define __ASM_EVTCHN_H__
-+
-+#include <linux/config.h>
-+#include <linux/interrupt.h>
-+#include <asm/hypervisor.h>
-+#include <asm/ptrace.h>
-+#include <asm/synch_bitops.h>
-+#include <xen/interface/event_channel.h>
-+#include <linux/smp.h>
-+
-+/*
-+ * LOW-LEVEL DEFINITIONS
-+ */
-+
-+/*
-+ * Dynamically bind an event source to an IRQ-like callback handler.
-+ * On some platforms this may not be implemented via the Linux IRQ subsystem.
-+ * The IRQ argument passed to the callback handler is the same as returned
-+ * from the bind call. It may not correspond to a Linux IRQ number.
-+ * Returns IRQ or negative errno.
-+ * UNBIND: Takes IRQ to unbind from; automatically closes the event channel.
-+ */
-+extern int bind_evtchn_to_irqhandler(
-+ unsigned int evtchn,
-+ irqreturn_t (*handler)(int, void *, struct pt_regs *),
-+ unsigned long irqflags,
-+ const char *devname,
-+ void *dev_id);
-+extern int bind_virq_to_irqhandler(
-+ unsigned int virq,
-+ unsigned int cpu,
-+ irqreturn_t (*handler)(int, void *, struct pt_regs *),
-+ unsigned long irqflags,
-+ const char *devname,
-+ void *dev_id);
-+extern int bind_ipi_to_irqhandler(
-+ unsigned int ipi,
-+ unsigned int cpu,
-+ irqreturn_t (*handler)(int, void *, struct pt_regs *),
-+ unsigned long irqflags,
-+ const char *devname,
-+ void *dev_id);
-+
-+/*
-+ * Common unbind function for all event sources. Takes IRQ to unbind from.
-+ * Automatically closes the underlying event channel (even for bindings
-+ * made with bind_evtchn_to_irqhandler()).
-+ */
-+extern void unbind_from_irqhandler(unsigned int irq, void *dev_id);
-+
-+extern void irq_resume(void);
-+
-+/* Entry point for notifications into Linux subsystems. */
-+asmlinkage void evtchn_do_upcall(struct pt_regs *regs);
-+
-+/* Entry point for notifications into the userland character device. */
-+extern void evtchn_device_upcall(int port);
-+
-+extern void mask_evtchn(int port);
-+extern void unmask_evtchn(int port);
-+
-+static inline void clear_evtchn(int port)
-+{
-+ shared_info_t *s = HYPERVISOR_shared_info;
-+ synch_clear_bit(port, &s->evtchn_pending[0]);
-+}
-+
-+static inline void notify_remote_via_evtchn(int port)
-+{
-+ evtchn_op_t op;
-+	op.cmd = EVTCHNOP_send;
-+ op.u.send.port = port;
-+ (void)HYPERVISOR_event_channel_op(&op);
-+}
-+
-+/*
-+ * Unlike notify_remote_via_evtchn(), this is safe to use across
-+ * save/restore. Notifications on a broken connection are silently dropped.
-+ */
-+extern void notify_remote_via_irq(int irq);
-+
-+#endif /* __ASM_EVTCHN_H__ */
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
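A usage example for the dynamic-binding API declared above (a sketch; VIRQ_TIMER is assumed from the Xen interface headers):

    static irqreturn_t timer_upcall(int irq, void *dev_id,
                                    struct pt_regs *regs)
    {
            /* irq is the value the bind call returned, which need not be
               a real Linux IRQ number (see the comment above) */
            return IRQ_HANDLED;
    }

    int irq = bind_virq_to_irqhandler(VIRQ_TIMER, 0 /* cpu */, timer_upcall,
                                      SA_INTERRUPT, "timer", NULL);
    if (irq < 0)
            return irq;                 /* bind failed */
    /* ... */
    unbind_from_irqhandler(irq, NULL);  /* also closes the event channel */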
-diff --git a/include/xen/features.h b/include/xen/features.h
-new file mode 100644
-index 0000000..fd47bef
---- /dev/null
-+++ b/include/xen/features.h
-@@ -0,0 +1,20 @@
-+/******************************************************************************
-+ * features.h
-+ *
-+ * Query the features reported by Xen.
-+ *
-+ * Copyright (c) 2006, Ian Campbell
-+ */
-+
-+#ifndef __ASM_XEN_FEATURES_H__
-+#define __ASM_XEN_FEATURES_H__
-+
-+#include <xen/interface/version.h>
-+
-+extern void setup_xen_features(void);
-+
-+extern u8 xen_features[XENFEAT_NR_SUBMAPS * 32];
-+
-+#define xen_feature(flag) (xen_features[flag])
-+
-+#endif /* __ASM_XEN_FEATURES_H__ */
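Usage is a single predicate per flag; for example (a sketch, with the XENFEAT_auto_translated_physmap flag assumed from the interface headers pulled in above):

    if (xen_feature(XENFEAT_auto_translated_physmap))
            return;     /* hypervisor maintains the p2m; skip manual
                           pfn<->mfn translation */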
-diff --git a/include/xen/foreign_page.h b/include/xen/foreign_page.h
-new file mode 100644
-index 0000000..0af4e13
---- /dev/null
-+++ b/include/xen/foreign_page.h
-@@ -0,0 +1,40 @@
-+/******************************************************************************
-+ * foreign_page.h
-+ *
-+ * Provide a "foreign" page type that is owned by a foreign allocator and
-+ * not the normal buddy allocator in page_alloc.c
-+ *
-+ * Copyright (c) 2004, K A Fraser
-+ */
-+
-+#ifndef __ASM_XEN_FOREIGN_PAGE_H__
-+#define __ASM_XEN_FOREIGN_PAGE_H__
-+
-+#define PG_foreign PG_arch_1
-+
-+#define PageForeign(page) test_bit(PG_foreign, &(page)->flags)
-+
-+#define SetPageForeign(page, dtor) do { \
-+ set_bit(PG_foreign, &(page)->flags); \
-+ (page)->mapping = (void *)dtor; \
-+} while (0)
-+
-+#define ClearPageForeign(page) do { \
-+ clear_bit(PG_foreign, &(page)->flags); \
-+ (page)->mapping = NULL; \
-+} while (0)
-+
-+#define PageForeignDestructor(page) \
-+ ( (void (*) (struct page *)) (page)->mapping )
-+
-+#endif /* __ASM_XEN_FOREIGN_PAGE_H__ */
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
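A minimal lifecycle sketch (names hypothetical, not part of the patch): the owning driver stashes a destructor in page->mapping via SetPageForeign(), and the free path dispatches to it instead of the buddy allocator:

    static void my_page_release(struct page *page)      /* hypothetical */
    {
            /* hand the page back to its foreign owner */
    }

    SetPageForeign(page, my_page_release);
    /* ... later, on the free path ... */
    if (PageForeign(page)) {
            PageForeignDestructor(page)(page);
            return;
    }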
-diff --git a/include/xen/gnttab.h b/include/xen/gnttab.h
-new file mode 100644
-index 0000000..58aeb49
---- /dev/null
-+++ b/include/xen/gnttab.h
-@@ -0,0 +1,120 @@
-+/******************************************************************************
-+ * gnttab.h
-+ *
-+ * Two sets of functionality:
-+ * 1. Granting foreign access to our memory reservation.
-+ * 2. Accessing others' memory reservations via grant references.
-+ * (i.e., mechanisms for both sender and recipient of grant references)
-+ *
-+ * Copyright (c) 2004-2005, K A Fraser
-+ * Copyright (c) 2005, Christopher Clark
-+ *
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __ASM_GNTTAB_H__
-+#define __ASM_GNTTAB_H__
-+
-+#include <linux/config.h>
-+#include <asm/hypervisor.h>
-+#include <xen/interface/grant_table.h>
-+
-+/* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */
-+#ifdef __ia64__
-+#define NR_GRANT_FRAMES 1
-+#else
-+#define NR_GRANT_FRAMES 4
-+#endif
-+
-+struct gnttab_free_callback {
-+ struct gnttab_free_callback *next;
-+ void (*fn)(void *);
-+ void *arg;
-+ u16 count;
-+};
-+
-+int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
-+ int readonly);
-+
-+/*
-+ * End access through the given grant reference, iff the grant entry is no
-+ * longer in use. Return 1 if the grant entry was freed, 0 if it is still in
-+ * use.
-+ */
-+int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly);
-+
-+/*
-+ * Eventually end access through the given grant reference, and once that
-+ * access has been ended, free the given page too. Access will be ended
-+ * immediately iff the grant entry is not in use, otherwise it will happen
-+ * some time later. page may be 0, in which case no freeing will occur.
-+ */
-+void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
-+ unsigned long page);
-+
-+int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn);
-+
-+unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref);
-+unsigned long gnttab_end_foreign_transfer(grant_ref_t ref);
-+
-+int gnttab_query_foreign_access(grant_ref_t ref);
-+
-+/*
-+ * operations on reserved batches of grant references
-+ */
-+int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head);
-+
-+void gnttab_free_grant_reference(grant_ref_t ref);
-+
-+void gnttab_free_grant_references(grant_ref_t head);
-+
-+int gnttab_claim_grant_reference(grant_ref_t *pprivate_head);
-+
-+void gnttab_release_grant_reference(grant_ref_t *private_head,
-+ grant_ref_t release);
-+
-+void gnttab_request_free_callback(struct gnttab_free_callback *callback,
-+ void (*fn)(void *), void *arg, u16 count);
-+
-+void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
-+ unsigned long frame, int readonly);
-+
-+void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
-+ unsigned long pfn);
-+
-+#ifdef __ia64__
-+#define gnttab_map_vaddr(map) __va(map.dev_bus_addr)
-+#else
-+#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
-+#endif
-+
-+#endif /* __ASM_GNTTAB_H__ */
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
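A hedged sketch of the granting side of this API (error handling elided; that a negative return signals failure is an assumption):

    /* 1. Offer one of our frames to domain domid, read-only. */
    int ref = gnttab_grant_foreign_access(domid, frame_mfn, 1 /* readonly */);
    if (ref < 0)
            /* table full; gnttab_request_free_callback() can queue a
               retry */
            return ref;

    /* 2. The peer maps it via ref; later, try to revoke: */
    if (!gnttab_end_foreign_access_ref(ref, 1))
            /* still mapped by the peer: defer via
               gnttab_end_foreign_access(ref, 1, page) instead */;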
-diff --git a/include/xen/interface/acm.h b/include/xen/interface/acm.h
-new file mode 100644
-index 0000000..a7a5a57
---- /dev/null
-+++ b/include/xen/interface/acm.h
-@@ -0,0 +1,181 @@
-+/*
-+ * acm.h: Xen access control module interface definitions
-+ *
-+ * Reiner Sailer <sailer@watson.ibm.com>
-+ * Copyright (c) 2005, International Business Machines Corporation.
-+ */
-+
-+#ifndef _XEN_PUBLIC_ACM_H
-+#define _XEN_PUBLIC_ACM_H
-+
-+#include "xen.h"
-+#include "sched_ctl.h"
-+
-+/* if ACM_DEBUG is defined, all hooks should
-+ * print a short trace message (comment it out
-+ * when not in testing mode).
-+ */
-+/* #define ACM_DEBUG */
-+
-+#ifdef ACM_DEBUG
-+# define printkd(fmt, args...) printk(fmt,## args)
-+#else
-+# define printkd(fmt, args...)
-+#endif
-+
-+/* default ssid reference value if not supplied */
-+#define ACM_DEFAULT_SSID 0x0
-+#define ACM_DEFAULT_LOCAL_SSID 0x0
-+
-+/* Internal ACM ERROR types */
-+#define ACM_OK 0
-+#define ACM_UNDEF -1
-+#define ACM_INIT_SSID_ERROR -2
-+#define ACM_INIT_SOID_ERROR -3
-+#define ACM_ERROR -4
-+
-+/* External ACCESS DECISIONS */
-+#define ACM_ACCESS_PERMITTED 0
-+#define ACM_ACCESS_DENIED -111
-+#define ACM_NULL_POINTER_ERROR -200
-+
-+/* primary policy in lower 4 bits */
-+#define ACM_NULL_POLICY 0
-+#define ACM_CHINESE_WALL_POLICY 1
-+#define ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY 2
-+#define ACM_POLICY_UNDEFINED 15
-+
-+/* combinations have secondary policy component in higher 4bit */
-+#define ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY \
-+ ((ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY << 4) | ACM_CHINESE_WALL_POLICY)
-+
-+/* policy: */
-+#define ACM_POLICY_NAME(X) \
-+ ((X) == (ACM_NULL_POLICY)) ? "NULL policy" : \
-+ ((X) == (ACM_CHINESE_WALL_POLICY)) ? "CHINESE WALL policy" : \
-+ ((X) == (ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "SIMPLE TYPE ENFORCEMENT policy" : \
-+ ((X) == (ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "CHINESE WALL AND SIMPLE TYPE ENFORCEMENT policy" : \
-+ "UNDEFINED policy"
-+
-+/* the following policy versions must be increased
-+ * whenever the interpretation of the related
-+ * policy's data structure changes
-+ */
-+#define ACM_POLICY_VERSION 1
-+#define ACM_CHWALL_VERSION 1
-+#define ACM_STE_VERSION 1
-+
-+/* defines a ssid reference used by xen */
-+typedef uint32_t ssidref_t;
-+
-+/* hooks that are known to domains */
-+enum acm_hook_type {NONE=0, SHARING};
-+
-+/* -------security policy relevant type definitions-------- */
-+
-+/* type identifier; compares to "equal" or "not equal" */
-+typedef uint16_t domaintype_t;
-+
-+/* CHINESE WALL POLICY DATA STRUCTURES
-+ *
-+ * current accumulated conflict type set:
-+ * When a domain is started and has a type that is in
-+ * a conflict set, the conflicting types are incremented in
-+ * the aggregate set. When a domain is destroyed, the
-+ * conflicting types to its type are decremented.
-+ * If a domain has multiple types, this procedure works over
-+ * all those types.
-+ *
-+ * conflict_aggregate_set[i] holds the number of
-+ * running domains that have a conflict with type i.
-+ *
-+ * running_types[i] holds the number of running domains
-+ * that include type i in their ssidref-referenced type set
-+ *
-+ * conflict_sets[i][j] is "0" if type j has no conflict
-+ * with type i and is "1" otherwise.
-+ */
-+/* high-16 = version, low-16 = check magic */
-+#define ACM_MAGIC 0x0001debc
-+
-+/* each offset is in bytes from the start of the
-+ * struct it is part of */
-+
-+/* each buffer consists of all policy information for
-+ * the respective policy given in the policy code
-+ *
-+ * acm_policy_buffer, acm_chwall_policy_buffer,
-+ * and acm_ste_policy_buffer need to stay 32-bit aligned
-+ * because we create binary policies also with external
-+ * tools that assume packed representations (e.g. the java tool)
-+ */
-+struct acm_policy_buffer {
-+ uint32_t policy_version; /* ACM_POLICY_VERSION */
-+ uint32_t magic;
-+ uint32_t len;
-+ uint32_t primary_policy_code;
-+ uint32_t primary_buffer_offset;
-+ uint32_t secondary_policy_code;
-+ uint32_t secondary_buffer_offset;
-+};
-+
-+struct acm_chwall_policy_buffer {
-+ uint32_t policy_version; /* ACM_CHWALL_VERSION */
-+ uint32_t policy_code;
-+ uint32_t chwall_max_types;
-+ uint32_t chwall_max_ssidrefs;
-+ uint32_t chwall_max_conflictsets;
-+ uint32_t chwall_ssid_offset;
-+ uint32_t chwall_conflict_sets_offset;
-+ uint32_t chwall_running_types_offset;
-+ uint32_t chwall_conflict_aggregate_offset;
-+};
-+
-+struct acm_ste_policy_buffer {
-+ uint32_t policy_version; /* ACM_STE_VERSION */
-+ uint32_t policy_code;
-+ uint32_t ste_max_types;
-+ uint32_t ste_max_ssidrefs;
-+ uint32_t ste_ssid_offset;
-+};
-+
-+struct acm_stats_buffer {
-+ uint32_t magic;
-+ uint32_t len;
-+ uint32_t primary_policy_code;
-+ uint32_t primary_stats_offset;
-+ uint32_t secondary_policy_code;
-+ uint32_t secondary_stats_offset;
-+};
-+
-+struct acm_ste_stats_buffer {
-+ uint32_t ec_eval_count;
-+ uint32_t gt_eval_count;
-+ uint32_t ec_denied_count;
-+ uint32_t gt_denied_count;
-+ uint32_t ec_cachehit_count;
-+ uint32_t gt_cachehit_count;
-+};
-+
-+struct acm_ssid_buffer {
-+ uint32_t len;
-+ ssidref_t ssidref;
-+ uint32_t primary_policy_code;
-+ uint32_t primary_max_types;
-+ uint32_t primary_types_offset;
-+ uint32_t secondary_policy_code;
-+ uint32_t secondary_max_types;
-+ uint32_t secondary_types_offset;
-+};
-+
-+#endif
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
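To make the Chinese Wall accounting described above concrete, a small illustrative sketch; the globals stand in for the policy-buffer fields and are hypothetical, not part of the patch:

    /* conflict_sets[i][j] != 0 iff types i and j must not run together */
    static void chwall_domain_started(domaintype_t t)
    {
            unsigned int j;

            running_types[t]++;
            for (j = 0; j < chwall_max_types; j++)
                    if (conflict_sets[t][j])
                            conflict_aggregate_set[j]++;
    }
    /* Admission control then refuses a new domain of type u while
     * conflict_aggregate_set[u] is non-zero; destroying a domain
     * decrements the same counters. */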
-diff --git a/include/xen/interface/acm_ops.h b/include/xen/interface/acm_ops.h
-new file mode 100644
-index 0000000..76c35db
---- /dev/null
-+++ b/include/xen/interface/acm_ops.h
-@@ -0,0 +1,96 @@
-+/*
-+ * acm_ops.h: Xen access control module hypervisor commands
-+ *
-+ * Reiner Sailer <sailer@watson.ibm.com>
-+ * Copyright (c) 2005, International Business Machines Corporation.
-+ */
-+
-+#ifndef __XEN_PUBLIC_ACM_OPS_H__
-+#define __XEN_PUBLIC_ACM_OPS_H__
-+
-+#include "xen.h"
-+#include "sched_ctl.h"
-+
-+/*
-+ * Make sure you increment the interface version whenever you modify this file!
-+ * This makes sure that old versions of acm tools will stop working in a
-+ * well-defined way (rather than crashing the machine, for instance).
-+ */
-+#define ACM_INTERFACE_VERSION 0xAAAA0005
-+
-+/************************************************************************/
-+
-+#define ACM_SETPOLICY 4
-+struct acm_setpolicy {
-+ /* OUT variables */
-+ void *pushcache;
-+ uint32_t pushcache_size;
-+};
-+
-+
-+#define ACM_GETPOLICY 5
-+struct acm_getpolicy {
-+ /* OUT variables */
-+ void *pullcache;
-+ uint32_t pullcache_size;
-+};
-+
-+
-+#define ACM_DUMPSTATS 6
-+struct acm_dumpstats {
-+ void *pullcache;
-+ uint32_t pullcache_size;
-+};
-+
-+
-+#define ACM_GETSSID 7
-+enum get_type {UNSET=0, SSIDREF, DOMAINID};
-+struct acm_getssid {
-+ enum get_type get_ssid_by;
-+ union {
-+ domaintype_t domainid;
-+ ssidref_t ssidref;
-+ } id;
-+ void *ssidbuf;
-+ uint32_t ssidbuf_size;
-+};
-+
-+#define ACM_GETDECISION 8
-+struct acm_getdecision {
-+ enum get_type get_decision_by1; /* in */
-+ enum get_type get_decision_by2;
-+ union {
-+ domaintype_t domainid;
-+ ssidref_t ssidref;
-+ } id1;
-+ union {
-+ domaintype_t domainid;
-+ ssidref_t ssidref;
-+ } id2;
-+ enum acm_hook_type hook;
-+ int acm_decision; /* out */
-+};
-+
-+struct acm_op {
-+ uint32_t cmd;
-+ uint32_t interface_version; /* ACM_INTERFACE_VERSION */
-+ union {
-+ struct acm_setpolicy setpolicy;
-+ struct acm_getpolicy getpolicy;
-+ struct acm_dumpstats dumpstats;
-+ struct acm_getssid getssid;
-+ struct acm_getdecision getdecision;
-+ } u;
-+};
-+
-+#endif /* __XEN_PUBLIC_ACM_OPS_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff --git a/include/xen/interface/arch-ia64.h b/include/xen/interface/arch-ia64.h
-new file mode 100644
-index 0000000..316df5c
---- /dev/null
-+++ b/include/xen/interface/arch-ia64.h
-@@ -0,0 +1,302 @@
-+/******************************************************************************
-+ * arch-ia64/hypervisor-if.h
-+ *
-+ * Guest OS interface to IA64 Xen.
-+ */
-+
-+#ifndef __HYPERVISOR_IF_IA64_H__
-+#define __HYPERVISOR_IF_IA64_H__
-+
-+#ifdef __XEN__
-+#define __DEFINE_GUEST_HANDLE(name, type) \
-+ typedef struct { type *p; } __guest_handle_ ## name
-+#else
-+#define __DEFINE_GUEST_HANDLE(name, type) \
-+ typedef type * __guest_handle_ ## name
-+#endif
-+
-+#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
-+#define GUEST_HANDLE(name) __guest_handle_ ## name
-+
-+#ifndef __ASSEMBLY__
-+/* Guest handles for primitive C types. */
-+__DEFINE_GUEST_HANDLE(uchar, unsigned char);
-+__DEFINE_GUEST_HANDLE(uint, unsigned int);
-+__DEFINE_GUEST_HANDLE(ulong, unsigned long);
-+DEFINE_GUEST_HANDLE(char);
-+DEFINE_GUEST_HANDLE(int);
-+DEFINE_GUEST_HANDLE(long);
-+DEFINE_GUEST_HANDLE(void);
-+#endif
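For orientation (a sketch, not from the patch): the handle macros give hypercall arguments one spelling on both sides of the interface. Inside Xen (__XEN__) a handle is a one-member struct, so differently typed handles cannot be mixed up silently; in the guest it decays to a plain pointer typedef:

    GUEST_HANDLE(ulong) buffer;   /* struct { unsigned long *p; } in Xen;
                                     just unsigned long * in the guest */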
-+
-+/* Maximum number of virtual CPUs in multi-processor guests. */
-+/* WARNING: before changing this, check that shared_info fits on a page */
-+#define MAX_VIRT_CPUS 4
-+
-+#ifndef __ASSEMBLY__
-+
-+#define MAX_NR_SECTION 32 /* at most 32 memory holes */
-+typedef struct {
-+ unsigned long start; /* start of memory hole */
-+ unsigned long end; /* end of memory hole */
-+} mm_section_t;
-+
-+typedef struct {
-+ unsigned long mfn : 56;
-+ unsigned long type: 8;
-+} pmt_entry_t;
-+
-+#define GPFN_MEM (0UL << 56) /* Guest pfn is normal mem */
-+#define GPFN_FRAME_BUFFER (1UL << 56) /* VGA framebuffer */
-+#define GPFN_LOW_MMIO (2UL << 56) /* Low MMIO range */
-+#define GPFN_PIB (3UL << 56) /* PIB base */
-+#define GPFN_IOSAPIC (4UL << 56) /* IOSAPIC base */
-+#define GPFN_LEGACY_IO (5UL << 56) /* Legacy I/O base */
-+#define GPFN_GFW (6UL << 56) /* Guest Firmware */
-+#define GPFN_HIGH_MMIO (7UL << 56) /* High MMIO range */
-+
-+#define GPFN_IO_MASK (7UL << 56) /* Guest pfn is I/O type */
-+#define GPFN_INV_MASK (31UL << 59) /* Guest pfn is invalid */
-+
-+#define INVALID_MFN (~0UL)
-+
-+/*
-+ * NB. This may become a 64-bit count with no shift. If this happens then the
-+ * structure size will still be 8 bytes, so no other alignments will change.
-+ */
-+typedef struct {
-+ unsigned int tsc_bits; /* 0: 32 bits read from the CPU's TSC. */
-+ unsigned int tsc_bitshift; /* 4: 'tsc_bits' uses N:N+31 of TSC. */
-+} tsc_timestamp_t; /* 8 bytes */
-+
-+struct pt_fpreg {
-+ union {
-+ unsigned long bits[2];
-+ long double __dummy; /* force 16-byte alignment */
-+ } u;
-+};
-+
-+typedef struct cpu_user_regs {
-+ /* The following registers are saved by SAVE_MIN: */
-+ unsigned long b6; /* scratch */
-+ unsigned long b7; /* scratch */
-+
-+ unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */
-+ unsigned long ar_ssd; /* reserved for future use (scratch) */
-+
-+ unsigned long r8; /* scratch (return value register 0) */
-+ unsigned long r9; /* scratch (return value register 1) */
-+ unsigned long r10; /* scratch (return value register 2) */
-+ unsigned long r11; /* scratch (return value register 3) */
-+
-+ unsigned long cr_ipsr; /* interrupted task's psr */
-+ unsigned long cr_iip; /* interrupted task's instruction pointer */
-+ unsigned long cr_ifs; /* interrupted task's function state */
-+
-+ unsigned long ar_unat; /* interrupted task's NaT register (preserved) */
-+ unsigned long ar_pfs; /* prev function state */
-+ unsigned long ar_rsc; /* RSE configuration */
-+ /* The following two are valid only if cr_ipsr.cpl > 0: */
-+ unsigned long ar_rnat; /* RSE NaT */
-+ unsigned long ar_bspstore; /* RSE bspstore */
-+
-+ unsigned long pr; /* 64 predicate registers (1 bit each) */
-+ unsigned long b0; /* return pointer (bp) */
-+ unsigned long loadrs; /* size of dirty partition << 16 */
-+
-+ unsigned long r1; /* the gp pointer */
-+ unsigned long r12; /* interrupted task's memory stack pointer */
-+ unsigned long r13; /* thread pointer */
-+
-+ unsigned long ar_fpsr; /* floating point status (preserved) */
-+ unsigned long r15; /* scratch */
-+
-+ /* The remaining registers are NOT saved for system calls. */
-+
-+ unsigned long r14; /* scratch */
-+ unsigned long r2; /* scratch */
-+ unsigned long r3; /* scratch */
-+ unsigned long r16; /* scratch */
-+ unsigned long r17; /* scratch */
-+ unsigned long r18; /* scratch */
-+ unsigned long r19; /* scratch */
-+ unsigned long r20; /* scratch */
-+ unsigned long r21; /* scratch */
-+ unsigned long r22; /* scratch */
-+ unsigned long r23; /* scratch */
-+ unsigned long r24; /* scratch */
-+ unsigned long r25; /* scratch */
-+ unsigned long r26; /* scratch */
-+ unsigned long r27; /* scratch */
-+ unsigned long r28; /* scratch */
-+ unsigned long r29; /* scratch */
-+ unsigned long r30; /* scratch */
-+ unsigned long r31; /* scratch */
-+ unsigned long ar_ccv; /* compare/exchange value (scratch) */
-+
-+ /*
-+ * Floating point registers that the kernel considers scratch:
-+ */
-+ struct pt_fpreg f6; /* scratch */
-+ struct pt_fpreg f7; /* scratch */
-+ struct pt_fpreg f8; /* scratch */
-+ struct pt_fpreg f9; /* scratch */
-+ struct pt_fpreg f10; /* scratch */
-+ struct pt_fpreg f11; /* scratch */
-+ unsigned long r4; /* preserved */
-+ unsigned long r5; /* preserved */
-+ unsigned long r6; /* preserved */
-+ unsigned long r7; /* preserved */
-+ unsigned long eml_unat; /* used for emulating instruction */
-+    unsigned long rfi_pfs;              /* used for emulating rfi */
-+
-+} cpu_user_regs_t;
-+
-+typedef union {
-+ unsigned long value;
-+ struct {
-+ int a_int:1;
-+ int a_from_int_cr:1;
-+ int a_to_int_cr:1;
-+ int a_from_psr:1;
-+ int a_from_cpuid:1;
-+ int a_cover:1;
-+ int a_bsw:1;
-+ long reserved:57;
-+ };
-+} vac_t;
-+
-+typedef union {
-+ unsigned long value;
-+ struct {
-+ int d_vmsw:1;
-+ int d_extint:1;
-+ int d_ibr_dbr:1;
-+ int d_pmc:1;
-+ int d_to_pmd:1;
-+ int d_itm:1;
-+ long reserved:58;
-+ };
-+} vdc_t;
-+
-+typedef struct {
-+ vac_t vac;
-+ vdc_t vdc;
-+ unsigned long virt_env_vaddr;
-+ unsigned long reserved1[29];
-+ unsigned long vhpi;
-+ unsigned long reserved2[95];
-+ union {
-+ unsigned long vgr[16];
-+ unsigned long bank1_regs[16]; // bank1 regs (r16-r31) when bank0 active
-+ };
-+ union {
-+ unsigned long vbgr[16];
-+ unsigned long bank0_regs[16]; // bank0 regs (r16-r31) when bank1 active
-+ };
-+ unsigned long vnat;
-+ unsigned long vbnat;
-+ unsigned long vcpuid[5];
-+ unsigned long reserved3[11];
-+ unsigned long vpsr;
-+ unsigned long vpr;
-+ unsigned long reserved4[76];
-+ union {
-+ unsigned long vcr[128];
-+ struct {
-+ unsigned long dcr; // CR0
-+ unsigned long itm;
-+ unsigned long iva;
-+ unsigned long rsv1[5];
-+ unsigned long pta; // CR8
-+ unsigned long rsv2[7];
-+ unsigned long ipsr; // CR16
-+ unsigned long isr;
-+ unsigned long rsv3;
-+ unsigned long iip;
-+ unsigned long ifa;
-+ unsigned long itir;
-+ unsigned long iipa;
-+ unsigned long ifs;
-+ unsigned long iim; // CR24
-+ unsigned long iha;
-+ unsigned long rsv4[38];
-+ unsigned long lid; // CR64
-+ unsigned long ivr;
-+ unsigned long tpr;
-+ unsigned long eoi;
-+ unsigned long irr[4];
-+ unsigned long itv; // CR72
-+ unsigned long pmv;
-+ unsigned long cmcv;
-+ unsigned long rsv5[5];
-+ unsigned long lrr0; // CR80
-+ unsigned long lrr1;
-+ unsigned long rsv6[46];
-+ };
-+ };
-+ union {
-+ unsigned long reserved5[128];
-+ struct {
-+ unsigned long precover_ifs;
-+ unsigned long unat; // not sure if this is needed until NaT arch is done
-+ int interrupt_collection_enabled; // virtual psr.ic
-+ int interrupt_delivery_enabled; // virtual psr.i
-+ int pending_interruption;
-+ int incomplete_regframe; // see SDM vol2 6.8
-+ unsigned long delivery_mask[4];
-+ int metaphysical_mode; // 1 = use metaphys mapping, 0 = use virtual
-+ int banknum; // 0 or 1, which virtual register bank is active
-+ unsigned long rrs[8]; // region registers
-+ unsigned long krs[8]; // kernel registers
-+ unsigned long pkrs[8]; // protection key registers
-+ unsigned long tmp[8]; // temp registers (e.g. for hyperprivops)
-+        // FIXME: tmp[8] temporarily being used for virtual psr.pp
-+ };
-+ };
-+ unsigned long reserved6[3456];
-+ unsigned long vmm_avail[128];
-+ unsigned long reserved7[4096];
-+} mapped_regs_t;
-+
-+typedef struct {
-+ mapped_regs_t *privregs;
-+ int evtchn_vector;
-+} arch_vcpu_info_t;
-+
-+typedef mapped_regs_t vpd_t;
-+
-+typedef struct {
-+ unsigned int flags;
-+ unsigned long start_info_pfn;
-+} arch_shared_info_t;
-+
-+typedef struct vcpu_guest_context {
-+#define VGCF_FPU_VALID (1<<0)
-+#define VGCF_VMX_GUEST (1<<1)
-+#define VGCF_IN_KERNEL (1<<2)
-+ unsigned long flags; /* VGCF_* flags */
-+ unsigned long pt_base; /* PMT table base */
-+ unsigned long share_io_pg; /* Shared page for I/O emulation */
-+ unsigned long sys_pgnr; /* System pages out of domain memory */
-+ unsigned long vm_assist; /* VMASST_TYPE_* bitmap, now none on IPF */
-+
-+ cpu_user_regs_t regs;
-+ arch_vcpu_info_t vcpu;
-+ arch_shared_info_t shared;
-+} vcpu_guest_context_t;
-+DEFINE_GUEST_HANDLE(vcpu_guest_context_t);
-+
-+#endif /* !__ASSEMBLY__ */
-+
-+#endif /* __HYPERVISOR_IF_IA64_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff --git a/include/xen/interface/arch-x86_32.h b/include/xen/interface/arch-x86_32.h
-new file mode 100644
-index 0000000..f7079eb
---- /dev/null
-+++ b/include/xen/interface/arch-x86_32.h
-@@ -0,0 +1,181 @@
-+/******************************************************************************
-+ * arch-x86_32.h
-+ *
-+ * Guest OS interface to x86 32-bit Xen.
-+ *
-+ * Copyright (c) 2004, K A Fraser
-+ */
-+
-+#ifndef __XEN_PUBLIC_ARCH_X86_32_H__
-+#define __XEN_PUBLIC_ARCH_X86_32_H__
-+
-+#ifdef __XEN__
-+#define __DEFINE_GUEST_HANDLE(name, type) \
-+ typedef struct { type *p; } __guest_handle_ ## name
-+#else
-+#define __DEFINE_GUEST_HANDLE(name, type) \
-+ typedef type * __guest_handle_ ## name
-+#endif
-+
-+#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
-+#define GUEST_HANDLE(name) __guest_handle_ ## name
-+
-+#ifndef __ASSEMBLY__
-+/* Guest handles for primitive C types. */
-+__DEFINE_GUEST_HANDLE(uchar, unsigned char);
-+__DEFINE_GUEST_HANDLE(uint, unsigned int);
-+__DEFINE_GUEST_HANDLE(ulong, unsigned long);
-+DEFINE_GUEST_HANDLE(char);
-+DEFINE_GUEST_HANDLE(int);
-+DEFINE_GUEST_HANDLE(long);
-+DEFINE_GUEST_HANDLE(void);
-+#endif
-+
-+/*
-+ * SEGMENT DESCRIPTOR TABLES
-+ */
-+/*
-+ * A number of GDT entries are reserved by Xen. These are not situated at the
-+ * start of the GDT because some stupid OSes export hard-coded selector values
-+ * in their ABI. These hard-coded values are always near the start of the GDT,
-+ * so Xen places itself out of the way, at the far end of the GDT.
-+ */
-+#define FIRST_RESERVED_GDT_PAGE 14
-+#define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096)
-+#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
-+
-+/*
-+ * These flat segments are in the Xen-private section of every GDT. Since these
-+ * are also present in the initial GDT, many OSes will be able to avoid
-+ * installing their own GDT.
-+ */
-+#define FLAT_RING1_CS 0xe019 /* GDT index 259 */
-+#define FLAT_RING1_DS 0xe021 /* GDT index 260 */
-+#define FLAT_RING1_SS 0xe021 /* GDT index 260 */
-+#define FLAT_RING3_CS 0xe02b /* GDT index 261 */
-+#define FLAT_RING3_DS 0xe033 /* GDT index 262 */
-+#define FLAT_RING3_SS 0xe033 /* GDT index 262 */
-+
-+#define FLAT_KERNEL_CS FLAT_RING1_CS
-+#define FLAT_KERNEL_DS FLAT_RING1_DS
-+#define FLAT_KERNEL_SS FLAT_RING1_SS
-+#define FLAT_USER_CS FLAT_RING3_CS
-+#define FLAT_USER_DS FLAT_RING3_DS
-+#define FLAT_USER_SS FLAT_RING3_SS
-+
-+/* And the trap vector is... */
-+#define TRAP_INSTR "int $0x82"
-+
-+/*
-+ * Virtual addresses beyond this are not modifiable by guest OSes. The
-+ * machine->physical mapping table starts at this address, read-only.
-+ */
-+#ifdef CONFIG_X86_PAE
-+#define __HYPERVISOR_VIRT_START 0xF5800000
-+#else
-+#define __HYPERVISOR_VIRT_START 0xFC000000
-+#endif
-+
-+#ifndef HYPERVISOR_VIRT_START
-+#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
-+#endif
-+
-+#ifndef machine_to_phys_mapping
-+#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
-+#endif
-+
-+/* Maximum number of virtual CPUs in multi-processor guests. */
-+#define MAX_VIRT_CPUS 32
-+
-+#ifndef __ASSEMBLY__
-+
-+/*
-+ * Send an array of these to HYPERVISOR_set_trap_table()
-+ */
-+#define TI_GET_DPL(_ti) ((_ti)->flags & 3)
-+#define TI_GET_IF(_ti) ((_ti)->flags & 4)
-+#define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl))
-+#define TI_SET_IF(_ti,_if) ((_ti)->flags |= ((!!(_if))<<2))
-+typedef struct trap_info {
-+ uint8_t vector; /* exception vector */
-+ uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */
-+ uint16_t cs; /* code selector */
-+ unsigned long address; /* code offset */
-+} trap_info_t;
-+
-+typedef struct cpu_user_regs {
-+ uint32_t ebx;
-+ uint32_t ecx;
-+ uint32_t edx;
-+ uint32_t esi;
-+ uint32_t edi;
-+ uint32_t ebp;
-+ uint32_t eax;
-+ uint16_t error_code; /* private */
-+ uint16_t entry_vector; /* private */
-+ uint32_t eip;
-+ uint16_t cs;
-+ uint8_t saved_upcall_mask;
-+ uint8_t _pad0;
-+ uint32_t eflags; /* eflags.IF == !saved_upcall_mask */
-+ uint32_t esp;
-+ uint16_t ss, _pad1;
-+ uint16_t es, _pad2;
-+ uint16_t ds, _pad3;
-+ uint16_t fs, _pad4;
-+ uint16_t gs, _pad5;
-+} cpu_user_regs_t;
-+
-+typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
-+
-+/*
-+ * The following is all CPU context. Note that the fpu_ctxt block is filled
-+ * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
-+ */
-+typedef struct vcpu_guest_context {
-+ /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
-+ struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */
-+#define VGCF_I387_VALID (1<<0)
-+#define VGCF_VMX_GUEST (1<<1)
-+#define VGCF_IN_KERNEL (1<<2)
-+ unsigned long flags; /* VGCF_* flags */
-+ cpu_user_regs_t user_regs; /* User-level CPU registers */
-+ struct trap_info trap_ctxt[256]; /* Virtual IDT */
-+ unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */
-+ unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
-+ unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */
-+ unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */
-+ unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */
-+ unsigned long event_callback_cs; /* CS:EIP of event callback */
-+ unsigned long event_callback_eip;
-+ unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */
-+ unsigned long failsafe_callback_eip;
-+ unsigned long vm_assist; /* VMASST_TYPE_* bitmap */
-+} vcpu_guest_context_t;
-+DEFINE_GUEST_HANDLE(vcpu_guest_context_t);
-+
-+typedef struct arch_shared_info {
-+ unsigned long max_pfn; /* max pfn that appears in table */
-+ /* Frame containing list of mfns containing list of mfns containing p2m. */
-+ unsigned long pfn_to_mfn_frame_list_list;
-+ unsigned long nmi_reason;
-+} arch_shared_info_t;
-+
-+typedef struct {
-+ unsigned long cr2;
-+ unsigned long pad[5]; /* sizeof(vcpu_info_t) == 64 */
-+} arch_vcpu_info_t;
-+
-+#endif /* !__ASSEMBLY__ */
-+
-+#endif
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff --git a/include/xen/interface/arch-x86_64.h b/include/xen/interface/arch-x86_64.h
-new file mode 100644
-index 0000000..f0ea2f5
---- /dev/null
-+++ b/include/xen/interface/arch-x86_64.h
-@@ -0,0 +1,266 @@
-+/******************************************************************************
-+ * arch-x86_64.h
-+ *
-+ * Guest OS interface to x86 64-bit Xen.
-+ *
-+ * Copyright (c) 2004, K A Fraser
-+ */
-+
-+#ifndef __XEN_PUBLIC_ARCH_X86_64_H__
-+#define __XEN_PUBLIC_ARCH_X86_64_H__
-+
-+#ifdef __XEN__
-+#define __DEFINE_GUEST_HANDLE(name, type) \
-+ typedef struct { type *p; } __guest_handle_ ## name
-+#else
-+#define __DEFINE_GUEST_HANDLE(name, type) \
-+ typedef type * __guest_handle_ ## name
-+#endif
-+
-+#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
-+#define GUEST_HANDLE(name) __guest_handle_ ## name
-+
-+#ifndef __ASSEMBLY__
-+/* Guest handles for primitive C types. */
-+__DEFINE_GUEST_HANDLE(uchar, unsigned char);
-+__DEFINE_GUEST_HANDLE(uint, unsigned int);
-+__DEFINE_GUEST_HANDLE(ulong, unsigned long);
-+DEFINE_GUEST_HANDLE(char);
-+DEFINE_GUEST_HANDLE(int);
-+DEFINE_GUEST_HANDLE(long);
-+DEFINE_GUEST_HANDLE(void);
-+#endif
-+
-+/*
-+ * SEGMENT DESCRIPTOR TABLES
-+ */
-+/*
-+ * A number of GDT entries are reserved by Xen. These are not situated at the
-+ * start of the GDT because some stupid OSes export hard-coded selector values
-+ * in their ABI. These hard-coded values are always near the start of the GDT,
-+ * so Xen places itself out of the way, at the far end of the GDT.
-+ */
-+#define FIRST_RESERVED_GDT_PAGE 14
-+#define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096)
-+#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
-+
-+/*
-+ * 64-bit segment selectors
-+ * These flat segments are in the Xen-private section of every GDT. Since these
-+ * are also present in the initial GDT, many OSes will be able to avoid
-+ * installing their own GDT.
-+ */
-+
-+#define FLAT_RING3_CS32 0xe023 /* GDT index 260 */
-+#define FLAT_RING3_CS64 0xe033 /* GDT index 261 */
-+#define FLAT_RING3_DS32 0xe02b /* GDT index 262 */
-+#define FLAT_RING3_DS64 0x0000 /* NULL selector */
-+#define FLAT_RING3_SS32 0xe02b /* GDT index 262 */
-+#define FLAT_RING3_SS64 0xe02b /* GDT index 262 */
-+
-+#define FLAT_KERNEL_DS64 FLAT_RING3_DS64
-+#define FLAT_KERNEL_DS32 FLAT_RING3_DS32
-+#define FLAT_KERNEL_DS FLAT_KERNEL_DS64
-+#define FLAT_KERNEL_CS64 FLAT_RING3_CS64
-+#define FLAT_KERNEL_CS32 FLAT_RING3_CS32
-+#define FLAT_KERNEL_CS FLAT_KERNEL_CS64
-+#define FLAT_KERNEL_SS64 FLAT_RING3_SS64
-+#define FLAT_KERNEL_SS32 FLAT_RING3_SS32
-+#define FLAT_KERNEL_SS FLAT_KERNEL_SS64
-+
-+#define FLAT_USER_DS64 FLAT_RING3_DS64
-+#define FLAT_USER_DS32 FLAT_RING3_DS32
-+#define FLAT_USER_DS FLAT_USER_DS64
-+#define FLAT_USER_CS64 FLAT_RING3_CS64
-+#define FLAT_USER_CS32 FLAT_RING3_CS32
-+#define FLAT_USER_CS FLAT_USER_CS64
-+#define FLAT_USER_SS64 FLAT_RING3_SS64
-+#define FLAT_USER_SS32 FLAT_RING3_SS32
-+#define FLAT_USER_SS FLAT_USER_SS64
-+
-+/* And the trap vector is... */
-+#define TRAP_INSTR "syscall"
-+
-+#define __HYPERVISOR_VIRT_START 0xFFFF800000000000
-+#define __HYPERVISOR_VIRT_END 0xFFFF880000000000
-+
-+#ifndef HYPERVISOR_VIRT_START
-+#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
-+#define HYPERVISOR_VIRT_END mk_unsigned_long(__HYPERVISOR_VIRT_END)
-+#endif
-+
-+/* Maximum number of virtual CPUs in multi-processor guests. */
-+#define MAX_VIRT_CPUS 32
-+
-+#ifndef __ASSEMBLY__
-+
-+/* The machine->physical mapping table starts at this address, read-only. */
-+#ifndef machine_to_phys_mapping
-+#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
-+#endif
-+
-+/*
-+ * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base)
-+ * @which == SEGBASE_* ; @base == 64-bit base address
-+ * Returns 0 on success.
-+ */
-+#define SEGBASE_FS 0
-+#define SEGBASE_GS_USER 1
-+#define SEGBASE_GS_KERNEL 2
-+#define SEGBASE_GS_USER_SEL 3 /* Set user %gs specified in base[15:0] */
-+
-+/*
-+ * int HYPERVISOR_iret(void)
-+ * All arguments are on the kernel stack, in the following format.
-+ * Never returns if successful. Current kernel context is lost.
-+ * The saved CS is mapped as follows:
-+ * RING0 -> RING3 kernel mode.
-+ * RING1 -> RING3 kernel mode.
-+ * RING2 -> RING3 kernel mode.
-+ * RING3 -> RING3 user mode.
-+ * However RING0 indicates that the guest kernel should return to itself
-+ * directly with
-+ * orb $3,1*8(%rsp)
-+ * iretq
-+ * If flags contains VGCF_IN_SYSCALL:
-+ * Restore RAX, RIP, RFLAGS, RSP.
-+ * Discard R11, RCX, CS, SS.
-+ * Otherwise:
-+ * Restore RAX, R11, RCX, CS:RIP, RFLAGS, SS:RSP.
-+ * All other registers are saved on hypercall entry and restored to user.
-+ */
-+/* Guest exited in SYSCALL context? Return to guest with SYSRET? */
-+#define VGCF_IN_SYSCALL (1<<8)
-+struct iret_context {
-+ /* Top of stack (%rsp at point of hypercall). */
-+ uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
-+ /* Bottom of iret stack frame. */
-+};
-+/*
-+ * For compatibility with HYPERVISOR_switch_to_user which is the old
-+ * name for HYPERVISOR_iret.
-+ */
-+struct switch_to_user {
-+ /* Top of stack (%rsp at point of hypercall). */
-+ uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
-+ /* Bottom of iret stack frame. */
-+};
-+
-+/*
-+ * Send an array of these to HYPERVISOR_set_trap_table().
-+ * N.B. As in x86/32 mode, the privilege level specifies which modes may enter
-+ * a trap via a software interrupt. Since rings 1 and 2 are unavailable, we
-+ * allocate privilege levels as follows:
-+ * Level == 0: No one may enter
-+ * Level == 1: Kernel may enter
-+ * Level == 2: Kernel may enter
-+ * Level == 3: Everyone may enter
-+ */
-+#define TI_GET_DPL(_ti) ((_ti)->flags & 3)
-+#define TI_GET_IF(_ti) ((_ti)->flags & 4)
-+#define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl))
-+#define TI_SET_IF(_ti,_if) ((_ti)->flags |= ((!!(_if))<<2))
-+typedef struct trap_info {
-+ uint8_t vector; /* exception vector */
-+ uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */
-+ uint16_t cs; /* code selector */
-+ unsigned long address; /* code offset */
-+} trap_info_t;
-+
-+#ifdef __GNUC__
-+/* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */
-+#define __DECL_REG(name) union { uint64_t r ## name, e ## name; }
-+#else
-+/* Non-gcc sources must always use the proper 64-bit name (e.g., rax). */
-+#define __DECL_REG(name) uint64_t r ## name
-+#endif
-+
-+typedef struct cpu_user_regs {
-+ uint64_t r15;
-+ uint64_t r14;
-+ uint64_t r13;
-+ uint64_t r12;
-+ __DECL_REG(bp);
-+ __DECL_REG(bx);
-+ uint64_t r11;
-+ uint64_t r10;
-+ uint64_t r9;
-+ uint64_t r8;
-+ __DECL_REG(ax);
-+ __DECL_REG(cx);
-+ __DECL_REG(dx);
-+ __DECL_REG(si);
-+ __DECL_REG(di);
-+ uint32_t error_code; /* private */
-+ uint32_t entry_vector; /* private */
-+ __DECL_REG(ip);
-+ uint16_t cs, _pad0[1];
-+ uint8_t saved_upcall_mask;
-+ uint8_t _pad1[3];
-+ __DECL_REG(flags); /* rflags.IF == !saved_upcall_mask */
-+ __DECL_REG(sp);
-+ uint16_t ss, _pad2[3];
-+ uint16_t es, _pad3[3];
-+ uint16_t ds, _pad4[3];
-+ uint16_t fs, _pad5[3]; /* Non-zero => takes precedence over fs_base. */
-+    uint16_t gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_user. */
-+} cpu_user_regs_t;
-+
-+#undef __DECL_REG
-+
-+typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
-+
-+/*
-+ * The following is all CPU context. Note that the fpu_ctxt block is filled
-+ * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
-+ */
-+typedef struct vcpu_guest_context {
-+ /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
-+ struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */
-+#define VGCF_I387_VALID (1<<0)
-+#define VGCF_VMX_GUEST (1<<1)
-+#define VGCF_IN_KERNEL (1<<2)
-+ unsigned long flags; /* VGCF_* flags */
-+ cpu_user_regs_t user_regs; /* User-level CPU registers */
-+ struct trap_info trap_ctxt[256]; /* Virtual IDT */
-+ unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */
-+ unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
-+ unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */
-+ unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */
-+ unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */
-+ unsigned long event_callback_eip;
-+ unsigned long failsafe_callback_eip;
-+ unsigned long syscall_callback_eip;
-+ unsigned long vm_assist; /* VMASST_TYPE_* bitmap */
-+ /* Segment base addresses. */
-+ uint64_t fs_base;
-+ uint64_t gs_base_kernel;
-+ uint64_t gs_base_user;
-+} vcpu_guest_context_t;
-+DEFINE_GUEST_HANDLE(vcpu_guest_context_t);
-+
-+typedef struct arch_shared_info {
-+ unsigned long max_pfn; /* max pfn that appears in table */
-+ /* Frame containing list of mfns containing list of mfns containing p2m. */
-+ unsigned long pfn_to_mfn_frame_list_list;
-+ unsigned long nmi_reason;
-+} arch_shared_info_t;
-+
-+typedef struct {
-+ unsigned long cr2;
-+ unsigned long pad; /* sizeof(vcpu_info_t) == 64 */
-+} arch_vcpu_info_t;
-+
-+#endif /* !__ASSEMBLY__ */
-+
-+#endif
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff --git a/include/xen/interface/dom0_ops.h b/include/xen/interface/dom0_ops.h
-new file mode 100644
-index 0000000..89ea35c
---- /dev/null
-+++ b/include/xen/interface/dom0_ops.h
-@@ -0,0 +1,530 @@
-+/******************************************************************************
-+ * dom0_ops.h
-+ *
-+ * Process command requests from domain-0 guest OS.
-+ *
-+ * Copyright (c) 2002-2003, B Dragovic
-+ * Copyright (c) 2002-2004, K Fraser
-+ */
-+
-+
-+#ifndef __XEN_PUBLIC_DOM0_OPS_H__
-+#define __XEN_PUBLIC_DOM0_OPS_H__
-+
-+#include "xen.h"
-+#include "sched_ctl.h"
-+
-+/*
-+ * Make sure you increment the interface version whenever you modify this file!
-+ * This makes sure that old versions of dom0 tools will stop working in a
-+ * well-defined way (rather than crashing the machine, for instance).
-+ */
-+#define DOM0_INTERFACE_VERSION 0x03000000
-+
-+/************************************************************************/
-+
-+#define DOM0_GETMEMLIST 2
-+typedef struct dom0_getmemlist {
-+ /* IN variables. */
-+ domid_t domain;
-+ unsigned long max_pfns;
-+ GUEST_HANDLE(ulong) buffer;
-+ /* OUT variables. */
-+ unsigned long num_pfns;
-+} dom0_getmemlist_t;
-+DEFINE_GUEST_HANDLE(dom0_getmemlist_t);
-+
-+#define DOM0_SCHEDCTL 6
-+ /* struct sched_ctl_cmd is from sched_ctl.h */
-+typedef struct sched_ctl_cmd dom0_schedctl_t;
-+DEFINE_GUEST_HANDLE(dom0_schedctl_t);
-+
-+#define DOM0_ADJUSTDOM 7
-+/* struct sched_adjdom_cmd is from sched_ctl.h */
-+typedef struct sched_adjdom_cmd dom0_adjustdom_t;
-+DEFINE_GUEST_HANDLE(dom0_adjustdom_t);
-+
-+#define DOM0_CREATEDOMAIN 8
-+typedef struct dom0_createdomain {
-+ /* IN parameters */
-+ uint32_t ssidref;
-+ xen_domain_handle_t handle;
-+ /* IN/OUT parameters. */
-+ /* Identifier for new domain (auto-allocate if zero is specified). */
-+ domid_t domain;
-+} dom0_createdomain_t;
-+DEFINE_GUEST_HANDLE(dom0_createdomain_t);
-+
-+#define DOM0_DESTROYDOMAIN 9
-+typedef struct dom0_destroydomain {
-+ /* IN variables. */
-+ domid_t domain;
-+} dom0_destroydomain_t;
-+DEFINE_GUEST_HANDLE(dom0_destroydomain_t);
-+
-+#define DOM0_PAUSEDOMAIN 10
-+typedef struct dom0_pausedomain {
-+ /* IN parameters. */
-+ domid_t domain;
-+} dom0_pausedomain_t;
-+DEFINE_GUEST_HANDLE(dom0_pausedomain_t);
-+
-+#define DOM0_UNPAUSEDOMAIN 11
-+typedef struct dom0_unpausedomain {
-+ /* IN parameters. */
-+ domid_t domain;
-+} dom0_unpausedomain_t;
-+DEFINE_GUEST_HANDLE(dom0_unpausedomain_t);
-+
-+#define DOM0_GETDOMAININFO 12
-+typedef struct dom0_getdomaininfo {
-+ /* IN variables. */
-+ domid_t domain; /* NB. IN/OUT variable. */
-+ /* OUT variables. */
-+#define DOMFLAGS_DYING (1<<0) /* Domain is scheduled to die. */
-+#define DOMFLAGS_SHUTDOWN (1<<2) /* The guest OS has shut down. */
-+#define DOMFLAGS_PAUSED (1<<3) /* Currently paused by control software. */
-+#define DOMFLAGS_BLOCKED (1<<4) /* Currently blocked pending an event. */
-+#define DOMFLAGS_RUNNING (1<<5) /* Domain is currently running. */
-+#define DOMFLAGS_CPUMASK 255 /* CPU to which this domain is bound. */
-+#define DOMFLAGS_CPUSHIFT 8
-+#define DOMFLAGS_SHUTDOWNMASK 255 /* DOMFLAGS_SHUTDOWN guest-supplied code. */
-+#define DOMFLAGS_SHUTDOWNSHIFT 16
-+ uint32_t flags;
-+ unsigned long tot_pages;
-+ unsigned long max_pages;
-+ unsigned long shared_info_frame; /* MFN of shared_info struct */
-+ uint64_t cpu_time;
-+ uint32_t nr_online_vcpus; /* Number of VCPUs currently online. */
-+ uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
-+ uint32_t ssidref;
-+ xen_domain_handle_t handle;
-+} dom0_getdomaininfo_t;
-+DEFINE_GUEST_HANDLE(dom0_getdomaininfo_t);
-+
-+#define DOM0_SETVCPUCONTEXT 13
-+typedef struct dom0_setvcpucontext {
-+ /* IN variables. */
-+ domid_t domain;
-+ uint32_t vcpu;
-+ /* IN/OUT parameters */
-+ GUEST_HANDLE(vcpu_guest_context_t) ctxt;
-+} dom0_setvcpucontext_t;
-+DEFINE_GUEST_HANDLE(dom0_setvcpucontext_t);
-+
-+#define DOM0_MSR 15
-+typedef struct dom0_msr {
-+ /* IN variables. */
-+ uint32_t write;
-+ cpumap_t cpu_mask;
-+ uint32_t msr;
-+ uint32_t in1;
-+ uint32_t in2;
-+ /* OUT variables. */
-+ uint32_t out1;
-+ uint32_t out2;
-+} dom0_msr_t;
-+DEFINE_GUEST_HANDLE(dom0_msr_t);
-+
-+/*
-+ * Set clock such that it would read <secs,nsecs> after 00:00:00 UTC,
-+ * 1 January, 1970 if the current system time was <system_time>.
-+ */
-+#define DOM0_SETTIME 17
-+typedef struct dom0_settime {
-+ /* IN variables. */
-+ uint32_t secs;
-+ uint32_t nsecs;
-+ uint64_t system_time;
-+} dom0_settime_t;
-+DEFINE_GUEST_HANDLE(dom0_settime_t);
-+
-+#define DOM0_GETPAGEFRAMEINFO 18
-+#define NOTAB 0 /* normal page */
-+#define L1TAB (1<<28)
-+#define L2TAB (2<<28)
-+#define L3TAB (3<<28)
-+#define L4TAB (4<<28)
-+#define LPINTAB (1<<31)
-+#define XTAB (0xf<<28) /* invalid page */
-+#define LTAB_MASK XTAB
-+#define LTABTYPE_MASK (0x7<<28)
-+
-+typedef struct dom0_getpageframeinfo {
-+ /* IN variables. */
-+ unsigned long mfn; /* Machine page frame number to query. */
-+ domid_t domain; /* To which domain does the frame belong? */
-+ /* OUT variables. */
-+ /* Is the page PINNED to a type? */
-+ uint32_t type; /* see above type defs */
-+} dom0_getpageframeinfo_t;
-+DEFINE_GUEST_HANDLE(dom0_getpageframeinfo_t);
-+
-+/*
-+ * Read console content from Xen buffer ring.
-+ */
-+#define DOM0_READCONSOLE 19
-+typedef struct dom0_readconsole {
-+ /* IN variables. */
-+ uint32_t clear; /* Non-zero -> clear after reading. */
-+ /* IN/OUT variables. */
-+ GUEST_HANDLE(char) buffer; /* In: Buffer start; Out: Used buffer start */
-+ uint32_t count; /* In: Buffer size; Out: Used buffer size */
-+} dom0_readconsole_t;
-+DEFINE_GUEST_HANDLE(dom0_readconsole_t);
-+
-+/*
-+ * Set which physical cpus a vcpu can execute on.
-+ */
-+#define DOM0_SETVCPUAFFINITY 20
-+typedef struct dom0_setvcpuaffinity {
-+ /* IN variables. */
-+ domid_t domain;
-+ uint32_t vcpu;
-+ cpumap_t cpumap;
-+} dom0_setvcpuaffinity_t;
-+DEFINE_GUEST_HANDLE(dom0_setvcpuaffinity_t);
-+
-+/* Get trace buffers machine base address */
-+#define DOM0_TBUFCONTROL 21
-+typedef struct dom0_tbufcontrol {
-+ /* IN variables */
-+#define DOM0_TBUF_GET_INFO 0
-+#define DOM0_TBUF_SET_CPU_MASK 1
-+#define DOM0_TBUF_SET_EVT_MASK 2
-+#define DOM0_TBUF_SET_SIZE 3
-+#define DOM0_TBUF_ENABLE 4
-+#define DOM0_TBUF_DISABLE 5
-+ uint32_t op;
-+ /* IN/OUT variables */
-+ cpumap_t cpu_mask;
-+ uint32_t evt_mask;
-+ /* OUT variables */
-+ unsigned long buffer_mfn;
-+ uint32_t size;
-+} dom0_tbufcontrol_t;
-+DEFINE_GUEST_HANDLE(dom0_tbufcontrol_t);
-+
-+/*
-+ * Get physical information about the host machine
-+ */
-+#define DOM0_PHYSINFO 22
-+typedef struct dom0_physinfo {
-+ uint32_t threads_per_core;
-+ uint32_t cores_per_socket;
-+ uint32_t sockets_per_node;
-+ uint32_t nr_nodes;
-+ uint32_t cpu_khz;
-+ unsigned long total_pages;
-+ unsigned long free_pages;
-+ uint32_t hw_cap[8];
-+} dom0_physinfo_t;
-+DEFINE_GUEST_HANDLE(dom0_physinfo_t);
-+
-+/*
-+ * Get the ID of the current scheduler.
-+ */
-+#define DOM0_SCHED_ID 24
-+typedef struct dom0_sched_id {
-+ /* OUT variable */
-+ uint32_t sched_id;
-+} dom0_sched_id_t;
-+DEFINE_GUEST_HANDLE(dom0_sched_id_t);
-+
-+/*
-+ * Control shadow pagetables operation
-+ */
-+#define DOM0_SHADOW_CONTROL 25
-+
-+#define DOM0_SHADOW_CONTROL_OP_OFF 0
-+#define DOM0_SHADOW_CONTROL_OP_ENABLE_TEST 1
-+#define DOM0_SHADOW_CONTROL_OP_ENABLE_LOGDIRTY 2
-+#define DOM0_SHADOW_CONTROL_OP_ENABLE_TRANSLATE 3
-+
-+#define DOM0_SHADOW_CONTROL_OP_FLUSH 10 /* table ops */
-+#define DOM0_SHADOW_CONTROL_OP_CLEAN 11
-+#define DOM0_SHADOW_CONTROL_OP_PEEK 12
-+
-+typedef struct dom0_shadow_control_stats {
-+ uint32_t fault_count;
-+ uint32_t dirty_count;
-+ uint32_t dirty_net_count;
-+ uint32_t dirty_block_count;
-+} dom0_shadow_control_stats_t;
-+DEFINE_GUEST_HANDLE(dom0_shadow_control_stats_t);
-+
-+typedef struct dom0_shadow_control {
-+ /* IN variables. */
-+ domid_t domain;
-+ uint32_t op;
-+ GUEST_HANDLE(ulong) dirty_bitmap;
-+ /* IN/OUT variables. */
-+ unsigned long pages; /* size of buffer, updated with actual size */
-+ /* OUT variables. */
-+ dom0_shadow_control_stats_t stats;
-+} dom0_shadow_control_t;
-+DEFINE_GUEST_HANDLE(dom0_shadow_control_t);
-+
-+#define DOM0_SETDOMAINMAXMEM 28
-+typedef struct dom0_setdomainmaxmem {
-+ /* IN variables. */
-+ domid_t domain;
-+ unsigned long max_memkb;
-+} dom0_setdomainmaxmem_t;
-+DEFINE_GUEST_HANDLE(dom0_setdomainmaxmem_t);
-+
-+#define DOM0_GETPAGEFRAMEINFO2 29 /* batched interface */
-+typedef struct dom0_getpageframeinfo2 {
-+ /* IN variables. */
-+ domid_t domain;
-+ unsigned long num;
-+ /* IN/OUT variables. */
-+ GUEST_HANDLE(ulong) array;
-+} dom0_getpageframeinfo2_t;
-+DEFINE_GUEST_HANDLE(dom0_getpageframeinfo2_t);
-+
-+/*
-+ * Request memory range (@mfn, @mfn+@nr_mfns-1) to have type @type.
-+ * On x86, @type is an architecture-defined MTRR memory type.
-+ * On success, returns the MTRR that was used (@reg) and a handle that can
-+ * be passed to DOM0_DEL_MEMTYPE to accurately tear down the new setting.
-+ * (x86-specific).
-+ */
-+#define DOM0_ADD_MEMTYPE 31
-+typedef struct dom0_add_memtype {
-+ /* IN variables. */
-+ unsigned long mfn;
-+ unsigned long nr_mfns;
-+ uint32_t type;
-+ /* OUT variables. */
-+ uint32_t handle;
-+ uint32_t reg;
-+} dom0_add_memtype_t;
-+DEFINE_GUEST_HANDLE(dom0_add_memtype_t);
-+
-+/*
-+ * Tear down an existing memory-range type. If @handle is remembered then it
-+ * should be passed in to accurately tear down the correct setting (in case
-+ * of overlapping memory regions with differing types). If it is not known
-+ * then @handle should be set to zero. In all cases @reg must be set.
-+ * (x86-specific).
-+ */
-+#define DOM0_DEL_MEMTYPE 32
-+typedef struct dom0_del_memtype {
-+ /* IN variables. */
-+ uint32_t handle;
-+ uint32_t reg;
-+} dom0_del_memtype_t;
-+DEFINE_GUEST_HANDLE(dom0_del_memtype_t);
-+
-+/* Read current type of an MTRR (x86-specific). */
-+#define DOM0_READ_MEMTYPE 33
-+typedef struct dom0_read_memtype {
-+ /* IN variables. */
-+ uint32_t reg;
-+ /* OUT variables. */
-+ unsigned long mfn;
-+ unsigned long nr_mfns;
-+ uint32_t type;
-+} dom0_read_memtype_t;
-+DEFINE_GUEST_HANDLE(dom0_read_memtype_t);
-+
-+/* Interface for controlling Xen software performance counters. */
-+#define DOM0_PERFCCONTROL 34
-+/* Sub-operations: */
-+#define DOM0_PERFCCONTROL_OP_RESET 1 /* Reset all counters to zero. */
-+#define DOM0_PERFCCONTROL_OP_QUERY 2 /* Get perfctr information. */
-+typedef struct dom0_perfc_desc {
-+ char name[80]; /* name of perf counter */
-+ uint32_t nr_vals; /* number of values for this counter */
-+ uint32_t vals[64]; /* array of values */
-+} dom0_perfc_desc_t;
-+DEFINE_GUEST_HANDLE(dom0_perfc_desc_t);
-+typedef struct dom0_perfccontrol {
-+ /* IN variables. */
-+ uint32_t op; /* DOM0_PERFCCONTROL_OP_??? */
-+ /* OUT variables. */
-+ uint32_t nr_counters; /* number of counters */
-+ GUEST_HANDLE(dom0_perfc_desc_t) desc; /* counter information (or NULL) */
-+} dom0_perfccontrol_t;
-+DEFINE_GUEST_HANDLE(dom0_perfccontrol_t);
-+
-+#define DOM0_MICROCODE 35
-+typedef struct dom0_microcode {
-+ /* IN variables. */
-+ GUEST_HANDLE(void) data; /* Pointer to microcode data */
-+ uint32_t length; /* Length of microcode data. */
-+} dom0_microcode_t;
-+DEFINE_GUEST_HANDLE(dom0_microcode_t);
-+
-+#define DOM0_IOPORT_PERMISSION 36
-+typedef struct dom0_ioport_permission {
-+ domid_t domain; /* domain to be affected */
-+ uint32_t first_port; /* first port in range */
-+ uint32_t nr_ports; /* size of port range */
-+ uint8_t allow_access; /* allow or deny access to range? */
-+} dom0_ioport_permission_t;
-+DEFINE_GUEST_HANDLE(dom0_ioport_permission_t);
-+
-+#define DOM0_GETVCPUCONTEXT 37
-+typedef struct dom0_getvcpucontext {
-+ /* IN variables. */
-+ domid_t domain; /* domain to be affected */
-+ uint32_t vcpu; /* vcpu # */
-+ /* OUT variables. */
-+ GUEST_HANDLE(vcpu_guest_context_t) ctxt;
-+} dom0_getvcpucontext_t;
-+DEFINE_GUEST_HANDLE(dom0_getvcpucontext_t);
-+
-+#define DOM0_GETVCPUINFO 43
-+typedef struct dom0_getvcpuinfo {
-+ /* IN variables. */
-+ domid_t domain; /* domain to be affected */
-+ uint32_t vcpu; /* vcpu # */
-+ /* OUT variables. */
-+ uint8_t online; /* currently online (not hotplugged)? */
-+ uint8_t blocked; /* blocked waiting for an event? */
-+ uint8_t running; /* currently scheduled on its CPU? */
-+ uint64_t cpu_time; /* total cpu time consumed (ns) */
-+ uint32_t cpu; /* current mapping */
-+ cpumap_t cpumap; /* allowable mapping */
-+} dom0_getvcpuinfo_t;
-+DEFINE_GUEST_HANDLE(dom0_getvcpuinfo_t);
-+
-+#define DOM0_GETDOMAININFOLIST 38
-+typedef struct dom0_getdomaininfolist {
-+ /* IN variables. */
-+ domid_t first_domain;
-+ uint32_t max_domains;
-+ GUEST_HANDLE(dom0_getdomaininfo_t) buffer;
-+ /* OUT variables. */
-+ uint32_t num_domains;
-+} dom0_getdomaininfolist_t;
-+DEFINE_GUEST_HANDLE(dom0_getdomaininfolist_t);
-+
-+#define DOM0_PLATFORM_QUIRK 39
-+#define QUIRK_NOIRQBALANCING 1
-+typedef struct dom0_platform_quirk {
-+ /* IN variables. */
-+ uint32_t quirk_id;
-+} dom0_platform_quirk_t;
-+DEFINE_GUEST_HANDLE(dom0_platform_quirk_t);
-+
-+#define DOM0_PHYSICAL_MEMORY_MAP 40
-+typedef struct dom0_memory_map_entry {
-+ uint64_t start, end;
-+ uint32_t flags; /* reserved */
-+ uint8_t is_ram;
-+} dom0_memory_map_entry_t;
-+DEFINE_GUEST_HANDLE(dom0_memory_map_entry_t);
-+typedef struct dom0_physical_memory_map {
-+ /* IN variables. */
-+ uint32_t max_map_entries;
-+ /* OUT variables. */
-+ uint32_t nr_map_entries;
-+ GUEST_HANDLE(dom0_memory_map_entry_t) memory_map;
-+} dom0_physical_memory_map_t;
-+DEFINE_GUEST_HANDLE(dom0_physical_memory_map_t);
-+
-+#define DOM0_MAX_VCPUS 41
-+typedef struct dom0_max_vcpus {
-+ domid_t domain; /* domain to be affected */
-+ uint32_t max; /* maximum number of vcpus */
-+} dom0_max_vcpus_t;
-+DEFINE_GUEST_HANDLE(dom0_max_vcpus_t);
-+
-+#define DOM0_SETDOMAINHANDLE 44
-+typedef struct dom0_setdomainhandle {
-+ domid_t domain;
-+ xen_domain_handle_t handle;
-+} dom0_setdomainhandle_t;
-+DEFINE_GUEST_HANDLE(dom0_setdomainhandle_t);
-+
-+#define DOM0_SETDEBUGGING 45
-+typedef struct dom0_setdebugging {
-+ domid_t domain;
-+ uint8_t enable;
-+} dom0_setdebugging_t;
-+DEFINE_GUEST_HANDLE(dom0_setdebugging_t);
-+
-+#define DOM0_IRQ_PERMISSION 46
-+typedef struct dom0_irq_permission {
-+ domid_t domain; /* domain to be affected */
-+ uint8_t pirq;
-+ uint8_t allow_access; /* flag to specify enable/disable of IRQ access */
-+} dom0_irq_permission_t;
-+DEFINE_GUEST_HANDLE(dom0_irq_permission_t);
-+
-+#define DOM0_IOMEM_PERMISSION 47
-+typedef struct dom0_iomem_permission {
-+ domid_t domain; /* domain to be affected */
-+ unsigned long first_mfn; /* first page (physical page number) in range */
-+ unsigned long nr_mfns; /* number of pages in range (>0) */
-+ uint8_t allow_access; /* allow (!0) or deny (0) access to range? */
-+} dom0_iomem_permission_t;
-+DEFINE_GUEST_HANDLE(dom0_iomem_permission_t);
-+
-+#define DOM0_HYPERCALL_INIT 48
-+typedef struct dom0_hypercall_init {
-+ domid_t domain; /* domain to be affected */
-+ unsigned long mfn; /* machine frame to be initialised */
-+} dom0_hypercall_init_t;
-+DEFINE_GUEST_HANDLE(dom0_hypercall_init_t);
-+
-+typedef struct dom0_op {
-+ uint32_t cmd;
-+ uint32_t interface_version; /* DOM0_INTERFACE_VERSION */
-+ union {
-+ struct dom0_createdomain createdomain;
-+ struct dom0_pausedomain pausedomain;
-+ struct dom0_unpausedomain unpausedomain;
-+ struct dom0_destroydomain destroydomain;
-+ struct dom0_getmemlist getmemlist;
-+ struct sched_ctl_cmd schedctl;
-+ struct sched_adjdom_cmd adjustdom;
-+ struct dom0_setvcpucontext setvcpucontext;
-+ struct dom0_getdomaininfo getdomaininfo;
-+ struct dom0_getpageframeinfo getpageframeinfo;
-+ struct dom0_msr msr;
-+ struct dom0_settime settime;
-+ struct dom0_readconsole readconsole;
-+ struct dom0_setvcpuaffinity setvcpuaffinity;
-+ struct dom0_tbufcontrol tbufcontrol;
-+ struct dom0_physinfo physinfo;
-+ struct dom0_sched_id sched_id;
-+ struct dom0_shadow_control shadow_control;
-+ struct dom0_setdomainmaxmem setdomainmaxmem;
-+ struct dom0_getpageframeinfo2 getpageframeinfo2;
-+ struct dom0_add_memtype add_memtype;
-+ struct dom0_del_memtype del_memtype;
-+ struct dom0_read_memtype read_memtype;
-+ struct dom0_perfccontrol perfccontrol;
-+ struct dom0_microcode microcode;
-+ struct dom0_ioport_permission ioport_permission;
-+ struct dom0_getvcpucontext getvcpucontext;
-+ struct dom0_getvcpuinfo getvcpuinfo;
-+ struct dom0_getdomaininfolist getdomaininfolist;
-+ struct dom0_platform_quirk platform_quirk;
-+ struct dom0_physical_memory_map physical_memory_map;
-+ struct dom0_max_vcpus max_vcpus;
-+ struct dom0_setdomainhandle setdomainhandle;
-+ struct dom0_setdebugging setdebugging;
-+ struct dom0_irq_permission irq_permission;
-+ struct dom0_iomem_permission iomem_permission;
-+ struct dom0_hypercall_init hypercall_init;
-+ uint8_t pad[128];
-+ } u;
-+} dom0_op_t;
-+DEFINE_GUEST_HANDLE(dom0_op_t);
-+
-+#endif /* __XEN_PUBLIC_DOM0_OPS_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
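
A minimal sketch (not part of the patch) of how a dom0 tool would drive the interface above, assuming the guest's usual HYPERVISOR_dom0_op() hypercall wrapper and the domid_t type from xen.h; error handling is elided.

#include <xen/interface/xen.h>
#include <xen/interface/dom0_ops.h>

/* Query a domain's page count via DOM0_GETDOMAININFO (sketch). */
static long get_tot_pages(domid_t domid)
{
    dom0_op_t op = { 0 };

    op.cmd = DOM0_GETDOMAININFO;
    op.interface_version = DOM0_INTERFACE_VERSION;   /* versioned ABI */
    op.u.getdomaininfo.domain = domid;               /* IN/OUT field */

    /* HYPERVISOR_dom0_op() is assumed to exist in the hypercall layer. */
    if (HYPERVISOR_dom0_op(&op) != 0)
        return -1;

    return (long)op.u.getdomaininfo.tot_pages;
}
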
-diff --git a/include/xen/interface/event_channel.h b/include/xen/interface/event_channel.h
-new file mode 100644
-index 0000000..03d2039
---- /dev/null
-+++ b/include/xen/interface/event_channel.h
-@@ -0,0 +1,203 @@
-+/******************************************************************************
-+ * event_channel.h
-+ *
-+ * Event channels between domains.
-+ *
-+ * Copyright (c) 2003-2004, K A Fraser.
-+ */
-+
-+#ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__
-+#define __XEN_PUBLIC_EVENT_CHANNEL_H__
-+
-+typedef uint32_t evtchn_port_t;
-+
-+/*
-+ * EVTCHNOP_alloc_unbound: Allocate a port in domain <dom> and mark as
-+ * accepting interdomain bindings from domain <remote_dom>. A fresh port
-+ * is allocated in <dom> and returned as <port>.
-+ * NOTES:
-+ * 1. If the caller is unprivileged then <dom> must be DOMID_SELF.
-+ * 2. <remote_dom> may be DOMID_SELF, allowing loopback connections.
-+ */
-+#define EVTCHNOP_alloc_unbound 6
-+typedef struct evtchn_alloc_unbound {
-+ /* IN parameters */
-+ domid_t dom, remote_dom;
-+ /* OUT parameters */
-+ evtchn_port_t port;
-+} evtchn_alloc_unbound_t;
-+
-+/*
-+ * EVTCHNOP_bind_interdomain: Construct an interdomain event channel between
-+ * the calling domain and <remote_dom>. <remote_dom,remote_port> must identify
-+ * a port that is unbound and marked as accepting bindings from the calling
-+ * domain. A fresh port is allocated in the calling domain and returned as
-+ * <local_port>.
-+ * NOTES:
-+ * 1. <remote_dom> may be DOMID_SELF, allowing loopback connections.
-+ */
-+#define EVTCHNOP_bind_interdomain 0
-+typedef struct evtchn_bind_interdomain {
-+ /* IN parameters. */
-+ domid_t remote_dom;
-+ evtchn_port_t remote_port;
-+ /* OUT parameters. */
-+ evtchn_port_t local_port;
-+} evtchn_bind_interdomain_t;
-+
-+/*
-+ * EVTCHNOP_bind_virq: Bind a local event channel to VIRQ <irq> on specified
-+ * vcpu.
-+ * NOTES:
-+ * 1. A virtual IRQ may be bound to at most one event channel per vcpu.
-+ * 2. The allocated event channel is bound to the specified vcpu. The binding
-+ * may not be changed.
-+ */
-+#define EVTCHNOP_bind_virq 1
-+typedef struct evtchn_bind_virq {
-+ /* IN parameters. */
-+ uint32_t virq;
-+ uint32_t vcpu;
-+ /* OUT parameters. */
-+ evtchn_port_t port;
-+} evtchn_bind_virq_t;
-+
-+/*
-+ * EVTCHNOP_bind_pirq: Bind a local event channel to PIRQ <irq>.
-+ * NOTES:
-+ * 1. A physical IRQ may be bound to at most one event channel per domain.
-+ * 2. Only a sufficiently-privileged domain may bind to a physical IRQ.
-+ */
-+#define EVTCHNOP_bind_pirq 2
-+typedef struct evtchn_bind_pirq {
-+ /* IN parameters. */
-+ uint32_t pirq;
-+#define BIND_PIRQ__WILL_SHARE 1
-+ uint32_t flags; /* BIND_PIRQ__* */
-+ /* OUT parameters. */
-+ evtchn_port_t port;
-+} evtchn_bind_pirq_t;
-+
-+/*
-+ * EVTCHNOP_bind_ipi: Bind a local event channel to receive events.
-+ * NOTES:
-+ * 1. The allocated event channel is bound to the specified vcpu. The binding
-+ * may not be changed.
-+ */
-+#define EVTCHNOP_bind_ipi 7
-+typedef struct evtchn_bind_ipi {
-+ uint32_t vcpu;
-+ /* OUT parameters. */
-+ evtchn_port_t port;
-+} evtchn_bind_ipi_t;
-+
-+/*
-+ * EVTCHNOP_close: Close a local event channel <port>. If the channel is
-+ * interdomain then the remote end is placed in the unbound state
-+ * (EVTCHNSTAT_unbound), awaiting a new connection.
-+ */
-+#define EVTCHNOP_close 3
-+typedef struct evtchn_close {
-+ /* IN parameters. */
-+ evtchn_port_t port;
-+} evtchn_close_t;
-+
-+/*
-+ * EVTCHNOP_send: Send an event to the remote end of the channel whose local
-+ * endpoint is <port>.
-+ */
-+#define EVTCHNOP_send 4
-+typedef struct evtchn_send {
-+ /* IN parameters. */
-+ evtchn_port_t port;
-+} evtchn_send_t;
-+
-+/*
-+ * EVTCHNOP_status: Get the current status of the communication channel which
-+ * has an endpoint at <dom, port>.
-+ * NOTES:
-+ * 1. <dom> may be specified as DOMID_SELF.
-+ * 2. Only a sufficiently-privileged domain may obtain the status of an event
-+ * channel for which <dom> is not DOMID_SELF.
-+ */
-+#define EVTCHNOP_status 5
-+typedef struct evtchn_status {
-+ /* IN parameters */
-+ domid_t dom;
-+ evtchn_port_t port;
-+ /* OUT parameters */
-+#define EVTCHNSTAT_closed 0 /* Channel is not in use. */
-+#define EVTCHNSTAT_unbound 1 /* Channel is waiting interdom connection.*/
-+#define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */
-+#define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */
-+#define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line */
-+#define EVTCHNSTAT_ipi 5 /* Channel is bound to a virtual IPI line */
-+ uint32_t status;
-+ uint32_t vcpu; /* VCPU to which this channel is bound. */
-+ union {
-+ struct {
-+ domid_t dom;
-+ } unbound; /* EVTCHNSTAT_unbound */
-+ struct {
-+ domid_t dom;
-+ evtchn_port_t port;
-+ } interdomain; /* EVTCHNSTAT_interdomain */
-+ uint32_t pirq; /* EVTCHNSTAT_pirq */
-+ uint32_t virq; /* EVTCHNSTAT_virq */
-+ } u;
-+} evtchn_status_t;
-+
-+/*
-+ * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an
-+ * event is pending.
-+ * NOTES:
-+ * 1. IPI- and VIRQ-bound channels always notify the vcpu that initialised
-+ * the binding. This binding cannot be changed.
-+ * 2. All other channels notify vcpu0 by default. This default is set when
-+ * the channel is allocated (a port that is freed and subsequently reused
-+ * has its binding reset to vcpu0).
-+ */
-+#define EVTCHNOP_bind_vcpu 8
-+typedef struct evtchn_bind_vcpu {
-+ /* IN parameters. */
-+ evtchn_port_t port;
-+ uint32_t vcpu;
-+} evtchn_bind_vcpu_t;
-+
-+/*
-+ * EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver
-+ * a notification to the appropriate VCPU if an event is pending.
-+ */
-+#define EVTCHNOP_unmask 9
-+typedef struct evtchn_unmask {
-+ /* IN parameters. */
-+ evtchn_port_t port;
-+} evtchn_unmask_t;
-+
-+typedef struct evtchn_op {
-+ uint32_t cmd; /* EVTCHNOP_* */
-+ union {
-+ evtchn_alloc_unbound_t alloc_unbound;
-+ evtchn_bind_interdomain_t bind_interdomain;
-+ evtchn_bind_virq_t bind_virq;
-+ evtchn_bind_pirq_t bind_pirq;
-+ evtchn_bind_ipi_t bind_ipi;
-+ evtchn_close_t close;
-+ evtchn_send_t send;
-+ evtchn_status_t status;
-+ evtchn_bind_vcpu_t bind_vcpu;
-+ evtchn_unmask_t unmask;
-+ } u;
-+} evtchn_op_t;
-+
-+#endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
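
A hedged sketch (not part of the patch) of the EVTCHNOP_alloc_unbound flow described above: allocate a port in this domain that <remote> may later bind via EVTCHNOP_bind_interdomain. HYPERVISOR_event_channel_op() and DOMID_SELF are assumed from the surrounding Xen headers.

#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>

static int alloc_unbound_port(domid_t remote, evtchn_port_t *port)
{
    evtchn_op_t op;

    op.cmd = EVTCHNOP_alloc_unbound;
    op.u.alloc_unbound.dom = DOMID_SELF;     /* allocate locally */
    op.u.alloc_unbound.remote_dom = remote;  /* peer allowed to bind */

    /* HYPERVISOR_event_channel_op() is an assumed hypercall wrapper. */
    if (HYPERVISOR_event_channel_op(&op) != 0)
        return -1;

    *port = op.u.alloc_unbound.port;         /* OUT parameter */
    return 0;
}
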
-diff --git a/include/xen/interface/features.h b/include/xen/interface/features.h
-new file mode 100644
-index 0000000..c46e3be
---- /dev/null
-+++ b/include/xen/interface/features.h
-@@ -0,0 +1,53 @@
-+/******************************************************************************
-+ * features.h
-+ *
-+ * Feature flags, reported by XENVER_get_features.
-+ *
-+ * Copyright (c) 2006, Keir Fraser <keir@xensource.com>
-+ */
-+
-+#ifndef __XEN_PUBLIC_FEATURES_H__
-+#define __XEN_PUBLIC_FEATURES_H__
-+
-+/*
-+ * If set, the guest does not need to write-protect its pagetables, and can
-+ * update them via direct writes.
-+ */
-+#define XENFEAT_writable_page_tables 0
-+
-+/*
-+ * If set, the guest does not need to write-protect its segment descriptor
-+ * tables, and can update them via direct writes.
-+ */
-+#define XENFEAT_writable_descriptor_tables 1
-+
-+/*
-+ * If set, translation between the guest's 'pseudo-physical' address space
-+ * and the host's machine address space is handled by the hypervisor. In this
-+ * mode the guest does not need to perform phys-to/from-machine translations
-+ * when performing page table operations.
-+ */
-+#define XENFEAT_auto_translated_physmap 2
-+
-+/* If set, the guest is running in supervisor mode (e.g., x86 ring 0). */
-+#define XENFEAT_supervisor_mode_kernel 3
-+
-+/*
-+ * If set, the guest does not need to allocate x86 PAE page directories
-+ * below 4GB. This flag is usually implied by auto_translated_physmap.
-+ */
-+#define XENFEAT_pae_pgdir_above_4gb 4
-+
-+#define XENFEAT_NR_SUBMAPS 1
-+
-+#endif /* __XEN_PUBLIC_FEATURES_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
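
Each XENFEAT_* constant above is a bit index into a 32-bit submap reported by XENVER_get_features; below is a sketch (not part of the patch) of the usual cache-and-test pattern, assuming the submap has been fetched at boot.

#include <stdint.h>
#include <xen/interface/features.h>

/* Assumed to be filled from XENVER_get_features during early boot. */
static uint32_t xen_features[XENFEAT_NR_SUBMAPS];

static int xen_feature(unsigned int flag)
{
    return (xen_features[flag / 32] >> (flag % 32)) & 1;
}

/* e.g.: if (xen_feature(XENFEAT_writable_page_tables)) ... */
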
-diff --git a/include/xen/interface/grant_table.h b/include/xen/interface/grant_table.h
-new file mode 100644
-index 0000000..e137953
---- /dev/null
-+++ b/include/xen/interface/grant_table.h
-@@ -0,0 +1,306 @@
-+/******************************************************************************
-+ * grant_table.h
-+ *
-+ * Interface for granting foreign access to page frames, and receiving
-+ * page-ownership transfers.
-+ *
-+ * Copyright (c) 2004, K A Fraser
-+ */
-+
-+#ifndef __XEN_PUBLIC_GRANT_TABLE_H__
-+#define __XEN_PUBLIC_GRANT_TABLE_H__
-+
-+
-+/***********************************
-+ * GRANT TABLE REPRESENTATION
-+ */
-+
-+/* Some rough guidelines on accessing and updating grant-table entries
-+ * in a concurrency-safe manner. For more information, Linux contains a
-+ * reference implementation for guest OSes (arch/xen/kernel/grant_table.c).
-+ *
-+ * NB. WMB is a no-op on current-generation x86 processors. However, a
-+ * compiler barrier will still be required.
-+ *
-+ * Introducing a valid entry into the grant table:
-+ * 1. Write ent->domid.
-+ * 2. Write ent->frame:
-+ * GTF_permit_access: Frame to which access is permitted.
-+ * GTF_accept_transfer: Pseudo-phys frame slot being filled by new
-+ * frame, or zero if none.
-+ * 3. Write memory barrier (WMB).
-+ * 4. Write ent->flags, inc. valid type.
-+ *
-+ * Invalidating an unused GTF_permit_access entry:
-+ * 1. flags = ent->flags.
-+ * 2. Observe that !(flags & (GTF_reading|GTF_writing)).
-+ * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0).
-+ * NB. No need for WMB as reuse of entry is control-dependent on success of
-+ * step 3, and all architectures guarantee ordering of ctrl-dep writes.
-+ *
-+ * Invalidating an in-use GTF_permit_access entry:
-+ * This cannot be done directly. Request assistance from the domain controller
-+ * which can set a timeout on the use of a grant entry and take necessary
-+ * action. (NB. This is not yet implemented!).
-+ *
-+ * Invalidating an unused GTF_accept_transfer entry:
-+ * 1. flags = ent->flags.
-+ * 2. Observe that !(flags & GTF_transfer_committed). [*]
-+ * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0).
-+ * NB. No need for WMB as reuse of entry is control-dependent on success of
-+ * step 3, and all architectures guarantee ordering of ctrl-dep writes.
-+ * [*] If GTF_transfer_committed is set then the grant entry is 'committed'.
-+ * The guest must /not/ modify the grant entry until the address of the
-+ * transferred frame is written. It is safe for the guest to spin waiting
-+ * for this to occur (detect by observing GTF_transfer_completed in
-+ * ent->flags).
-+ *
-+ * Invalidating a committed GTF_accept_transfer entry:
-+ * 1. Wait for (ent->flags & GTF_transfer_completed).
-+ *
-+ * Changing a GTF_permit_access from writable to read-only:
-+ * Use SMP-safe CMPXCHG to set GTF_readonly, while checking !GTF_writing.
-+ *
-+ * Changing a GTF_permit_access from read-only to writable:
-+ * Use SMP-safe bit-setting instruction.
-+ */
-+
-+/*
-+ * A grant table comprises a packed array of grant entries in one or more
-+ * page frames shared between Xen and a guest.
-+ * [XEN]: This field is written by Xen and read by the sharing guest.
-+ * [GST]: This field is written by the guest and read by Xen.
-+ */
-+typedef struct grant_entry {
-+ /* GTF_xxx: various type and flag information. [XEN,GST] */
-+ uint16_t flags;
-+ /* The domain being granted foreign privileges. [GST] */
-+ domid_t domid;
-+ /*
-+ * GTF_permit_access: Frame that @domid is allowed to map and access. [GST]
-+ * GTF_accept_transfer: Frame whose ownership is transferred by @domid. [XEN]
-+ */
-+ uint32_t frame;
-+} grant_entry_t;
-+
-+/*
-+ * Type of grant entry.
-+ * GTF_invalid: This grant entry grants no privileges.
-+ * GTF_permit_access: Allow @domid to map/access @frame.
-+ * GTF_accept_transfer: Allow @domid to transfer ownership of one page frame
-+ * to this guest. Xen writes the page number to @frame.
-+ */
-+#define GTF_invalid (0U<<0)
-+#define GTF_permit_access (1U<<0)
-+#define GTF_accept_transfer (2U<<0)
-+#define GTF_type_mask (3U<<0)
-+
-+/*
-+ * Subflags for GTF_permit_access.
-+ * GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST]
-+ * GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN]
-+ * GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN]
-+ */
-+#define _GTF_readonly (2)
-+#define GTF_readonly (1U<<_GTF_readonly)
-+#define _GTF_reading (3)
-+#define GTF_reading (1U<<_GTF_reading)
-+#define _GTF_writing (4)
-+#define GTF_writing (1U<<_GTF_writing)
-+
-+/*
-+ * Subflags for GTF_accept_transfer:
-+ * GTF_transfer_committed: Xen sets this flag to indicate that it is committed
-+ * to transferring ownership of a page frame. When a guest sees this flag
-+ * it must /not/ modify the grant entry until GTF_transfer_completed is
-+ * set by Xen.
-+ * GTF_transfer_completed: It is safe for the guest to spin-wait on this flag
-+ * after reading GTF_transfer_committed. Xen will always write the frame
-+ * address, followed by ORing this flag, in a timely manner.
-+ */
-+#define _GTF_transfer_committed (2)
-+#define GTF_transfer_committed (1U<<_GTF_transfer_committed)
-+#define _GTF_transfer_completed (3)
-+#define GTF_transfer_completed (1U<<_GTF_transfer_completed)
-+
-+
-+/***********************************
-+ * GRANT TABLE QUERIES AND USES
-+ */
-+
-+/*
-+ * Reference to a grant entry in a specified domain's grant table.
-+ */
-+typedef uint32_t grant_ref_t;
-+
-+/*
-+ * Handle to track a mapping created via a grant reference.
-+ */
-+typedef uint32_t grant_handle_t;
-+
-+/*
-+ * GNTTABOP_map_grant_ref: Map the grant entry (<dom>,<ref>) for access
-+ * by devices and/or host CPUs. If successful, <handle> is a tracking number
-+ * that must be presented later to destroy the mapping(s). On error, <handle>
-+ * is a negative status code.
-+ * NOTES:
-+ * 1. If GNTPIN_map_for_dev is specified then <dev_bus_addr> is the address
-+ * via which I/O devices may access the granted frame.
-+ * 2. If GNTPIN_map_for_host is specified then a mapping will be added at
-+ * either a host virtual address in the current address space, or at
-+ * a PTE at the specified machine address. The type of mapping to
-+ * perform is selected through the GNTMAP_contains_pte flag, and the
-+ * address is specified in <host_addr>.
-+ * 3. Mappings should only be destroyed via GNTTABOP_unmap_grant_ref. If a
-+ * host mapping is destroyed by other means then it is *NOT* guaranteed
-+ * to be accounted to the correct grant reference!
-+ */
-+#define GNTTABOP_map_grant_ref 0
-+typedef struct gnttab_map_grant_ref {
-+ /* IN parameters. */
-+ uint64_t host_addr;
-+ uint32_t flags; /* GNTMAP_* */
-+ grant_ref_t ref;
-+ domid_t dom;
-+ /* OUT parameters. */
-+ int16_t status; /* GNTST_* */
-+ grant_handle_t handle;
-+ uint64_t dev_bus_addr;
-+} gnttab_map_grant_ref_t;
-+
-+/*
-+ * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings
-+ * tracked by <handle>. If <host_addr> or <dev_bus_addr> is zero, that
-+ * field is ignored. If non-zero, it must refer to a device/host mapping
-+ * that is tracked by <handle>.
-+ * NOTES:
-+ * 1. The call may fail in an undefined manner if either mapping is not
-+ * tracked by <handle>.
-+ * 2. After executing a batch of unmaps, it is guaranteed that no stale
-+ * mappings will remain in the device or host TLBs.
-+ */
-+#define GNTTABOP_unmap_grant_ref 1
-+typedef struct gnttab_unmap_grant_ref {
-+ /* IN parameters. */
-+ uint64_t host_addr;
-+ uint64_t dev_bus_addr;
-+ grant_handle_t handle;
-+ /* OUT parameters. */
-+ int16_t status; /* GNTST_* */
-+} gnttab_unmap_grant_ref_t;
-+
-+/*
-+ * GNTTABOP_setup_table: Set up a grant table for <dom> comprising at least
-+ * <nr_frames> pages. The frame addresses are written to the <frame_list>.
-+ * Only <nr_frames> addresses are written, even if the table is larger.
-+ * NOTES:
-+ * 1. <dom> may be specified as DOMID_SELF.
-+ * 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
-+ * 3. Xen may not support more than a single grant-table page per domain.
-+ */
-+#define GNTTABOP_setup_table 2
-+typedef struct gnttab_setup_table {
-+ /* IN parameters. */
-+ domid_t dom;
-+ uint32_t nr_frames;
-+ /* OUT parameters. */
-+ int16_t status; /* GNTST_* */
-+ unsigned long *frame_list;
-+} gnttab_setup_table_t;
-+
-+/*
-+ * GNTTABOP_dump_table: Dump the contents of the grant table to the
-+ * xen console. Debugging use only.
-+ */
-+#define GNTTABOP_dump_table 3
-+typedef struct gnttab_dump_table {
-+ /* IN parameters. */
-+ domid_t dom;
-+ /* OUT parameters. */
-+ int16_t status; /* GNTST_* */
-+} gnttab_dump_table_t;
-+
-+/*
-+ * GNTTABOP_transfer: Transfer <mfn> to a foreign domain. The
-+ * foreign domain has previously registered its interest in the transfer via
-+ * <domid, ref>.
-+ *
-+ * Note that, even if the transfer fails, the specified page no longer belongs
-+ * to the calling domain *unless* the error is GNTST_bad_page.
-+ */
-+#define GNTTABOP_transfer 4
-+typedef struct gnttab_transfer {
-+ /* IN parameters. */
-+ unsigned long mfn;
-+ domid_t domid;
-+ grant_ref_t ref;
-+ /* OUT parameters. */
-+ int16_t status;
-+} gnttab_transfer_t;
-+
-+/*
-+ * Bitfield values for update_pin_status.flags.
-+ */
-+ /* Map the grant entry for access by I/O devices. */
-+#define _GNTMAP_device_map (0)
-+#define GNTMAP_device_map (1<<_GNTMAP_device_map)
-+ /* Map the grant entry for access by host CPUs. */
-+#define _GNTMAP_host_map (1)
-+#define GNTMAP_host_map (1<<_GNTMAP_host_map)
-+ /* Accesses to the granted frame will be restricted to read-only access. */
-+#define _GNTMAP_readonly (2)
-+#define GNTMAP_readonly (1<<_GNTMAP_readonly)
-+ /*
-+ * GNTMAP_host_map subflag:
-+ * 0 => The host mapping is usable only by the guest OS.
-+ * 1 => The host mapping is usable by guest OS + current application.
-+ */
-+#define _GNTMAP_application_map (3)
-+#define GNTMAP_application_map (1<<_GNTMAP_application_map)
-+
-+ /*
-+ * GNTMAP_contains_pte subflag:
-+ * 0 => This map request contains a host virtual address.
-+ * 1 => This map request contains the machine address of the PTE to update.
-+ */
-+#define _GNTMAP_contains_pte (4)
-+#define GNTMAP_contains_pte (1<<_GNTMAP_contains_pte)
-+
-+/*
-+ * Values for error status returns. All errors are -ve.
-+ */
-+#define GNTST_okay (0) /* Normal return. */
-+#define GNTST_general_error (-1) /* General undefined error. */
-+#define GNTST_bad_domain (-2) /* Unrecognised domain id. */
-+#define GNTST_bad_gntref (-3) /* Unrecognised or inappropriate gntref. */
-+#define GNTST_bad_handle (-4) /* Unrecognised or inappropriate handle. */
-+#define GNTST_bad_virt_addr (-5) /* Inappropriate virtual address to map. */
-+#define GNTST_bad_dev_addr (-6) /* Inappropriate device address to unmap.*/
-+#define GNTST_no_device_space (-7) /* Out of space in I/O MMU. */
-+#define GNTST_permission_denied (-8) /* Not enough privilege for operation. */
-+#define GNTST_bad_page (-9) /* Specified page was invalid for op. */
-+
-+#define GNTTABOP_error_msgs { \
-+ "okay", \
-+ "undefined error", \
-+ "unrecognised domain id", \
-+ "invalid grant reference", \
-+ "invalid mapping handle", \
-+ "invalid virtual address", \
-+ "invalid device address", \
-+ "no spare translation slot in the I/O MMU", \
-+ "permission denied", \
-+ "bad page" \
-+}
-+
-+#endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
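
The four-step update rule documented at the top of grant_table.h, written out as a sketch (not part of the patch); gnttab is assumed to point at the shared table and wmb() to be the platform write barrier.

#include <xen/interface/grant_table.h>

static void grant_access(grant_entry_t *gnttab, grant_ref_t ref,
                         domid_t domid, uint32_t frame, int readonly)
{
    gnttab[ref].domid = domid;   /* 1. write ent->domid */
    gnttab[ref].frame = frame;   /* 2. frame access is granted to */
    wmb();                       /* 3. barrier before validating flags */
    gnttab[ref].flags = GTF_permit_access |
                        (readonly ? GTF_readonly : 0);  /* 4. valid type */
}
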
-diff --git a/include/xen/interface/hvm/hvm_info_table.h b/include/xen/interface/hvm/hvm_info_table.h
-new file mode 100644
-index 0000000..3891180
---- /dev/null
-+++ b/include/xen/interface/hvm/hvm_info_table.h
-@@ -0,0 +1,24 @@
-+/******************************************************************************
-+ * hvm/hvm_info_table.h
-+ *
-+ * HVM parameter and information table, written into guest memory map.
-+ */
-+
-+#ifndef __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__
-+#define __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__
-+
-+#define HVM_INFO_PFN 0x09F
-+#define HVM_INFO_OFFSET 0x800
-+#define HVM_INFO_PADDR ((HVM_INFO_PFN << 12) + HVM_INFO_OFFSET)
-+
-+struct hvm_info_table {
-+ char signature[8]; /* "HVM INFO" */
-+ uint32_t length;
-+ uint8_t checksum;
-+ uint8_t acpi_enabled;
-+ uint8_t apic_enabled;
-+ uint8_t pae_enabled;
-+ uint32_t nr_vcpus;
-+};
-+
-+#endif /* __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ */
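
The checksum field above presumably follows the usual BIOS-table convention (all 'length' bytes sum to zero mod 256); a hedged validation sketch (not part of the patch) under that assumption:

#include <stdint.h>
#include <xen/interface/hvm/hvm_info_table.h>

static int hvm_info_checksum_ok(const struct hvm_info_table *t)
{
    const uint8_t *p = (const uint8_t *)t;
    uint8_t sum = 0;
    uint32_t i;

    for (i = 0; i < t->length; i++)
        sum += p[i];
    return sum == 0;  /* checksum byte balances the table (assumed) */
}
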
-diff --git a/include/xen/interface/hvm/ioreq.h b/include/xen/interface/hvm/ioreq.h
-new file mode 100644
-index 0000000..4897ba3
---- /dev/null
-+++ b/include/xen/interface/hvm/ioreq.h
-@@ -0,0 +1,93 @@
-+/*
-+ * ioreq.h: I/O request definitions for device models
-+ * Copyright (c) 2004, Intel Corporation.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
-+ * Place - Suite 330, Boston, MA 02111-1307 USA.
-+ *
-+ */
-+
-+#ifndef _IOREQ_H_
-+#define _IOREQ_H_
-+
-+#define IOREQ_READ 1
-+#define IOREQ_WRITE 0
-+
-+#define STATE_INVALID 0
-+#define STATE_IOREQ_READY 1
-+#define STATE_IOREQ_INPROCESS 2
-+#define STATE_IORESP_READY 3
-+#define STATE_IORESP_HOOK 4
-+
-+#define IOREQ_TYPE_PIO 0 /* pio */
-+#define IOREQ_TYPE_COPY 1 /* mmio ops */
-+#define IOREQ_TYPE_AND 2
-+#define IOREQ_TYPE_OR 3
-+#define IOREQ_TYPE_XOR 4
-+
-+/*
-+ * The VMExit dispatcher cooperates with the instruction decoder to
-+ * prepare this structure, then notifies the service OS and the DM by
-+ * sending a virq.
-+ */
-+typedef struct {
-+ uint64_t addr; /* physical address */
-+ uint64_t size; /* size in bytes */
-+ uint64_t count; /* for rep prefixes */
-+ union {
-+ uint64_t data; /* data */
-+ void *pdata; /* pointer to data */
-+ } u;
-+ uint8_t state:4;
-+ uint8_t pdata_valid:1; /* if 1, use pdata above */
-+ uint8_t dir:1; /* 1=read, 0=write */
-+ uint8_t df:1;
-+ uint8_t type; /* I/O type */
-+ uint64_t io_count; /* number of I/O requests completed on this vcpu */
-+} ioreq_t;
-+
-+#define MAX_VECTOR 256
-+#define BITS_PER_BYTE 8
-+#define INTR_LEN (MAX_VECTOR/(BITS_PER_BYTE * sizeof(uint64_t)))
-+#define INTR_LEN_32 (MAX_VECTOR/(BITS_PER_BYTE * sizeof(uint32_t)))
-+
-+typedef struct {
-+ uint16_t pic_elcr;
-+ uint16_t pic_irr;
-+ uint16_t pic_last_irr;
-+ uint16_t pic_clear_irr;
-+} global_iodata_t;
-+
-+typedef struct {
-+ ioreq_t vp_ioreq;
-+ /* Event channel port */
-+ unsigned int vp_eport; /* VMX vcpu uses this to notify DM */
-+ unsigned int dm_eport; /* DM uses this to notify VMX vcpu */
-+} vcpu_iodata_t;
-+
-+typedef struct {
-+ global_iodata_t sp_global;
-+ vcpu_iodata_t vcpu_iodata[1];
-+} shared_iopage_t;
-+
-+#endif /* _IOREQ_H_ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
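
The state field above is a simple vcpu/device-model handshake; here is a sketch (not part of the patch) of the DM side, with the actual port-I/O or MMIO emulation left as a hypothetical emulate() and the vp_eport/dm_eport wakeups elided.

#include <xen/interface/hvm/ioreq.h>

static void dm_handle(vcpu_iodata_t *vio)
{
    ioreq_t *req = &vio->vp_ioreq;

    if (req->state != STATE_IOREQ_READY)
        return;                          /* nothing pending */
    req->state = STATE_IOREQ_INPROCESS;

    /* emulate(req); -- hypothetical: perform the access in req */

    req->state = STATE_IORESP_READY;     /* vcpu side collects result */
}
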
-diff --git a/include/xen/interface/hvm/vmx_assist.h b/include/xen/interface/hvm/vmx_assist.h
-new file mode 100644
-index 0000000..9ef6648
---- /dev/null
-+++ b/include/xen/interface/hvm/vmx_assist.h
-@@ -0,0 +1,97 @@
-+/*
-+ * vmx_assist.h: Context definitions for the VMXASSIST world switch.
-+ *
-+ * Leendert van Doorn, leendert@watson.ibm.com
-+ * Copyright (c) 2005, International Business Machines Corporation.
-+ */
-+
-+#ifndef _VMX_ASSIST_H_
-+#define _VMX_ASSIST_H_
-+
-+#define VMXASSIST_BASE 0xD0000
-+#define VMXASSIST_MAGIC 0x17101966
-+#define VMXASSIST_MAGIC_OFFSET (VMXASSIST_BASE+8)
-+
-+#define VMXASSIST_NEW_CONTEXT (VMXASSIST_BASE + 12)
-+#define VMXASSIST_OLD_CONTEXT (VMXASSIST_NEW_CONTEXT + 4)
-+
-+#ifndef __ASSEMBLY__
-+
-+union vmcs_arbytes {
-+ struct arbyte_fields {
-+ unsigned int seg_type : 4,
-+ s : 1,
-+ dpl : 2,
-+ p : 1,
-+ reserved0 : 4,
-+ avl : 1,
-+ reserved1 : 1,
-+ default_ops_size: 1,
-+ g : 1,
-+ null_bit : 1,
-+ reserved2 : 15;
-+ } fields;
-+ unsigned int bytes;
-+};
-+
-+/*
-+ * World switch state
-+ */
-+typedef struct vmx_assist_context {
-+ uint32_t eip; /* execution pointer */
-+ uint32_t esp; /* stack pointer */
-+ uint32_t eflags; /* flags register */
-+ uint32_t cr0;
-+ uint32_t cr3; /* page table directory */
-+ uint32_t cr4;
-+ uint32_t idtr_limit; /* idt */
-+ uint32_t idtr_base;
-+ uint32_t gdtr_limit; /* gdt */
-+ uint32_t gdtr_base;
-+ uint32_t cs_sel; /* cs selector */
-+ uint32_t cs_limit;
-+ uint32_t cs_base;
-+ union vmcs_arbytes cs_arbytes;
-+ uint32_t ds_sel; /* ds selector */
-+ uint32_t ds_limit;
-+ uint32_t ds_base;
-+ union vmcs_arbytes ds_arbytes;
-+ uint32_t es_sel; /* es selector */
-+ uint32_t es_limit;
-+ uint32_t es_base;
-+ union vmcs_arbytes es_arbytes;
-+ uint32_t ss_sel; /* ss selector */
-+ uint32_t ss_limit;
-+ uint32_t ss_base;
-+ union vmcs_arbytes ss_arbytes;
-+ uint32_t fs_sel; /* fs selector */
-+ uint32_t fs_limit;
-+ uint32_t fs_base;
-+ union vmcs_arbytes fs_arbytes;
-+ uint32_t gs_sel; /* gs selector */
-+ uint32_t gs_limit;
-+ uint32_t gs_base;
-+ union vmcs_arbytes gs_arbytes;
-+ uint32_t tr_sel; /* task selector */
-+ uint32_t tr_limit;
-+ uint32_t tr_base;
-+ union vmcs_arbytes tr_arbytes;
-+ uint32_t ldtr_sel; /* ldtr selector */
-+ uint32_t ldtr_limit;
-+ uint32_t ldtr_base;
-+ union vmcs_arbytes ldtr_arbytes;
-+} vmx_assist_context_t;
-+
-+#endif /* __ASSEMBLY__ */
-+
-+#endif /* _VMX_ASSIST_H_ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
-new file mode 100644
-index 0000000..bb92917
---- /dev/null
-+++ b/include/xen/interface/io/blkif.h
-@@ -0,0 +1,85 @@
-+/******************************************************************************
-+ * blkif.h
-+ *
-+ * Unified block-device I/O interface for Xen guest OSes.
-+ *
-+ * Copyright (c) 2003-2004, Keir Fraser
-+ */
-+
-+#ifndef __XEN_PUBLIC_IO_BLKIF_H__
-+#define __XEN_PUBLIC_IO_BLKIF_H__
-+
-+#include "ring.h"
-+#include "../grant_table.h"
-+
-+/*
-+ * Front->back notifications: When enqueuing a new request, sending a
-+ * notification can be made conditional on req_event (i.e., the generic
-+ * hold-off mechanism provided by the ring macros). Backends must set
-+ * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()).
-+ *
-+ * Back->front notifications: When enqueuing a new response, sending a
-+ * notification can be made conditional on rsp_event (i.e., the generic
-+ * hold-off mechanism provided by the ring macros). Frontends must set
-+ * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()).
-+ */
-+
-+#ifndef blkif_vdev_t
-+#define blkif_vdev_t uint16_t
-+#endif
-+#define blkif_sector_t uint64_t
-+
-+#define BLKIF_OP_READ 0
-+#define BLKIF_OP_WRITE 1
-+
-+/*
-+ * Maximum scatter/gather segments per request.
-+ * This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE.
-+ * NB. This could be 12 if the ring indexes weren't stored in the same page.
-+ */
-+#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
-+
-+typedef struct blkif_request {
-+ uint8_t operation; /* BLKIF_OP_??? */
-+ uint8_t nr_segments; /* number of segments */
-+ blkif_vdev_t handle; /* only for read/write requests */
-+ uint64_t id; /* private guest value, echoed in resp */
-+ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
-+ struct blkif_request_segment {
-+ grant_ref_t gref; /* reference to I/O buffer frame */
-+ /* @first_sect: first sector in frame to transfer (inclusive). */
-+ /* @last_sect: last sector in frame to transfer (inclusive). */
-+ uint8_t first_sect, last_sect;
-+ } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-+} blkif_request_t;
-+
-+typedef struct blkif_response {
-+ uint64_t id; /* copied from request */
-+ uint8_t operation; /* copied from request */
-+ int16_t status; /* BLKIF_RSP_??? */
-+} blkif_response_t;
-+
-+#define BLKIF_RSP_ERROR -1 /* non-specific 'error' */
-+#define BLKIF_RSP_OKAY 0 /* non-specific 'okay' */
-+
-+/*
-+ * Generate blkif ring structures and types.
-+ */
-+
-+DEFINE_RING_TYPES(blkif, blkif_request_t, blkif_response_t);
-+
-+#define VDISK_CDROM 0x1
-+#define VDISK_REMOVABLE 0x2
-+#define VDISK_READONLY 0x4
-+
-+#endif /* __XEN_PUBLIC_IO_BLKIF_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
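
A sketch (not part of the patch) of enqueuing a single-segment read on a blkif front ring, following the notification hold-off described above; the wmb()/mb() barriers inside the ring macros and the actual event-channel kick are assumed from the platform.

#include <xen/interface/io/blkif.h>

static void enqueue_read(blkif_front_ring_t *ring, blkif_vdev_t handle,
                         blkif_sector_t sector, grant_ref_t gref, uint64_t id)
{
    blkif_request_t *req = RING_GET_REQUEST(ring, ring->req_prod_pvt);
    int notify;

    req->operation = BLKIF_OP_READ;
    req->nr_segments = 1;
    req->handle = handle;
    req->id = id;                        /* echoed in the response */
    req->sector_number = sector;
    req->seg[0].gref = gref;             /* grant for the I/O buffer */
    req->seg[0].first_sect = 0;
    req->seg[0].last_sect = 7;           /* whole 4K frame, 512B sectors */

    ring->req_prod_pvt++;
    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
    if (notify) {
        /* kick the backend via its event channel (driver-specific) */
    }
}
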
-diff --git a/include/xen/interface/io/console.h b/include/xen/interface/io/console.h
-new file mode 100644
-index 0000000..cb59b24
---- /dev/null
-+++ b/include/xen/interface/io/console.h
-@@ -0,0 +1,33 @@
-+/******************************************************************************
-+ * console.h
-+ *
-+ * Console I/O interface for Xen guest OSes.
-+ *
-+ * Copyright (c) 2005, Keir Fraser
-+ */
-+
-+#ifndef __XEN_PUBLIC_IO_CONSOLE_H__
-+#define __XEN_PUBLIC_IO_CONSOLE_H__
-+
-+typedef uint32_t XENCONS_RING_IDX;
-+
-+#define MASK_XENCONS_IDX(idx, ring) ((idx) & (sizeof(ring)-1))
-+
-+struct xencons_interface {
-+ char in[1024];
-+ char out[2048];
-+ XENCONS_RING_IDX in_cons, in_prod;
-+ XENCONS_RING_IDX out_cons, out_prod;
-+};
-+
-+#endif /* __XEN_PUBLIC_IO_CONSOLE_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
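
A sketch (not part of the patch) of the producer side of the 'out' ring above; the free-running index is masked into the buffer by MASK_XENCONS_IDX, and a real driver would add a write barrier before publishing out_prod.

#include <xen/interface/io/console.h>

static int xencons_put_char(struct xencons_interface *intf, char c)
{
    XENCONS_RING_IDX prod = intf->out_prod;

    if (prod - intf->out_cons == sizeof(intf->out))
        return 0;                                /* ring is full */

    intf->out[MASK_XENCONS_IDX(prod, intf->out)] = c;
    /* wmb() here in a real driver, before publishing the index */
    intf->out_prod = prod + 1;
    return 1;
}
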
-diff --git a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h
-new file mode 100644
-index 0000000..dcb9b46
---- /dev/null
-+++ b/include/xen/interface/io/netif.h
-@@ -0,0 +1,76 @@
-+/******************************************************************************
-+ * netif.h
-+ *
-+ * Unified network-device I/O interface for Xen guest OSes.
-+ *
-+ * Copyright (c) 2003-2004, Keir Fraser
-+ */
-+
-+#ifndef __XEN_PUBLIC_IO_NETIF_H__
-+#define __XEN_PUBLIC_IO_NETIF_H__
-+
-+#include "ring.h"
-+#include "../grant_table.h"
-+
-+/*
-+ * Note that there is *never* any need to notify the backend when enqueuing
-+ * receive requests (netif_rx_request_t). Notifications after enqueuing any
-+ * other type of message should be conditional on the appropriate req_event
-+ * or rsp_event field in the shared ring.
-+ */
-+
-+/* Protocol checksum field is blank in the packet (hardware offload)? */
-+#define _NETTXF_csum_blank (0)
-+#define NETTXF_csum_blank (1U<<_NETTXF_csum_blank)
-+
-+typedef struct netif_tx_request {
-+ grant_ref_t gref; /* Reference to buffer page */
-+ uint16_t offset; /* Offset within buffer page */
-+ uint16_t flags; /* NETTXF_* */
-+ uint16_t id; /* Echoed in response message. */
-+ uint16_t size; /* Packet size in bytes. */
-+} netif_tx_request_t;
-+
-+typedef struct netif_tx_response {
-+ uint16_t id;
-+ int16_t status; /* NETIF_RSP_* */
-+} netif_tx_response_t;
-+
-+typedef struct {
-+ uint16_t id; /* Echoed in response message. */
-+ grant_ref_t gref; /* Reference to incoming granted frame */
-+} netif_rx_request_t;
-+
-+/* Protocol checksum already validated (e.g., performed by hardware)? */
-+#define _NETRXF_csum_valid (0)
-+#define NETRXF_csum_valid (1U<<_NETRXF_csum_valid)
-+
-+typedef struct {
-+ uint16_t id;
-+ uint16_t offset; /* Offset in page of start of received packet */
-+ uint16_t flags; /* NETRXF_* */
-+ int16_t status; /* -ve: NETIF_RSP_* ; +ve: Rx'ed pkt size. */
-+} netif_rx_response_t;
-+
-+/*
-+ * Generate netif ring structures and types.
-+ */
-+
-+DEFINE_RING_TYPES(netif_tx, netif_tx_request_t, netif_tx_response_t);
-+DEFINE_RING_TYPES(netif_rx, netif_rx_request_t, netif_rx_response_t);
-+
-+#define NETIF_RSP_DROPPED -2
-+#define NETIF_RSP_ERROR -1
-+#define NETIF_RSP_OKAY 0
-+
-+#endif
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff --git a/include/xen/interface/io/pciif.h b/include/xen/interface/io/pciif.h
-new file mode 100644
-index 0000000..a1c9ab7
---- /dev/null
-+++ b/include/xen/interface/io/pciif.h
-@@ -0,0 +1,55 @@
-+/*
-+ * PCI Backend/Frontend Common Data Structures & Macros
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+#ifndef __XEN_PCI_COMMON_H__
-+#define __XEN_PCI_COMMON_H__
-+
-+/* Be sure to bump this number if you change this file */
-+#define XEN_PCI_MAGIC "7"
-+
-+/* xen_pci_sharedinfo flags */
-+#define _XEN_PCIF_active (0)
-+#define XEN_PCIF_active (1<<_XEN_PCIF_active)
-+
-+/* xen_pci_op commands */
-+#define XEN_PCI_OP_conf_read (0)
-+#define XEN_PCI_OP_conf_write (1)
-+
-+/* xen_pci_op error numbers */
-+#define XEN_PCI_ERR_success (0)
-+#define XEN_PCI_ERR_dev_not_found (-1)
-+#define XEN_PCI_ERR_invalid_offset (-2)
-+#define XEN_PCI_ERR_access_denied (-3)
-+#define XEN_PCI_ERR_not_implemented (-4)
-+/* XEN_PCI_ERR_op_failed - backend failed to complete the operation */
-+#define XEN_PCI_ERR_op_failed (-5)
-+
-+struct xen_pci_op {
-+ /* IN: what action to perform: XEN_PCI_OP_* */
-+ uint32_t cmd;
-+
-+ /* OUT: will contain an error number (if any) from errno.h */
-+ int32_t err;
-+
-+ /* IN: which device to touch */
-+ uint32_t domain; /* PCI Domain/Segment */
-+ uint32_t bus;
-+ uint32_t devfn;
-+
-+ /* IN: which configuration registers to touch */
-+ int32_t offset;
-+ int32_t size;
-+
-+ /* IN/OUT: Contains the result after a READ or the value to WRITE */
-+ uint32_t value;
-+};
-+
-+struct xen_pci_sharedinfo {
-+ /* flags - XEN_PCIF_* */
-+ uint32_t flags;
-+ struct xen_pci_op op;
-+};
-+
-+#endif /* __XEN_PCI_COMMON_H__ */
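
A sketch (not part of the patch) of how a frontend might fill the shared op slot for a 32-bit config-space read; flagging the op active and waking the backend are deliberately elided, so this shows only the field protocol.

#include <stdint.h>
#include <xen/interface/io/pciif.h>

static void prepare_conf_read(struct xen_pci_sharedinfo *info,
                              uint32_t bus, uint32_t devfn, int32_t offset)
{
    struct xen_pci_op *op = &info->op;

    op->cmd = XEN_PCI_OP_conf_read;
    op->domain = 0;              /* PCI segment 0 (assumed) */
    op->bus = bus;
    op->devfn = devfn;
    op->offset = offset;
    op->size = 4;                /* 32-bit register */
    /* backend writes op->value and op->err; frontend reads them back */
}
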
-diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
-new file mode 100644
-index 0000000..d9aac23
---- /dev/null
-+++ b/include/xen/interface/io/ring.h
-@@ -0,0 +1,265 @@
-+/******************************************************************************
-+ * ring.h
-+ *
-+ * Shared producer-consumer ring macros.
-+ *
-+ * Tim Deegan and Andrew Warfield November 2004.
-+ */
-+
-+#ifndef __XEN_PUBLIC_IO_RING_H__
-+#define __XEN_PUBLIC_IO_RING_H__
-+
-+typedef unsigned int RING_IDX;
-+
-+/* Round a 32-bit unsigned constant down to the nearest power of two. */
-+#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
-+#define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x))
-+#define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x))
-+#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x))
-+#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x))
-+
-+/*
-+ * Calculate size of a shared ring, given the total available space for the
-+ * ring and indexes (_sz), and the name tag of the request/response structure.
-+ * A ring contains as many entries as will fit, rounded down to the nearest
-+ * power of two (so we can mask with (size-1) to loop around).
-+ */
-+#define __RING_SIZE(_s, _sz) \
-+ (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
-+
-+/*
-+ * Macros to make the correct C datatypes for a new kind of ring.
-+ *
-+ * To make a new ring datatype, you need to have two message structures,
-+ * let's say request_t, and response_t already defined.
-+ *
-+ * In a header where you want the ring datatype declared, you then do:
-+ *
-+ * DEFINE_RING_TYPES(mytag, request_t, response_t);
-+ *
-+ * These expand out to give you a set of types, as you can see below.
-+ * The most important of these are:
-+ *
-+ * mytag_sring_t - The shared ring.
-+ * mytag_front_ring_t - The 'front' half of the ring.
-+ * mytag_back_ring_t - The 'back' half of the ring.
-+ *
-+ * To initialize a ring in your code you need to know the location and size
-+ * of the shared memory area (PAGE_SIZE, for instance). To initialize
-+ * the front half:
-+ *
-+ * mytag_front_ring_t front_ring;
-+ * SHARED_RING_INIT((mytag_sring_t *)shared_page);
-+ * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
-+ *
-+ * Initializing the back follows similarly (note that only the front
-+ * initializes the shared ring):
-+ *
-+ * mytag_back_ring_t back_ring;
-+ * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
-+ */
-+
-+#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
-+ \
-+/* Shared ring entry */ \
-+union __name##_sring_entry { \
-+ __req_t req; \
-+ __rsp_t rsp; \
-+}; \
-+ \
-+/* Shared ring page */ \
-+struct __name##_sring { \
-+ RING_IDX req_prod, req_event; \
-+ RING_IDX rsp_prod, rsp_event; \
-+ uint8_t pad[48]; \
-+ union __name##_sring_entry ring[1]; /* variable-length */ \
-+}; \
-+ \
-+/* "Front" end's private variables */ \
-+struct __name##_front_ring { \
-+ RING_IDX req_prod_pvt; \
-+ RING_IDX rsp_cons; \
-+ unsigned int nr_ents; \
-+ struct __name##_sring *sring; \
-+}; \
-+ \
-+/* "Back" end's private variables */ \
-+struct __name##_back_ring { \
-+ RING_IDX rsp_prod_pvt; \
-+ RING_IDX req_cons; \
-+ unsigned int nr_ents; \
-+ struct __name##_sring *sring; \
-+}; \
-+ \
-+/* Syntactic sugar */ \
-+typedef struct __name##_sring __name##_sring_t; \
-+typedef struct __name##_front_ring __name##_front_ring_t; \
-+typedef struct __name##_back_ring __name##_back_ring_t
-+
-+/*
-+ * Macros for manipulating rings.
-+ *
-+ * FRONT_RING_whatever works on the "front end" of a ring: here
-+ * requests are pushed on to the ring and responses taken off it.
-+ *
-+ * BACK_RING_whatever works on the "back end" of a ring: here
-+ * requests are taken off the ring and responses put on.
-+ *
-+ * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL.
-+ * This is OK in 1-for-1 request-response situations where the
-+ * requestor (front end) never has more than RING_SIZE()-1
-+ * outstanding requests.
-+ */
-+
-+/* Initialising empty rings */
-+#define SHARED_RING_INIT(_s) do { \
-+ (_s)->req_prod = (_s)->rsp_prod = 0; \
-+ (_s)->req_event = (_s)->rsp_event = 1; \
-+ memset((_s)->pad, 0, sizeof((_s)->pad)); \
-+} while(0)
-+
-+#define FRONT_RING_INIT(_r, _s, __size) do { \
-+ (_r)->req_prod_pvt = 0; \
-+ (_r)->rsp_cons = 0; \
-+ (_r)->nr_ents = __RING_SIZE(_s, __size); \
-+ (_r)->sring = (_s); \
-+} while (0)
-+
-+#define BACK_RING_INIT(_r, _s, __size) do { \
-+ (_r)->rsp_prod_pvt = 0; \
-+ (_r)->req_cons = 0; \
-+ (_r)->nr_ents = __RING_SIZE(_s, __size); \
-+ (_r)->sring = (_s); \
-+} while (0)
-+
-+/* Initialize to existing shared indexes -- for recovery */
-+#define FRONT_RING_ATTACH(_r, _s, __size) do { \
-+ (_r)->sring = (_s); \
-+ (_r)->req_prod_pvt = (_s)->req_prod; \
-+ (_r)->rsp_cons = (_s)->rsp_prod; \
-+ (_r)->nr_ents = __RING_SIZE(_s, __size); \
-+} while (0)
-+
-+#define BACK_RING_ATTACH(_r, _s, __size) do { \
-+ (_r)->sring = (_s); \
-+ (_r)->rsp_prod_pvt = (_s)->rsp_prod; \
-+ (_r)->req_cons = (_s)->req_prod; \
-+ (_r)->nr_ents = __RING_SIZE(_s, __size); \
-+} while (0)
-+
-+/* How big is this ring? */
-+#define RING_SIZE(_r) \
-+ ((_r)->nr_ents)
-+
-+/* Test if there is an empty slot available on the front ring.
-+ * (This is only meaningful from the front.)
-+ */
-+#define RING_FULL(_r) \
-+ (((_r)->req_prod_pvt - (_r)->rsp_cons) == RING_SIZE(_r))
-+
-+/* Test if there are outstanding messages to be processed on a ring. */
-+#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
-+ ((_r)->rsp_cons != (_r)->sring->rsp_prod)
-+
-+#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
-+ (((_r)->req_cons != (_r)->sring->req_prod) && \
-+ (((_r)->req_cons - (_r)->rsp_prod_pvt) != RING_SIZE(_r)))
-+
-+/* Direct access to individual ring elements, by index. */
-+#define RING_GET_REQUEST(_r, _idx) \
-+ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
-+
-+#define RING_GET_RESPONSE(_r, _idx) \
-+ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
-+
-+/* Loop termination condition: Would the specified index overflow the ring? */
-+#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
-+ (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
-+
-+#define RING_PUSH_REQUESTS(_r) do { \
-+ wmb(); /* back sees requests /before/ updated producer index */ \
-+ (_r)->sring->req_prod = (_r)->req_prod_pvt; \
-+} while (0)
-+
-+#define RING_PUSH_RESPONSES(_r) do { \
-+ wmb(); /* front sees responses /before/ updated producer index */ \
-+ (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
-+} while (0)
-+
-+/*
-+ * Notification hold-off (req_event and rsp_event):
-+ *
-+ * When queueing requests or responses on a shared ring, it may not always be
-+ * necessary to notify the remote end. For example, if requests are in flight
-+ * in a backend, the front may be able to queue further requests without
-+ * notifying the back (if the back checks for new requests when it queues
-+ * responses).
-+ *
-+ * When enqueuing requests or responses:
-+ *
-+ * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument
-+ * is a boolean return value. True indicates that the receiver requires an
-+ * asynchronous notification.
-+ *
-+ * After dequeuing requests or responses (before sleeping the connection):
-+ *
-+ * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES().
-+ * The second argument is a boolean return value. True indicates that there
-+ * are pending messages on the ring (i.e., the connection should not be put
-+ * to sleep).
-+ *
-+ * These macros will set the req_event/rsp_event field to trigger a
-+ * notification on the very next message that is enqueued. If you want to
-+ * create batches of work (i.e., only receive a notification after several
-+ * messages have been enqueued) then you will need to create a customised
-+ * version of the FINAL_CHECK macro in your own code, which sets the event
-+ * field appropriately.
-+ */
-+
-+#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
-+ RING_IDX __old = (_r)->sring->req_prod; \
-+ RING_IDX __new = (_r)->req_prod_pvt; \
-+ wmb(); /* back sees requests /before/ updated producer index */ \
-+ (_r)->sring->req_prod = __new; \
-+ mb(); /* back sees new requests /before/ we check req_event */ \
-+ (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
-+ (RING_IDX)(__new - __old)); \
-+} while (0)
-+
-+#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
-+ RING_IDX __old = (_r)->sring->rsp_prod; \
-+ RING_IDX __new = (_r)->rsp_prod_pvt; \
-+ wmb(); /* front sees responses /before/ updated producer index */ \
-+ (_r)->sring->rsp_prod = __new; \
-+ mb(); /* front sees new responses /before/ we check rsp_event */ \
-+ (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
-+ (RING_IDX)(__new - __old)); \
-+} while (0)
-+
-+#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
-+ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
-+ if (_work_to_do) break; \
-+ (_r)->sring->req_event = (_r)->req_cons + 1; \
-+ mb(); \
-+ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
-+} while (0)
-+
-+#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
-+ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
-+ if (_work_to_do) break; \
-+ (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
-+ mb(); \
-+ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
-+} while (0)
-+
-+#endif /* __XEN_PUBLIC_IO_RING_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
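
A sketch (not part of the patch) of a back-end consumer loop using the hold-off macros above; the mytag ring types are assumed to come from DEFINE_RING_TYPES(mytag, request_t, response_t) and process() is a hypothetical per-request handler.

static void consume_requests(mytag_back_ring_t *ring)
{
    int more;

    do {
        while (RING_HAS_UNCONSUMED_REQUESTS(ring) &&
               !RING_REQUEST_CONS_OVERFLOW(ring, ring->req_cons)) {
            request_t *req = RING_GET_REQUEST(ring, ring->req_cons);
            /* process(req); -- hypothetical handler */
            ring->req_cons++;
        }
        /* re-arms req_event, then catches requests that raced in */
        RING_FINAL_CHECK_FOR_REQUESTS(ring, more);
    } while (more);
}
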
-diff --git a/include/xen/interface/io/tpmif.h b/include/xen/interface/io/tpmif.h
-new file mode 100644
-index 0000000..6d02f33
---- /dev/null
-+++ b/include/xen/interface/io/tpmif.h
-@@ -0,0 +1,56 @@
-+/******************************************************************************
-+ * tpmif.h
-+ *
-+ * TPM I/O interface for Xen guest OSes.
-+ *
-+ * Copyright (c) 2005, IBM Corporation
-+ *
-+ * Author: Stefan Berger, stefanb@us.ibm.com
-+ * Grant table support: Mahadevan Gomathisankaran
-+ *
-+ * This code has been derived from tools/libxc/xen/io/netif.h
-+ *
-+ * Copyright (c) 2003-2004, Keir Fraser
-+ */
-+
-+#ifndef __XEN_PUBLIC_IO_TPMIF_H__
-+#define __XEN_PUBLIC_IO_TPMIF_H__
-+
-+#include "../grant_table.h"
-+
-+typedef struct {
-+ unsigned long addr; /* Machine address of packet. */
-+ grant_ref_t ref; /* grant table access reference */
-+ uint16_t unused;
-+ uint16_t size; /* Packet size in bytes. */
-+} tpmif_tx_request_t;
-+
-+/*
-+ * TPMIF_TX_RING_SIZE defines the number of pages the front-end
-+ * and back-end can exchange (i.e., the size of the ring array).
-+ */
-+typedef uint32_t TPMIF_RING_IDX;
-+
-+#define TPMIF_TX_RING_SIZE 10
-+
-+/* This structure must fit in a memory page. */
-+
-+typedef struct {
-+ tpmif_tx_request_t req;
-+} tpmif_ring_t;
-+
-+typedef struct {
-+ tpmif_ring_t ring[TPMIF_TX_RING_SIZE];
-+} tpmif_tx_interface_t;
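-+
-+/*
-+ * Illustrative compile-time check (not part of the original header):
-+ * the comment above requires the interface to fit in one page.  With
-+ * TPMIF_TX_RING_SIZE == 10 this holds comfortably on both 32-bit and
-+ * 64-bit builds; a 4096-byte page is assumed here.
-+ */
-+#if 0 /* example only */
-+typedef char tpmif_interface_fits_in_a_page
-+    [(sizeof(tpmif_tx_interface_t) <= 4096) ? 1 : -1];
-+#endif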
-+
-+#endif
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff --git a/include/xen/interface/io/vmx_vlapic.h b/include/xen/interface/io/vmx_vlapic.h
-new file mode 100644
-index 0000000..f63a9aa
---- /dev/null
-+++ b/include/xen/interface/io/vmx_vlapic.h
-@@ -0,0 +1,58 @@
-+#ifndef _VMX_VLAPIC_H
-+#define _VMX_VLAPIC_H
-+
-+/*
-+ * One extra bit extends the delivery mode to encode the PIC interrupt type.
-+ */
-+#define VLAPIC_DELIV_MODE_FIXED 0x0
-+#define VLAPIC_DELIV_MODE_LPRI 0x1
-+#define VLAPIC_DELIV_MODE_SMI 0x2
-+#define VLAPIC_DELIV_MODE_NMI 0x4
-+#define VLAPIC_DELIV_MODE_INIT 0x5
-+#define VLAPIC_DELIV_MODE_STARTUP 0x6
-+#define VLAPIC_DELIV_MODE_EXT 0x7
-+#define VLAPIC_DELIV_MODE_MASK 0x8
-+
-+#define VLAPIC_MSG_LEVEL 4
-+
-+#define INTR_EXT 0
-+#define INTR_APIC 1
-+#define INTR_LAPIC 2
-+
-+#define VL_STATE_EOI 1
-+#define VL_STATE_EXT_LOCK 2
-+#define VL_STATE_MSG_LOCK 3
-+#define VL_STATE_EOI_LOCK 3
-+
-+#define VLOCAL_APIC_MAX_INTS 256
-+#define VLAPIC_INT_COUNT (VLOCAL_APIC_MAX_INTS/(BITS_PER_BYTE * sizeof(uint64_t)))
-+#define VLAPIC_INT_COUNT_32 (VLOCAL_APIC_MAX_INTS/(BITS_PER_BYTE * sizeof(uint32_t)))
-+
-+typedef struct {
-+ /* interrupt for PIC and ext type IOAPIC interrupt */
-+ uint64_t vl_ext_intr[VLAPIC_INT_COUNT];
-+ uint64_t vl_ext_intr_mask[VLAPIC_INT_COUNT];
-+ uint64_t vl_apic_intr[VLAPIC_INT_COUNT];
-+ uint64_t vl_apic_tmr[VLAPIC_INT_COUNT];
-+ uint64_t vl_eoi[VLAPIC_INT_COUNT];
-+ uint32_t vl_lapic_id;
-+ uint32_t direct_intr;
-+ uint32_t vl_apr;
-+ uint32_t vl_logical_dest;
-+ uint32_t vl_dest_format;
-+ uint32_t vl_arb_id;
-+ uint32_t vl_state;
-+ uint32_t apic_msg_count;
-+} vlapic_info;
-+
-+#endif /* _VMX_VLAPIC_H_ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff --git a/include/xen/interface/io/vmx_vpic.h b/include/xen/interface/io/vmx_vpic.h
-new file mode 100644
-index 0000000..256ac87
---- /dev/null
-+++ b/include/xen/interface/io/vmx_vpic.h
-@@ -0,0 +1,85 @@
-+/*
-+ * QEMU System Emulator header
-+ *
-+ * Copyright (c) 2003 Fabrice Bellard
-+ * Copyright (c) 2005 Intel Corp
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to deal
-+ * in the Software without restriction, including without limitation the rights
-+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-+ * copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-+ * THE SOFTWARE.
-+ */
-+
-+#ifndef _VMX_VPIC_H
-+#define _VMX_VPIC_H
-+
-+#define hw_error(x) do {} while (0)
-+
-+
-+/* i8259.c */
-+typedef struct IOAPICState IOAPICState;
-+typedef struct PicState {
-+ uint8_t last_irr; /* edge detection */
-+ uint8_t irr; /* interrupt request register */
-+ uint8_t imr; /* interrupt mask register */
-+ uint8_t isr; /* interrupt service register */
-+ uint8_t priority_add; /* highest irq priority */
-+ uint8_t irq_base;
-+ uint8_t read_reg_select;
-+ uint8_t poll;
-+ uint8_t special_mask;
-+ uint8_t init_state;
-+ uint8_t auto_eoi;
-+ uint8_t rotate_on_auto_eoi;
-+ uint8_t special_fully_nested_mode;
-+ uint8_t init4; /* true if 4 byte init */
-+ uint8_t elcr; /* PIIX edge/trigger selection*/
-+ uint8_t elcr_mask;
-+ struct vmx_virpic *pics_state;
-+} PicState;
-+
-+struct vmx_virpic {
-+ /* 0 is master pic, 1 is slave pic */
-+ /* XXX: better separation between the two pics */
-+ PicState pics[2];
-+ void (*irq_request)(int *opaque, int level);
-+ void *irq_request_opaque;
-+ /* IOAPIC callback support */
-+ void (*alt_irq_func)(void *opaque, int irq_num, int level);
-+ void *alt_irq_opaque;
-+};
-+
-+
-+void pic_set_irq(struct vmx_virpic *s, int irq, int level);
-+void pic_set_irq_new(void *opaque, int irq, int level);
-+void pic_init(struct vmx_virpic *s,
-+ void (*irq_request)(),
-+ void *irq_request_opaque);
-+void pic_set_alt_irq_func(struct vmx_virpic *s,
-+ void(*alt_irq_func)(),
-+ void *alt_irq_opaque);
-+int pic_read_irq(struct vmx_virpic *s);
-+void pic_update_irq(struct vmx_virpic *s);
-+uint32_t pic_intack_read(struct vmx_virpic *s);
-+void register_pic_io_hook (void);
-+int cpu_get_pic_interrupt(struct vcpu *v, int *type);
-+int is_pit_irq(struct vcpu *v, int irq, int type);
-+int is_irq_enabled(struct vcpu *v, int irq);
-+void do_pic_irqs (struct vmx_virpic *s, uint16_t irqs);
-+void do_pic_irqs_clear (struct vmx_virpic *s, uint16_t irqs);
-+
-+/* APIC */
-+#endif /* _VMX_VPIC_H */
-diff --git a/include/xen/interface/io/xenbus.h b/include/xen/interface/io/xenbus.h
-new file mode 100644
-index 0000000..472079c
---- /dev/null
-+++ b/include/xen/interface/io/xenbus.h
-@@ -0,0 +1,42 @@
-+/*****************************************************************************
-+ * xenbus.h
-+ *
-+ * Xenbus protocol details.
-+ *
-+ * Copyright (C) 2005 XenSource Ltd.
-+ */
-+
-+#ifndef _XEN_PUBLIC_IO_XENBUS_H
-+#define _XEN_PUBLIC_IO_XENBUS_H
-+
-+/* The state of either end of the Xenbus, i.e. the current communication
-+ status of initialisation across the bus. States here imply nothing about
-+ the state of the connection between the driver and the kernel's device
-+ layers. */
-+typedef enum
-+{
-+ XenbusStateUnknown = 0,
-+ XenbusStateInitialising = 1,
-+ XenbusStateInitWait = 2, /* Finished early initialisation, but waiting
-+ for information from the peer or hotplug
-+ scripts. */
-+ XenbusStateInitialised = 3, /* Initialised and waiting for a connection
-+ from the peer. */
-+ XenbusStateConnected = 4,
-+ XenbusStateClosing = 5, /* The device is being closed due to an error
-+ or an unplug event. */
-+ XenbusStateClosed = 6
-+
-+} XenbusState;
-+
-+#endif /* _XEN_PUBLIC_IO_XENBUS_H */
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/include/xen/interface/io/xs_wire.h b/include/xen/interface/io/xs_wire.h
-new file mode 100644
-index 0000000..3c642fd
---- /dev/null
-+++ b/include/xen/interface/io/xs_wire.h
-@@ -0,0 +1,97 @@
-+/*
-+ * Details of the "wire" protocol between Xen Store Daemon and client
-+ * library or guest kernel.
-+ * Copyright (C) 2005 Rusty Russell IBM Corporation
-+ */
-+
-+#ifndef _XS_WIRE_H
-+#define _XS_WIRE_H
-+
-+enum xsd_sockmsg_type
-+{
-+ XS_DEBUG,
-+ XS_DIRECTORY,
-+ XS_READ,
-+ XS_GET_PERMS,
-+ XS_WATCH,
-+ XS_UNWATCH,
-+ XS_TRANSACTION_START,
-+ XS_TRANSACTION_END,
-+ XS_INTRODUCE,
-+ XS_RELEASE,
-+ XS_GET_DOMAIN_PATH,
-+ XS_WRITE,
-+ XS_MKDIR,
-+ XS_RM,
-+ XS_SET_PERMS,
-+ XS_WATCH_EVENT,
-+ XS_ERROR,
-+ XS_IS_DOMAIN_INTRODUCED
-+};
-+
-+#define XS_WRITE_NONE "NONE"
-+#define XS_WRITE_CREATE "CREATE"
-+#define XS_WRITE_CREATE_EXCL "CREATE|EXCL"
-+
-+/* We hand errors back as strings, for portability. */
-+struct xsd_errors
-+{
-+ int errnum;
-+ const char *errstring;
-+};
-+#define XSD_ERROR(x) { x, #x }
-+static struct xsd_errors xsd_errors[] __attribute__((unused)) = {
-+ XSD_ERROR(EINVAL),
-+ XSD_ERROR(EACCES),
-+ XSD_ERROR(EEXIST),
-+ XSD_ERROR(EISDIR),
-+ XSD_ERROR(ENOENT),
-+ XSD_ERROR(ENOMEM),
-+ XSD_ERROR(ENOSPC),
-+ XSD_ERROR(EIO),
-+ XSD_ERROR(ENOTEMPTY),
-+ XSD_ERROR(ENOSYS),
-+ XSD_ERROR(EROFS),
-+ XSD_ERROR(EBUSY),
-+ XSD_ERROR(EAGAIN),
-+ XSD_ERROR(EISCONN)
-+};
-+
-+struct xsd_sockmsg
-+{
-+ uint32_t type; /* XS_??? */
-+ uint32_t req_id;/* Request identifier, echoed in daemon's response. */
-+ uint32_t tx_id; /* Transaction id (0 if not related to a transaction). */
-+ uint32_t len; /* Length of data following this. */
-+
-+ /* Generally followed by nul-terminated string(s). */
-+};
-+
-+enum xs_watch_type
-+{
-+ XS_WATCH_PATH = 0,
-+ XS_WATCH_TOKEN
-+};
-+
-+/* Inter-domain shared memory communications. */
-+#define XENSTORE_RING_SIZE 1024
-+typedef uint32_t XENSTORE_RING_IDX;
-+#define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1))
-+struct xenstore_domain_interface {
-+ char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */
-+ char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */
-+ XENSTORE_RING_IDX req_cons, req_prod;
-+ XENSTORE_RING_IDX rsp_cons, rsp_prod;
-+};
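-+
-+/*
-+ * Illustrative sketch (not part of the original header): copying
-+ * request bytes into the shared ring.  The producer owns the space
-+ * between req_prod and req_cons + XENSTORE_RING_SIZE; the write
-+ * barrier, producer update and notification are left to the caller.
-+ */
-+#if 0 /* example only */
-+static int example_xs_queue(struct xenstore_domain_interface *intf,
-+                            const char *data, unsigned int len)
-+{
-+    XENSTORE_RING_IDX prod = intf->req_prod;
-+
-+    if (len > XENSTORE_RING_SIZE - (prod - intf->req_cons))
-+        return -1;                     /* not enough free space */
-+    while (len--)
-+        intf->req[MASK_XENSTORE_IDX(prod++)] = *data++;
-+    /* wmb(); intf->req_prod = prod; then notify the daemon. */
-+    return 0;
-+}
-+#endif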
-+
-+#endif /* _XS_WIRE_H */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff --git a/include/xen/interface/memory.h b/include/xen/interface/memory.h
-new file mode 100644
-index 0000000..f3f16fe
---- /dev/null
-+++ b/include/xen/interface/memory.h
-@@ -0,0 +1,153 @@
-+/******************************************************************************
-+ * memory.h
-+ *
-+ * Memory reservation and information.
-+ *
-+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
-+ */
-+
-+#ifndef __XEN_PUBLIC_MEMORY_H__
-+#define __XEN_PUBLIC_MEMORY_H__
-+
-+/*
-+ * Increase or decrease the specified domain's memory reservation. Returns a
-+ * -ve errcode on failure, or the # extents successfully allocated or freed.
-+ * arg == addr of struct xen_memory_reservation.
-+ */
-+#define XENMEM_increase_reservation 0
-+#define XENMEM_decrease_reservation 1
-+#define XENMEM_populate_physmap 6
-+typedef struct xen_memory_reservation {
-+
-+ /*
-+ * XENMEM_increase_reservation:
-+ * OUT: MFN (*not* GMFN) bases of extents that were allocated
-+ * XENMEM_decrease_reservation:
-+ * IN: GMFN bases of extents to free
-+ * XENMEM_populate_physmap:
-+ * IN: GPFN bases of extents to populate with memory
-+ * OUT: GMFN bases of extents that were allocated
-+ * (NB. This command also updates the mach_to_phys translation table)
-+ */
-+ GUEST_HANDLE(ulong) extent_start;
-+
-+ /* Number of extents, and size/alignment of each (2^extent_order pages). */
-+ unsigned long nr_extents;
-+ unsigned int extent_order;
-+
-+ /*
-+ * Maximum # bits addressable by the user of the allocated region (e.g.,
-+ * I/O devices often have a 32-bit limitation even in 64-bit systems). If
-+ * zero then the user has no addressing restriction.
-+ * This field is not used by XENMEM_decrease_reservation.
-+ */
-+ unsigned int address_bits;
-+
-+ /*
-+ * Domain whose reservation is being changed.
-+ * Unprivileged domains can specify only DOMID_SELF.
-+ */
-+ domid_t domid;
-+
-+} xen_memory_reservation_t;
-+DEFINE_GUEST_HANDLE(xen_memory_reservation_t);
-+
-+/*
-+ * Returns the maximum machine frame number of mapped RAM in this system.
-+ * This command always succeeds (it never returns an error code).
-+ * arg == NULL.
-+ */
-+#define XENMEM_maximum_ram_page 2
-+
-+/*
-+ * Returns the current or maximum memory reservation, in pages, of the
-+ * specified domain (may be DOMID_SELF). Returns -ve errcode on failure.
-+ * arg == addr of domid_t.
-+ */
-+#define XENMEM_current_reservation 3
-+#define XENMEM_maximum_reservation 4
-+
-+/*
-+ * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys
-+ * mapping table. Architectures which do not have a m2p table do not implement
-+ * this command.
-+ * arg == addr of xen_machphys_mfn_list_t.
-+ */
-+#define XENMEM_machphys_mfn_list 5
-+typedef struct xen_machphys_mfn_list {
-+ /*
-+ * Size of the 'extent_start' array. Fewer entries will be filled if the
-+ * machphys table is smaller than max_extents * 2MB.
-+ */
-+ unsigned int max_extents;
-+
-+ /*
-+ * Pointer to buffer to fill with list of extent starts. If there are
-+ * any large discontiguities in the machine address space, 2MB gaps in
-+ * the machphys table will be represented by an MFN base of zero.
-+ */
-+ GUEST_HANDLE(ulong) extent_start;
-+
-+ /*
-+ * Number of extents written to the above array. This will be smaller
-+ * than 'max_extents' if the machphys table is smaller than max_extents * 2MB.
-+ */
-+ unsigned int nr_extents;
-+} xen_machphys_mfn_list_t;
-+DEFINE_GUEST_HANDLE(xen_machphys_mfn_list_t);
-+
-+/*
-+ * Returns the base and size of the specified reserved 'RAM hole' in the
-+ * specified guest's pseudophysical address space.
-+ * arg == addr of xen_reserved_phys_area_t.
-+ */
-+#define XENMEM_reserved_phys_area 7
-+typedef struct xen_reserved_phys_area {
-+ /* Which domain to report about? */
-+ domid_t domid;
-+
-+ /*
-+ * Which reserved area to report? Out-of-range request reports
-+ * -ESRCH. Currently no architecture will have more than one reserved area.
-+ */
-+ unsigned int idx;
-+
-+ /* Base and size of the specified reserved area. */
-+ unsigned long first_gpfn, nr_gpfns;
-+} xen_reserved_phys_area_t;
-+DEFINE_GUEST_HANDLE(xen_reserved_phys_area_t);
-+
-+/*
-+ * Translates a list of domain-specific GPFNs into MFNs. Returns a -ve error
-+ * code on failure. This call only works for auto-translated guests.
-+ */
-+#define XENMEM_translate_gpfn_list 8
-+typedef struct xen_translate_gpfn_list {
-+ /* Which domain to translate for? */
-+ domid_t domid;
-+
-+ /* Length of list. */
-+ unsigned long nr_gpfns;
-+
-+ /* List of GPFNs to translate. */
-+ GUEST_HANDLE(ulong) gpfn_list;
-+
-+ /*
-+ * Output list to contain MFN translations. May be the same as the input
-+ * list (in which case each input GPFN is overwritten with the output MFN).
-+ */
-+ GUEST_HANDLE(ulong) mfn_list;
-+} xen_translate_gpfn_list_t;
-+DEFINE_GUEST_HANDLE(xen_translate_gpfn_list_t);
-+
-+#endif /* __XEN_PUBLIC_MEMORY_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff --git a/include/xen/interface/nmi.h b/include/xen/interface/nmi.h
-new file mode 100644
-index 0000000..0c0c67b
---- /dev/null
-+++ b/include/xen/interface/nmi.h
-@@ -0,0 +1,54 @@
-+/******************************************************************************
-+ * nmi.h
-+ *
-+ * NMI callback registration and reason codes.
-+ *
-+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
-+ */
-+
-+#ifndef __XEN_PUBLIC_NMI_H__
-+#define __XEN_PUBLIC_NMI_H__
-+
-+/*
-+ * NMI reason codes:
-+ * Currently these are x86-specific, stored in arch_shared_info.nmi_reason.
-+ */
-+ /* I/O-check error reported via ISA port 0x61, bit 6. */
-+#define _XEN_NMIREASON_io_error 0
-+#define XEN_NMIREASON_io_error (1UL << _XEN_NMIREASON_io_error)
-+ /* Parity error reported via ISA port 0x61, bit 7. */
-+#define _XEN_NMIREASON_parity_error 1
-+#define XEN_NMIREASON_parity_error (1UL << _XEN_NMIREASON_parity_error)
-+ /* Unknown hardware-generated NMI. */
-+#define _XEN_NMIREASON_unknown 2
-+#define XEN_NMIREASON_unknown (1UL << _XEN_NMIREASON_unknown)
-+
-+/*
-+ * long nmi_op(unsigned int cmd, void *arg)
-+ * NB. All ops return zero on success, else a negative error code.
-+ */
-+
-+/*
-+ * Register NMI callback for this (calling) VCPU. Currently this only makes
-+ * sense for domain 0, vcpu 0. All other callers will be returned EINVAL.
-+ * arg == address of callback function.
-+ */
-+#define XENNMI_register_callback 0
-+
-+/*
-+ * Deregister NMI callback for this (calling) VCPU.
-+ * arg == NULL.
-+ */
-+#define XENNMI_unregister_callback 1
-+
-+#endif /* __XEN_PUBLIC_NMI_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff --git a/include/xen/interface/physdev.h b/include/xen/interface/physdev.h
-new file mode 100644
-index 0000000..2df68b4
---- /dev/null
-+++ b/include/xen/interface/physdev.h
-@@ -0,0 +1,70 @@
-+
-+#ifndef __XEN_PUBLIC_PHYSDEV_H__
-+#define __XEN_PUBLIC_PHYSDEV_H__
-+
-+/* Commands to HYPERVISOR_physdev_op() */
-+#define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4
-+#define PHYSDEVOP_IRQ_STATUS_QUERY 5
-+#define PHYSDEVOP_SET_IOPL 6
-+#define PHYSDEVOP_SET_IOBITMAP 7
-+#define PHYSDEVOP_APIC_READ 8
-+#define PHYSDEVOP_APIC_WRITE 9
-+#define PHYSDEVOP_ASSIGN_VECTOR 10
-+
-+typedef struct physdevop_irq_status_query {
-+ /* IN */
-+ uint32_t irq;
-+ /* OUT */
-+/* Need to call PHYSDEVOP_IRQ_UNMASK_NOTIFY when the IRQ has been serviced? */
-+#define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY (1<<0)
-+ uint32_t flags;
-+} physdevop_irq_status_query_t;
-+
-+typedef struct physdevop_set_iopl {
-+ /* IN */
-+ uint32_t iopl;
-+} physdevop_set_iopl_t;
-+
-+typedef struct physdevop_set_iobitmap {
-+ /* IN */
-+ uint8_t *bitmap;
-+ uint32_t nr_ports;
-+} physdevop_set_iobitmap_t;
-+
-+typedef struct physdevop_apic {
-+ /* IN */
-+ unsigned long apic_physbase;
-+ uint32_t reg;
-+ /* IN or OUT */
-+ uint32_t value;
-+} physdevop_apic_t;
-+
-+typedef struct physdevop_irq {
-+ /* IN */
-+ uint32_t irq;
-+ /* OUT */
-+ uint32_t vector;
-+} physdevop_irq_t;
-+
-+typedef struct physdev_op {
-+ uint32_t cmd;
-+ union {
-+ physdevop_irq_status_query_t irq_status_query;
-+ physdevop_set_iopl_t set_iopl;
-+ physdevop_set_iobitmap_t set_iobitmap;
-+ physdevop_apic_t apic_op;
-+ physdevop_irq_t irq_op;
-+ } u;
-+} physdev_op_t;
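-+
-+/*
-+ * Illustrative sketch (not part of the original header): raising the
-+ * I/O privilege level via the multiplexed physdev_op_t.  The
-+ * HYPERVISOR_physdev_op() wrapper is assumed from the guest's
-+ * hypercall layer.
-+ */
-+#if 0 /* example only */
-+static int example_set_iopl(unsigned int new_iopl)
-+{
-+    physdev_op_t op;
-+
-+    op.cmd             = PHYSDEVOP_SET_IOPL;
-+    op.u.set_iopl.iopl = new_iopl;
-+    return HYPERVISOR_physdev_op(&op);
-+}
-+#endif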
-+
-+#endif /* __XEN_PUBLIC_PHYSDEV_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff --git a/include/xen/interface/sched.h b/include/xen/interface/sched.h
-new file mode 100644
-index 0000000..a1a03de
---- /dev/null
-+++ b/include/xen/interface/sched.h
-@@ -0,0 +1,60 @@
-+/******************************************************************************
-+ * sched.h
-+ *
-+ * Scheduler state interactions
-+ *
-+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
-+ */
-+
-+#ifndef __XEN_PUBLIC_SCHED_H__
-+#define __XEN_PUBLIC_SCHED_H__
-+
-+/*
-+ * Prototype for this hypercall is:
-+ * int sched_op(int cmd, unsigned long arg)
-+ * @cmd == SCHEDOP_??? (scheduler operation).
-+ * @arg == Operation-specific extra argument(s).
-+ */
-+
-+/*
-+ * Voluntarily yield the CPU.
-+ * @arg == 0.
-+ */
-+#define SCHEDOP_yield 0
-+
-+/*
-+ * Block execution of this VCPU until an event is received for processing.
-+ * If called with event upcalls masked, this operation will atomically
-+ * reenable event delivery and check for pending events before blocking the
-+ * VCPU. This avoids a "wakeup waiting" race.
-+ * @arg == 0.
-+ */
-+#define SCHEDOP_block 1
-+
-+/*
-+ * Halt execution of this domain (all VCPUs) and notify the system controller.
-+ * @arg == SHUTDOWN_??? (reason for shutdown).
-+ */
-+#define SCHEDOP_shutdown 2
-+
-+/*
-+ * Reason codes for SCHEDOP_shutdown. These may be interpreted by controller
-+ * software to determine the appropriate action. For the most part, Xen does
-+ * not care about the shutdown code.
-+ */
-+#define SHUTDOWN_poweroff 0 /* Domain exited normally. Clean up and kill. */
-+#define SHUTDOWN_reboot 1 /* Clean up, kill, and then restart. */
-+#define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */
-+#define SHUTDOWN_crash 3 /* Tell controller we've crashed. */
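-+
-+/*
-+ * Illustrative sketch (not part of the original header): a guest idle
-+ * loop and a clean poweroff using the operations above.  The
-+ * HYPERVISOR_sched_op() wrapper is assumed from the guest's hypercall
-+ * layer.
-+ */
-+#if 0 /* example only */
-+static void example_idle(void)
-+{
-+    /* Atomically unmask event delivery and block until an event arrives. */
-+    HYPERVISOR_sched_op(SCHEDOP_block, 0);
-+}
-+
-+static void example_poweroff(void)
-+{
-+    HYPERVISOR_sched_op(SCHEDOP_shutdown, SHUTDOWN_poweroff);
-+}
-+#endif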
-+
-+#endif /* __XEN_PUBLIC_SCHED_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff --git a/include/xen/interface/sched_ctl.h b/include/xen/interface/sched_ctl.h
-new file mode 100644
-index 0000000..600c43a
---- /dev/null
-+++ b/include/xen/interface/sched_ctl.h
-@@ -0,0 +1,64 @@
-+/******************************************************************************
-+ * Generic scheduler control interface.
-+ *
-+ * Mark Williamson, (C) 2004 Intel Research Cambridge
-+ */
-+
-+#ifndef __XEN_PUBLIC_SCHED_CTL_H__
-+#define __XEN_PUBLIC_SCHED_CTL_H__
-+
-+/* Scheduler types. */
-+#define SCHED_BVT 0
-+#define SCHED_SEDF 4
-+
-+/* Set or get info? */
-+#define SCHED_INFO_PUT 0
-+#define SCHED_INFO_GET 1
-+
-+/*
-+ * Generic scheduler control command - used to adjust system-wide scheduler
-+ * parameters
-+ */
-+struct sched_ctl_cmd {
-+ uint32_t sched_id;
-+ uint32_t direction;
-+ union {
-+ struct bvt_ctl {
-+ uint32_t ctx_allow;
-+ } bvt;
-+ } u;
-+};
-+
-+struct sched_adjdom_cmd {
-+ uint32_t sched_id;
-+ uint32_t direction;
-+ domid_t domain;
-+ union {
-+ struct bvt_adjdom {
-+ uint32_t mcu_adv; /* mcu advance: inverse of weight */
-+ uint32_t warpback; /* warp? */
-+ int32_t warpvalue; /* warp value */
-+ int64_t warpl; /* warp limit */
-+ int64_t warpu; /* unwarp time requirement */
-+ } bvt;
-+ struct sedf_adjdom {
-+ uint64_t period;
-+ uint64_t slice;
-+ uint64_t latency;
-+ uint32_t extratime;
-+ uint32_t weight;
-+ } sedf;
-+ } u;
-+};
-+
-+#endif /* __XEN_PUBLIC_SCHED_CTL_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff --git a/include/xen/interface/trace.h b/include/xen/interface/trace.h
-new file mode 100644
-index 0000000..cba4b26
---- /dev/null
-+++ b/include/xen/interface/trace.h
-@@ -0,0 +1,90 @@
-+/******************************************************************************
-+ * include/public/trace.h
-+ *
-+ * Mark Williamson, (C) 2004 Intel Research Cambridge
-+ * Copyright (C) 2005 Bin Ren
-+ */
-+
-+#ifndef __XEN_PUBLIC_TRACE_H__
-+#define __XEN_PUBLIC_TRACE_H__
-+
-+/* Trace classes */
-+#define TRC_CLS_SHIFT 16
-+#define TRC_GEN 0x0001f000 /* General trace */
-+#define TRC_SCHED 0x0002f000 /* Xen Scheduler trace */
-+#define TRC_DOM0OP 0x0004f000 /* Xen DOM0 operation trace */
-+#define TRC_VMX 0x0008f000 /* Xen VMX trace */
-+#define TRC_MEM 0x000af000 /* Xen memory trace */
-+#define TRC_ALL 0xfffff000
-+
-+/* Trace subclasses */
-+#define TRC_SUBCLS_SHIFT 12
-+/* trace subclasses for VMX */
-+#define TRC_VMXEXIT 0x00081000 /* VMX exit trace */
-+#define TRC_VMXTIMER 0x00082000 /* VMX timer trace */
-+#define TRC_VMXINT 0x00084000 /* VMX interrupt trace */
-+#define TRC_VMXIO 0x00088000 /* VMX io emulation trace */
-+#define TRC_VMEXIT_HANDLER 0x00090000 /* VMX handler trace */
-+
-+/* Trace events per class */
-+
-+#define TRC_SCHED_DOM_ADD (TRC_SCHED + 1)
-+#define TRC_SCHED_DOM_REM (TRC_SCHED + 2)
-+#define TRC_SCHED_SLEEP (TRC_SCHED + 3)
-+#define TRC_SCHED_WAKE (TRC_SCHED + 4)
-+#define TRC_SCHED_YIELD (TRC_SCHED + 5)
-+#define TRC_SCHED_BLOCK (TRC_SCHED + 6)
-+#define TRC_SCHED_SHUTDOWN (TRC_SCHED + 7)
-+#define TRC_SCHED_CTL (TRC_SCHED + 8)
-+#define TRC_SCHED_ADJDOM (TRC_SCHED + 9)
-+#define TRC_SCHED_SWITCH (TRC_SCHED + 10)
-+#define TRC_SCHED_S_TIMER_FN (TRC_SCHED + 11)
-+#define TRC_SCHED_T_TIMER_FN (TRC_SCHED + 12)
-+#define TRC_SCHED_DOM_TIMER_FN (TRC_SCHED + 13)
-+#define TRC_SCHED_SWITCH_INFPREV (TRC_SCHED + 14)
-+#define TRC_SCHED_SWITCH_INFNEXT (TRC_SCHED + 15)
-+
-+#define TRC_MEM_PAGE_GRANT_MAP (TRC_MEM + 1)
-+#define TRC_MEM_PAGE_GRANT_UNMAP (TRC_MEM + 2)
-+#define TRC_MEM_PAGE_GRANT_TRANSFER (TRC_MEM + 3)
-+
-+/* trace events per subclass */
-+#define TRC_VMX_VMEXIT (TRC_VMXEXIT + 1)
-+#define TRC_VMX_VECTOR (TRC_VMXEXIT + 2)
-+
-+#define TRC_VMX_TIMER_INTR (TRC_VMXTIMER + 1)
-+
-+#define TRC_VMX_INT (TRC_VMXINT + 1)
-+
-+#define TRC_VMEXIT (TRC_VMEXIT_HANDLER + 1)
-+#define TRC_VMENTRY (TRC_VMEXIT_HANDLER + 2)
-+
-+
-+/* This structure represents a single trace buffer record. */
-+struct t_rec {
-+ uint64_t cycles; /* cycle counter timestamp */
-+ uint32_t event; /* event ID */
-+ unsigned long data[5]; /* event data items */
-+};
-+
-+/*
-+ * This structure contains the metadata for a single trace buffer. The
-+ * 'cons' and 'prod' fields index into an array of struct t_rec's.
-+ */
-+struct t_buf {
-+ uint32_t cons; /* Next item to be consumed by control tools. */
-+ uint32_t prod; /* Next item to be produced by Xen. */
-+ /* 'nr_recs' records follow immediately after the meta-data header. */
-+};
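-+
-+/*
-+ * Illustrative sketch (not part of the original header): how a control
-+ * tool might drain one trace buffer.  'nr_recs' (the ring capacity) is
-+ * assumed known from the tools' setup path, and cons/prod are assumed
-+ * to wrap at nr_recs.
-+ */
-+#if 0 /* example only */
-+static void example_drain(struct t_buf *buf, uint32_t nr_recs)
-+{
-+    /* Records follow immediately after the meta-data header. */
-+    struct t_rec *recs = (struct t_rec *)(buf + 1);
-+
-+    while (buf->cons != buf->prod) {
-+        struct t_rec *r = &recs[buf->cons];
-+        /* ... process r->cycles, r->event, r->data[] ... */
-+        buf->cons = (buf->cons + 1) % nr_recs;
-+    }
-+}
-+#endif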
-+
-+#endif /* __XEN_PUBLIC_TRACE_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff --git a/include/xen/interface/vcpu.h b/include/xen/interface/vcpu.h
-new file mode 100644
-index 0000000..1c36f81
---- /dev/null
-+++ b/include/xen/interface/vcpu.h
-@@ -0,0 +1,119 @@
-+/******************************************************************************
-+ * vcpu.h
-+ *
-+ * VCPU initialisation, query, and hotplug.
-+ *
-+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
-+ */
-+
-+#ifndef __XEN_PUBLIC_VCPU_H__
-+#define __XEN_PUBLIC_VCPU_H__
-+
-+/*
-+ * Prototype for this hypercall is:
-+ * int vcpu_op(int cmd, int vcpuid, void *extra_args)
-+ * @cmd == VCPUOP_??? (VCPU operation).
-+ * @vcpuid == VCPU to operate on.
-+ * @extra_args == Operation-specific extra arguments (NULL if none).
-+ */
-+
-+/*
-+ * Initialise a VCPU. Each VCPU can be initialised only once. A
-+ * newly-initialised VCPU will not run until it is brought up by VCPUOP_up.
-+ *
-+ * @extra_arg == pointer to vcpu_guest_context structure containing initial
-+ * state for the VCPU.
-+ */
-+#define VCPUOP_initialise 0
-+
-+/*
-+ * Bring up a VCPU. This makes the VCPU runnable. This operation will fail
-+ * if the VCPU has not been initialised (VCPUOP_initialise).
-+ */
-+#define VCPUOP_up 1
-+
-+/*
-+ * Bring down a VCPU (i.e., make it non-runnable).
-+ * There are a few caveats that callers should observe:
-+ * 1. This operation may return, and VCPU_is_up may return false, before the
-+ * VCPU stops running (i.e., the command is asynchronous). It is a good
-+ * idea to ensure that the VCPU has entered a non-critical loop before
-+ * bringing it down. Alternatively, this operation is guaranteed
-+ * synchronous if invoked by the VCPU itself.
-+ * 2. After a VCPU is initialised, there is currently no way to drop all its
-+ * references to domain memory. Even a VCPU that is down still holds
-+ * memory references via its pagetable base pointer and GDT. It is good
-+ * practice to move a VCPU onto an 'idle' or default page table, LDT and
-+ * GDT before bringing it down.
-+ */
-+#define VCPUOP_down 2
-+
-+/* Returns 1 if the given VCPU is up. */
-+#define VCPUOP_is_up 3
-+
-+/*
-+ * Return information about the state and running time of a VCPU.
-+ * @extra_arg == pointer to vcpu_runstate_info structure.
-+ */
-+#define VCPUOP_get_runstate_info 4
-+typedef struct vcpu_runstate_info {
-+ /* VCPU's current state (RUNSTATE_*). */
-+ int state;
-+ /* When was current state entered (system time, ns)? */
-+ uint64_t state_entry_time;
-+ /*
-+ * Time spent in each RUNSTATE_* (ns). The sum of these times is
-+ * guaranteed not to drift from system time.
-+ */
-+ uint64_t time[4];
-+} vcpu_runstate_info_t;
-+
-+/* VCPU is currently running on a physical CPU. */
-+#define RUNSTATE_running 0
-+
-+/* VCPU is runnable, but not currently scheduled on any physical CPU. */
-+#define RUNSTATE_runnable 1
-+
-+/* VCPU is blocked (a.k.a. idle). It is therefore not runnable. */
-+#define RUNSTATE_blocked 2
-+
-+/*
-+ * VCPU is not runnable, but it is not blocked.
-+ * This is a 'catch all' state for things like hotplug and pauses by the
-+ * system administrator (or for critical sections in the hypervisor).
-+ * RUNSTATE_blocked dominates this state (it is the preferred state).
-+ */
-+#define RUNSTATE_offline 3
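-+
-+/*
-+ * Illustrative sketch (not part of the original header): sampling a
-+ * VCPU's runstate.  The HYPERVISOR_vcpu_op() wrapper is assumed from
-+ * the guest's hypercall layer.
-+ */
-+#if 0 /* example only */
-+static uint64_t example_stolen_time(int vcpuid)
-+{
-+    vcpu_runstate_info_t info;
-+
-+    if (HYPERVISOR_vcpu_op(VCPUOP_get_runstate_info, vcpuid, &info) != 0)
-+        return 0;
-+    /* Time spent runnable but not running is a common "stolen" metric. */
-+    return info.time[RUNSTATE_runnable];
-+}
-+#endif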
-+
-+/*
-+ * Register a shared memory area from which the guest may obtain its own
-+ * runstate information without needing to execute a hypercall.
-+ * Notes:
-+ * 1. The registered address may be virtual or physical, depending on the
-+ * platform. The virtual address should be registered on x86 systems.
-+ * 2. Only one shared area may be registered per VCPU. The shared area is
-+ * updated by the hypervisor each time the VCPU is scheduled. Thus
-+ * runstate.state will always be RUNSTATE_running and
-+ * runstate.state_entry_time will indicate the system time at which the
-+ * VCPU was last scheduled to run.
-+ * @extra_arg == pointer to vcpu_register_runstate_memory_area structure.
-+ */
-+#define VCPUOP_register_runstate_memory_area 5
-+typedef struct vcpu_register_runstate_memory_area {
-+ union {
-+ struct vcpu_runstate_info *v;
-+ uint64_t p;
-+ } addr;
-+} vcpu_register_runstate_memory_area_t;
-+
-+#endif /* __XEN_PUBLIC_VCPU_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff --git a/include/xen/interface/version.h b/include/xen/interface/version.h
-new file mode 100644
-index 0000000..9a496f9
---- /dev/null
-+++ b/include/xen/interface/version.h
-@@ -0,0 +1,64 @@
-+/******************************************************************************
-+ * version.h
-+ *
-+ * Xen version, type, and compile information.
-+ *
-+ * Copyright (c) 2005, Nguyen Anh Quynh <aquynh@gmail.com>
-+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
-+ */
-+
-+#ifndef __XEN_PUBLIC_VERSION_H__
-+#define __XEN_PUBLIC_VERSION_H__
-+
-+/* NB. All ops return zero on success, except XENVER_version. */
-+
-+/* arg == NULL; returns major:minor (16:16). */
-+#define XENVER_version 0
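-+
-+/*
-+ * Illustrative sketch (not part of the original header): decoding the
-+ * packed major:minor value.  The HYPERVISOR_xen_version() wrapper is
-+ * assumed from the guest's hypercall layer.
-+ */
-+#if 0 /* example only */
-+static void example_decode_version(void)
-+{
-+    int v = HYPERVISOR_xen_version(XENVER_version, NULL);
-+    int major = v >> 16, minor = v & 0xffff;
-+    /* e.g. log "running on Xen <major>.<minor>" */
-+    (void)major; (void)minor;
-+}
-+#endif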
-+
-+/* arg == xen_extraversion_t. */
-+#define XENVER_extraversion 1
-+typedef char xen_extraversion_t[16];
-+#define XEN_EXTRAVERSION_LEN (sizeof(xen_extraversion_t))
-+
-+/* arg == xen_compile_info_t. */
-+#define XENVER_compile_info 2
-+typedef struct xen_compile_info {
-+ char compiler[64];
-+ char compile_by[16];
-+ char compile_domain[32];
-+ char compile_date[32];
-+} xen_compile_info_t;
-+
-+#define XENVER_capabilities 3
-+typedef char xen_capabilities_info_t[1024];
-+#define XEN_CAPABILITIES_INFO_LEN (sizeof(xen_capabilities_info_t))
-+
-+#define XENVER_changeset 4
-+typedef char xen_changeset_info_t[64];
-+#define XEN_CHANGESET_INFO_LEN (sizeof(xen_changeset_info_t))
-+
-+#define XENVER_platform_parameters 5
-+typedef struct xen_platform_parameters {
-+ unsigned long virt_start;
-+} xen_platform_parameters_t;
-+
-+#define XENVER_get_features 6
-+typedef struct xen_feature_info {
-+ unsigned int submap_idx; /* IN: which 32-bit submap to return */
-+ uint32_t submap; /* OUT: 32-bit submap */
-+} xen_feature_info_t;
-+
-+/* Declares the features reported by XENVER_get_features. */
-+#include "features.h"
-+
-+#endif /* __XEN_PUBLIC_VERSION_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h
-new file mode 100644
-index 0000000..6e1f366
---- /dev/null
-+++ b/include/xen/interface/xen.h
-@@ -0,0 +1,447 @@
-+/******************************************************************************
-+ * xen.h
-+ *
-+ * Guest OS interface to Xen.
-+ *
-+ * Copyright (c) 2004, K A Fraser
-+ */
-+
-+#ifndef __XEN_PUBLIC_XEN_H__
-+#define __XEN_PUBLIC_XEN_H__
-+
-+#if defined(__i386__)
-+#include "arch-x86_32.h"
-+#elif defined(__x86_64__)
-+#include "arch-x86_64.h"
-+#elif defined(__ia64__)
-+#include "arch-ia64.h"
-+#else
-+#error "Unsupported architecture"
-+#endif
-+
-+/*
-+ * XEN "SYSTEM CALLS" (a.k.a. HYPERCALLS).
-+ */
-+
-+/*
-+ * x86_32: EAX = vector; EBX, ECX, EDX, ESI, EDI = args 1, 2, 3, 4, 5.
-+ * EAX = return value
-+ * (argument registers may be clobbered on return)
-+ * x86_64: RAX = vector; RDI, RSI, RDX, R10, R8, R9 = args 1, 2, 3, 4, 5, 6.
-+ * RAX = return value
-+ * (argument registers not clobbered on return; RCX, R11 are)
-+ */
-+#define __HYPERVISOR_set_trap_table 0
-+#define __HYPERVISOR_mmu_update 1
-+#define __HYPERVISOR_set_gdt 2
-+#define __HYPERVISOR_stack_switch 3
-+#define __HYPERVISOR_set_callbacks 4
-+#define __HYPERVISOR_fpu_taskswitch 5
-+#define __HYPERVISOR_sched_op 6
-+#define __HYPERVISOR_dom0_op 7
-+#define __HYPERVISOR_set_debugreg 8
-+#define __HYPERVISOR_get_debugreg 9
-+#define __HYPERVISOR_update_descriptor 10
-+#define __HYPERVISOR_memory_op 12
-+#define __HYPERVISOR_multicall 13
-+#define __HYPERVISOR_update_va_mapping 14
-+#define __HYPERVISOR_set_timer_op 15
-+#define __HYPERVISOR_event_channel_op 16
-+#define __HYPERVISOR_xen_version 17
-+#define __HYPERVISOR_console_io 18
-+#define __HYPERVISOR_physdev_op 19
-+#define __HYPERVISOR_grant_table_op 20
-+#define __HYPERVISOR_vm_assist 21
-+#define __HYPERVISOR_update_va_mapping_otherdomain 22
-+#define __HYPERVISOR_iret 23 /* x86 only */
-+#define __HYPERVISOR_switch_vm86 23 /* x86/32 only (obsolete name) */
-+#define __HYPERVISOR_switch_to_user 23 /* x86/64 only (obsolete name) */
-+#define __HYPERVISOR_vcpu_op 24
-+#define __HYPERVISOR_set_segment_base 25 /* x86/64 only */
-+#define __HYPERVISOR_mmuext_op 26
-+#define __HYPERVISOR_acm_op 27
-+#define __HYPERVISOR_nmi_op 28
-+
-+/*
-+ * VIRTUAL INTERRUPTS
-+ *
-+ * Virtual interrupts that a guest OS may receive from Xen.
-+ */
-+#define VIRQ_TIMER 0 /* Timebase update, and/or requested timeout. */
-+#define VIRQ_DEBUG 1 /* Request guest to dump debug info. */
-+#define VIRQ_CONSOLE 2 /* (DOM0) Bytes received on emergency console. */
-+#define VIRQ_DOM_EXC 3 /* (DOM0) Exceptional event for some domain. */
-+#define VIRQ_DEBUGGER 6 /* (DOM0) A domain has paused for debugging. */
-+#define NR_VIRQS 8
-+
-+/*
-+ * MMU-UPDATE REQUESTS
-+ *
-+ * HYPERVISOR_mmu_update() accepts a list of (ptr, val) pairs.
-+ * A foreigndom (FD) can be specified (or DOMID_SELF for none).
-+ * Where the FD has some effect, it is described below.
-+ * ptr[1:0] specifies the appropriate MMU_* command.
-+ *
-+ * ptr[1:0] == MMU_NORMAL_PT_UPDATE:
-+ * Updates an entry in a page table. If updating an L1 table, and the new
-+ * table entry is valid/present, the mapped frame must belong to the FD, if
-+ * an FD has been specified. If attempting to map an I/O page then the
-+ * caller assumes the privilege of the FD.
-+ * FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller.
-+ * FD == DOMID_XEN: Map restricted areas of Xen's heap space.
-+ * ptr[:2] -- Machine address of the page-table entry to modify.
-+ * val -- Value to write.
-+ *
-+ * ptr[1:0] == MMU_MACHPHYS_UPDATE:
-+ * Updates an entry in the machine->pseudo-physical mapping table.
-+ * ptr[:2] -- Machine address within the frame whose mapping to modify.
-+ * The frame must belong to the FD, if one is specified.
-+ * val -- Value to write into the mapping entry.
-+ */
-+#define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */
-+#define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */
-+
-+/*
-+ * MMU EXTENDED OPERATIONS
-+ *
-+ * HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures.
-+ * A foreigndom (FD) can be specified (or DOMID_SELF for none).
-+ * Where the FD has some effect, it is described below.
-+ *
-+ * cmd: MMUEXT_(UN)PIN_*_TABLE
-+ * mfn: Machine frame number to be (un)pinned as a p.t. page.
-+ * The frame must belong to the FD, if one is specified.
-+ *
-+ * cmd: MMUEXT_NEW_BASEPTR
-+ * mfn: Machine frame number of new page-table base to install in MMU.
-+ *
-+ * cmd: MMUEXT_NEW_USER_BASEPTR [x86/64 only]
-+ * mfn: Machine frame number of new page-table base to install in MMU
-+ * when in user space.
-+ *
-+ * cmd: MMUEXT_TLB_FLUSH_LOCAL
-+ * No additional arguments. Flushes local TLB.
-+ *
-+ * cmd: MMUEXT_INVLPG_LOCAL
-+ * linear_addr: Linear address to be flushed from the local TLB.
-+ *
-+ * cmd: MMUEXT_TLB_FLUSH_MULTI
-+ * vcpumask: Pointer to bitmap of VCPUs to be flushed.
-+ *
-+ * cmd: MMUEXT_INVLPG_MULTI
-+ * linear_addr: Linear address to be flushed.
-+ * vcpumask: Pointer to bitmap of VCPUs to be flushed.
-+ *
-+ * cmd: MMUEXT_TLB_FLUSH_ALL
-+ * No additional arguments. Flushes all VCPUs' TLBs.
-+ *
-+ * cmd: MMUEXT_INVLPG_ALL
-+ * linear_addr: Linear address to be flushed from all VCPUs' TLBs.
-+ *
-+ * cmd: MMUEXT_FLUSH_CACHE
-+ * No additional arguments. Writes back and flushes cache contents.
-+ *
-+ * cmd: MMUEXT_SET_LDT
-+ * linear_addr: Linear address of LDT base (NB. must be page-aligned).
-+ * nr_ents: Number of entries in LDT.
-+ */
-+#define MMUEXT_PIN_L1_TABLE 0
-+#define MMUEXT_PIN_L2_TABLE 1
-+#define MMUEXT_PIN_L3_TABLE 2
-+#define MMUEXT_PIN_L4_TABLE 3
-+#define MMUEXT_UNPIN_TABLE 4
-+#define MMUEXT_NEW_BASEPTR 5
-+#define MMUEXT_TLB_FLUSH_LOCAL 6
-+#define MMUEXT_INVLPG_LOCAL 7
-+#define MMUEXT_TLB_FLUSH_MULTI 8
-+#define MMUEXT_INVLPG_MULTI 9
-+#define MMUEXT_TLB_FLUSH_ALL 10
-+#define MMUEXT_INVLPG_ALL 11
-+#define MMUEXT_FLUSH_CACHE 12
-+#define MMUEXT_SET_LDT 13
-+#define MMUEXT_NEW_USER_BASEPTR 15
-+
-+#ifndef __ASSEMBLY__
-+struct mmuext_op {
-+ unsigned int cmd;
-+ union {
-+ /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR */
-+ unsigned long mfn;
-+ /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
-+ unsigned long linear_addr;
-+ } arg1;
-+ union {
-+ /* SET_LDT */
-+ unsigned int nr_ents;
-+ /* TLB_FLUSH_MULTI, INVLPG_MULTI */
-+ void *vcpumask;
-+ } arg2;
-+};
-+#endif
-+
-+/* These are passed as 'flags' to update_va_mapping. They can be ORed. */
-+/* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap. */
-+/* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer. */
-+#define UVMF_NONE (0UL<<0) /* No flushing at all. */
-+#define UVMF_TLB_FLUSH (1UL<<0) /* Flush entire TLB(s). */
-+#define UVMF_INVLPG (2UL<<0) /* Flush only one entry. */
-+#define UVMF_FLUSHTYPE_MASK (3UL<<0)
-+#define UVMF_MULTI (0UL<<2) /* Flush subset of TLBs. */
-+#define UVMF_LOCAL (0UL<<2) /* Flush local TLB. */
-+#define UVMF_ALL (1UL<<2) /* Flush all TLBs. */
-+
-+/*
-+ * Commands to HYPERVISOR_console_io().
-+ */
-+#define CONSOLEIO_write 0
-+#define CONSOLEIO_read 1
-+
-+/*
-+ * Commands to HYPERVISOR_vm_assist().
-+ */
-+#define VMASST_CMD_enable 0
-+#define VMASST_CMD_disable 1
-+#define VMASST_TYPE_4gb_segments 0
-+#define VMASST_TYPE_4gb_segments_notify 1
-+#define VMASST_TYPE_writable_pagetables 2
-+#define MAX_VMASST_TYPE 2
-+
-+#ifndef __ASSEMBLY__
-+
-+typedef uint16_t domid_t;
-+
-+/* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */
-+#define DOMID_FIRST_RESERVED (0x7FF0U)
-+
-+/* DOMID_SELF is used in certain contexts to refer to oneself. */
-+#define DOMID_SELF (0x7FF0U)
-+
-+/*
-+ * DOMID_IO is used to restrict page-table updates to mapping I/O memory.
-+ * Although no Foreign Domain need be specified to map I/O pages, DOMID_IO
-+ * is useful to ensure that no mappings to the OS's own heap are accidentally
-+ * installed. (e.g., in Linux this could cause havoc as reference counts
-+ * aren't adjusted on the I/O-mapping code path).
-+ * This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can
-+ * be specified by any calling domain.
-+ */
-+#define DOMID_IO (0x7FF1U)
-+
-+/*
-+ * DOMID_XEN is used to allow privileged domains to map restricted parts of
-+ * Xen's heap space (e.g., the machine_to_phys table).
-+ * This only makes sense in MMUEXT_SET_FOREIGNDOM, and is only permitted if
-+ * the caller is privileged.
-+ */
-+#define DOMID_XEN (0x7FF2U)
-+
-+/*
-+ * Send an array of these to HYPERVISOR_mmu_update().
-+ * NB. The fields are natural pointer/address size for this architecture.
-+ */
-+typedef struct mmu_update {
-+ uint64_t ptr; /* Machine address of PTE. */
-+ uint64_t val; /* New contents of PTE. */
-+} mmu_update_t;
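-+
-+/*
-+ * Illustrative sketch (not part of the original header): a single
-+ * MMU_NORMAL_PT_UPDATE request, as described in the MMU-UPDATE section
-+ * above.  The HYPERVISOR_mmu_update() wrapper is assumed from the
-+ * guest's hypercall layer.
-+ */
-+#if 0 /* example only */
-+static int example_set_pte(uint64_t pte_machine_addr, uint64_t new_val)
-+{
-+    mmu_update_t u;
-+
-+    /* The low two bits of ptr select the command; 0 == NORMAL_PT_UPDATE. */
-+    u.ptr = pte_machine_addr | MMU_NORMAL_PT_UPDATE;
-+    u.val = new_val;
-+    return HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
-+}
-+#endif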
-+
-+/*
-+ * Send an array of these to HYPERVISOR_multicall().
-+ * NB. The fields are natural register size for this architecture.
-+ */
-+typedef struct multicall_entry {
-+ unsigned long op, result;
-+ unsigned long args[6];
-+} multicall_entry_t;
-+
-+/*
-+ * Event channel endpoints per domain:
-+ * 1024 if a long is 32 bits; 4096 if a long is 64 bits.
-+ */
-+#define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64)
-+
-+typedef struct vcpu_time_info {
-+ /*
-+ * Updates to the following values are preceded and followed by an
-+ * increment of 'version'. The guest can therefore detect updates by
-+ * looking for changes to 'version'. If the least-significant bit of
-+ * the version number is set then an update is in progress and the guest
-+ * must wait to read a consistent set of values.
-+ * The correct way to interact with the version number is similar to
-+ * Linux's seqlock: see the implementations of read_seqbegin/read_seqretry.
-+ */
-+ uint32_t version;
-+ uint32_t pad0;
-+ uint64_t tsc_timestamp; /* TSC at last update of time vals. */
-+ uint64_t system_time; /* Time, in nanosecs, since boot. */
-+ /*
-+ * Current system time:
-+ * system_time + ((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul
-+ * CPU frequency (Hz):
-+ * ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift
-+ */
-+ uint32_t tsc_to_system_mul;
-+ int8_t tsc_shift;
-+ int8_t pad1[3];
-+} vcpu_time_info_t; /* 32 bytes */
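-+
-+/*
-+ * Illustrative sketch (not part of the original header): the
-+ * read_seqbegin/read_seqretry pattern described above, applied to
-+ * vcpu_time_info_t.  rmb() is the guest's usual read memory barrier.
-+ */
-+#if 0 /* example only */
-+static uint64_t example_read_system_time(volatile vcpu_time_info_t *t)
-+{
-+    uint32_t ver;
-+    uint64_t system_time;
-+
-+    do {
-+        ver = t->version;
-+        rmb();                      /* fetch version before the payload */
-+        system_time = t->system_time;
-+        rmb();                      /* fetch payload before the re-check */
-+    } while ((ver & 1) || (t->version != ver));
-+    return system_time;
-+}
-+#endif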
-+
-+typedef struct vcpu_info {
-+ /*
-+ * 'evtchn_upcall_pending' is written non-zero by Xen to indicate
-+ * a pending notification for a particular VCPU. It is then cleared
-+ * by the guest OS /before/ checking for pending work, thus avoiding
-+ * a set-and-check race. Note that the mask is only accessed by Xen
-+ * on the CPU that is currently hosting the VCPU. This means that the
-+ * pending and mask flags can be updated by the guest without special
-+ * synchronisation (i.e., no need for the x86 LOCK prefix).
-+ * This may seem suboptimal because if the pending flag is set by
-+ * a different CPU then an IPI may be scheduled even when the mask
-+ * is set. However, note:
-+ * 1. The task of 'interrupt holdoff' is covered by the per-event-
-+ * channel mask bits. A 'noisy' event that is continually being
-+ * triggered can be masked at source at this very precise
-+ * granularity.
-+ * 2. The main purpose of the per-VCPU mask is therefore to restrict
-+ * reentrant execution: whether for concurrency control, or to
-+ * prevent unbounded stack usage. Whatever the purpose, we expect
-+ * that the mask will be asserted only for short periods at a time,
-+ * and so the likelihood of a 'spurious' IPI is suitably small.
-+ * The mask is read before making an event upcall to the guest: a
-+ * non-zero mask therefore guarantees that the VCPU will not receive
-+ * an upcall activation. The mask is cleared when the VCPU requests
-+ * to block: this avoids wakeup-waiting races.
-+ */
-+ uint8_t evtchn_upcall_pending;
-+ uint8_t evtchn_upcall_mask;
-+ unsigned long evtchn_pending_sel;
-+ arch_vcpu_info_t arch;
-+ vcpu_time_info_t time;
-+} vcpu_info_t; /* 64 bytes (x86) */
-+
-+/*
-+ * Xen/kernel shared data -- pointer provided in start_info.
-+ * NB. We expect that this struct is smaller than a page.
-+ */
-+typedef struct shared_info {
-+ vcpu_info_t vcpu_info[MAX_VIRT_CPUS];
-+
-+ /*
-+ * A domain can create "event channels" on which it can send and receive
-+ * asynchronous event notifications. There are three classes of event that
-+ * are delivered by this mechanism:
-+ * 1. Bi-directional inter- and intra-domain connections. Domains must
-+ * arrange out-of-band to set up a connection (usually by allocating
-+ * an unbound 'listener' port and advertising that via a storage service
-+ * such as xenstore).
-+ * 2. Physical interrupts. A domain with suitable hardware-access
-+ * privileges can bind an event-channel port to a physical interrupt
-+ * source.
-+ * 3. Virtual interrupts ('events'). A domain can bind an event-channel
-+ * port to a virtual interrupt source, such as the virtual-timer
-+ * device or the emergency console.
-+ *
-+ * Event channels are addressed by a "port index". Each channel is
-+ * associated with two bits of information:
-+ * 1. PENDING -- notifies the domain that there is a pending notification
-+ * to be processed. This bit is cleared by the guest.
-+ * 2. MASK -- if this bit is clear then a 0->1 transition of PENDING
-+ * will cause an asynchronous upcall to be scheduled. This bit is only
-+ * updated by the guest. It is read-only within Xen. If a channel
-+ * becomes pending while the channel is masked then the 'edge' is lost
-+ * (i.e., when the channel is unmasked, the guest must manually handle
-+ * pending notifications as no upcall will be scheduled by Xen).
-+ *
-+ * To expedite scanning of pending notifications, any 0->1 pending
-+ * transition on an unmasked channel causes a corresponding bit in a
-+ * per-vcpu selector word to be set. Each bit in the selector covers a
-+ * 'C long' in the PENDING bitfield array.
-+ */
-+ unsigned long evtchn_pending[sizeof(unsigned long) * 8];
-+ unsigned long evtchn_mask[sizeof(unsigned long) * 8];
-+
-+ /*
-+ * Wallclock time: updated only by control software. Guests should base
-+ * their gettimeofday() syscall on this wallclock-base value.
-+ */
-+ uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */
-+ uint32_t wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */
-+ uint32_t wc_nsec; /* Nsecs 00:00:00 UTC, Jan 1, 1970. */
-+
-+ arch_shared_info_t arch;
-+
-+} shared_info_t;
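-+
-+/*
-+ * Illustrative sketch (not part of the original header): scanning for
-+ * pending events as described in the comment inside shared_info_t.
-+ * The xchg() and __ffs() helpers are assumed from the guest kernel.
-+ */
-+#if 0 /* example only */
-+static void example_scan_events(shared_info_t *s, vcpu_info_t *v)
-+{
-+    unsigned long sel = xchg(&v->evtchn_pending_sel, 0);
-+
-+    while (sel != 0) {
-+        unsigned int i = __ffs(sel);        /* index of a set selector bit */
-+        unsigned long pending;
-+
-+        sel &= ~(1UL << i);
-+        pending = s->evtchn_pending[i] & ~s->evtchn_mask[i];
-+        /* ... each set bit b is event-channel port i*BITS_PER_LONG + b ... */
-+    }
-+}
-+#endif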
-+
-+/*
-+ * Start-of-day memory layout for the initial domain (DOM0):
-+ * 1. The domain is started within a contiguous virtual-memory region.
-+ * 2. The contiguous region begins and ends on an aligned 4MB boundary.
-+ * 3. The region start corresponds to the load address of the OS image.
-+ * If the load address is not 4MB aligned then the address is rounded down.
-+ * 4. This is the order of bootstrap elements in the initial virtual region:
-+ * a. relocated kernel image
-+ * b. initial ram disk [mod_start, mod_len]
-+ * c. list of allocated page frames [mfn_list, nr_pages]
-+ * d. start_info_t structure [register ESI (x86)]
-+ * e. bootstrap page tables [pt_base, CR3 (x86)]
-+ * f. bootstrap stack [register ESP (x86)]
-+ * 5. Bootstrap elements are packed together, but each is 4kB-aligned.
-+ * 6. The initial ram disk may be omitted.
-+ * 7. The list of page frames forms a contiguous 'pseudo-physical' memory
-+ * layout for the domain. In particular, the bootstrap virtual-memory
-+ * region is a 1:1 mapping to the first section of the pseudo-physical map.
-+ * 8. All bootstrap elements are mapped read-writable for the guest OS. The
-+ * only exception is the bootstrap page table, which is mapped read-only.
-+ * 9. There is guaranteed to be at least 512kB padding after the final
-+ * bootstrap element. If necessary, the bootstrap virtual region is
-+ * extended by an extra 4MB to ensure this.
-+ */
-+
-+#define MAX_GUEST_CMDLINE 1024
-+typedef struct start_info {
-+ /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME. */
-+ char magic[32]; /* "xen-<version>-<platform>". */
-+ unsigned long nr_pages; /* Total pages allocated to this domain. */
-+ unsigned long shared_info; /* MACHINE address of shared info struct. */
-+ uint32_t flags; /* SIF_xxx flags. */
-+ unsigned long store_mfn; /* MACHINE page number of shared page. */
-+ uint32_t store_evtchn; /* Event channel for store communication. */
-+ unsigned long console_mfn; /* MACHINE address of console page. */
-+ uint32_t console_evtchn; /* Event channel for console messages. */
-+ /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME). */
-+ unsigned long pt_base; /* VIRTUAL address of page directory. */
-+ unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames. */
-+ unsigned long mfn_list; /* VIRTUAL address of page-frame list. */
-+ unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */
-+ unsigned long mod_len; /* Size (bytes) of pre-loaded module. */
-+ int8_t cmd_line[MAX_GUEST_CMDLINE];
-+} start_info_t;
-+
-+/* These flags are passed in the 'flags' field of start_info_t. */
-+#define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */
-+#define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */
-+
-+typedef uint64_t cpumap_t;
-+
-+typedef uint8_t xen_domain_handle_t[16];
-+
-+/* Turn a plain number into a C unsigned long constant. */
-+#define __mk_unsigned_long(x) x ## UL
-+#define mk_unsigned_long(x) __mk_unsigned_long(x)
-+
-+#else /* __ASSEMBLY__ */
-+
-+/* In assembly code we cannot use C numeric constant suffixes. */
-+#define mk_unsigned_long(x) x
-+
-+#endif /* !__ASSEMBLY__ */
-+
-+#endif /* __XEN_PUBLIC_XEN_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff --git a/include/xen/net_driver_util.h b/include/xen/net_driver_util.h
-new file mode 100644
-index 0000000..130b4f0
---- /dev/null
-+++ b/include/xen/net_driver_util.h
-@@ -0,0 +1,56 @@
-+/*****************************************************************************
-+ *
-+ * Utility functions for Xen network devices.
-+ *
-+ * Copyright (c) 2005 XenSource Ltd.
-+ *
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following
-+ * license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject
-+ * to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ */
-+
-+#ifndef _ASM_XEN_NET_DRIVER_UTIL_H
-+#define _ASM_XEN_NET_DRIVER_UTIL_H
-+
-+
-+#include <xen/xenbus.h>
-+
-+
-+/**
-+ * Read the 'mac' node at the given device's node in the store, and parse that
-+ * as colon-separated octets, placing the result in the given mac array. mac must be
-+ * a preallocated array of length ETH_ALEN (as declared in linux/if_ether.h).
-+ * Return 0 on success, or -errno on error.
-+ */
-+int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]);
-+
-+
-+#endif /* _ASM_XEN_NET_DRIVER_UTIL_H */
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/include/xen/pcifront.h b/include/xen/pcifront.h
-new file mode 100644
-index 0000000..51b170a
---- /dev/null
-+++ b/include/xen/pcifront.h
-@@ -0,0 +1,39 @@
-+/*
-+ * PCI Frontend - arch-dependent declarations
-+ *
-+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
-+ */
-+#ifndef __XEN_ASM_PCIFRONT_H__
-+#define __XEN_ASM_PCIFRONT_H__
-+
-+#include <linux/config.h>
-+#include <linux/spinlock.h>
-+
-+#ifdef __KERNEL__
-+
-+struct pcifront_device;
-+
-+struct pcifront_sd {
-+ int domain;
-+ struct pcifront_device *pdev;
-+};
-+
-+struct pci_bus;
-+
-+#ifdef CONFIG_PCI_DOMAINS
-+static inline int pci_domain_nr(struct pci_bus *bus)
-+{
-+ struct pcifront_sd *sd = bus->sysdata;
-+ return sd->domain;
-+}
-+static inline int pci_proc_domain(struct pci_bus *bus)
-+{
-+ return pci_domain_nr(bus);
-+}
-+#endif /* CONFIG_PCI_DOMAINS */
-+
-+extern spinlock_t pci_bus_lock;
-+
-+#endif /* __KERNEL__ */
-+
-+#endif /* __XEN_ASM_PCIFRONT_H__ */
-diff --git a/include/xen/public/evtchn.h b/include/xen/public/evtchn.h
-new file mode 100644
-index 0000000..456f246
---- /dev/null
-+++ b/include/xen/public/evtchn.h
-@@ -0,0 +1,98 @@
-+/******************************************************************************
-+ * evtchn.h
-+ *
-+ * Interface to /dev/xen/evtchn.
-+ *
-+ * Copyright (c) 2003-2005, K A Fraser
-+ *
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __LINUX_PUBLIC_EVTCHN_H__
-+#define __LINUX_PUBLIC_EVTCHN_H__
-+
-+/* /dev/xen/evtchn resides at device number major=10, minor=201 */
-+#define EVTCHN_MINOR 201
-+
-+/*
-+ * Bind a fresh port to VIRQ @virq.
-+ * Return allocated port.
-+ */
-+#define IOCTL_EVTCHN_BIND_VIRQ \
-+ _IOC(_IOC_NONE, 'E', 0, sizeof(struct ioctl_evtchn_bind_virq))
-+struct ioctl_evtchn_bind_virq {
-+ unsigned int virq;
-+};
-+
-+/*
-+ * Bind a fresh port to remote <@remote_domain, @remote_port>.
-+ * Return allocated port.
-+ */
-+#define IOCTL_EVTCHN_BIND_INTERDOMAIN \
-+ _IOC(_IOC_NONE, 'E', 1, sizeof(struct ioctl_evtchn_bind_interdomain))
-+struct ioctl_evtchn_bind_interdomain {
-+ unsigned int remote_domain, remote_port;
-+};
-+
-+/*
-+ * Allocate a fresh port for binding to @remote_domain.
-+ * Return allocated port.
-+ */
-+#define IOCTL_EVTCHN_BIND_UNBOUND_PORT \
-+ _IOC(_IOC_NONE, 'E', 2, sizeof(struct ioctl_evtchn_bind_unbound_port))
-+struct ioctl_evtchn_bind_unbound_port {
-+ unsigned int remote_domain;
-+};
-+
-+/*
-+ * Unbind previously allocated @port.
-+ */
-+#define IOCTL_EVTCHN_UNBIND \
-+ _IOC(_IOC_NONE, 'E', 3, sizeof(struct ioctl_evtchn_unbind))
-+struct ioctl_evtchn_unbind {
-+ unsigned int port;
-+};
-+
-+/*
-+ * Send an event notification to the specified @port.
-+ */
-+#define IOCTL_EVTCHN_NOTIFY \
-+ _IOC(_IOC_NONE, 'E', 4, sizeof(struct ioctl_evtchn_notify))
-+struct ioctl_evtchn_notify {
-+ unsigned int port;
-+};
-+
-+/* Clear and reinitialise the event buffer. Clear error condition. */
-+#define IOCTL_EVTCHN_RESET \
-+ _IOC(_IOC_NONE, 'E', 5, 0)
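-+
-+/*
-+ * Illustrative sketch (not part of the original header): binding a
-+ * VIRQ from user space.  The /dev/xen/evtchn node described above must
-+ * exist; error handling is elided.
-+ */
-+#if 0 /* example only */
-+#include <fcntl.h>
-+#include <sys/ioctl.h>
-+
-+static int example_bind_virq(unsigned int virq)
-+{
-+    struct ioctl_evtchn_bind_virq bind = { .virq = virq };
-+    int fd = open("/dev/xen/evtchn", O_RDWR);
-+
-+    if (fd < 0)
-+        return -1;
-+    /* Returns the allocated local port, or -1 on error. */
-+    return ioctl(fd, IOCTL_EVTCHN_BIND_VIRQ, &bind);
-+}
-+#endif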
-+
-+#endif /* __LINUX_PUBLIC_EVTCHN_H__ */
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
-diff --git a/include/xen/public/privcmd.h b/include/xen/public/privcmd.h
-new file mode 100644
-index 0000000..074d66c
---- /dev/null
-+++ b/include/xen/public/privcmd.h
-@@ -0,0 +1,91 @@
-+/******************************************************************************
-+ * privcmd.h
-+ *
-+ * Interface to /proc/xen/privcmd.
-+ *
-+ * Copyright (c) 2003-2005, K A Fraser
-+ *
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __LINUX_PUBLIC_PRIVCMD_H__
-+#define __LINUX_PUBLIC_PRIVCMD_H__
-+
-+#ifndef __user
-+#define __user
-+#endif
-+
-+typedef struct privcmd_hypercall
-+{
-+ unsigned long op;
-+ unsigned long arg[5];
-+} privcmd_hypercall_t;
-+
-+typedef struct privcmd_mmap_entry {
-+ unsigned long va;
-+ unsigned long mfn;
-+ unsigned long npages;
-+} privcmd_mmap_entry_t;
-+
-+typedef struct privcmd_mmap {
-+ int num;
-+ domid_t dom; /* target domain */
-+ privcmd_mmap_entry_t __user *entry;
-+} privcmd_mmap_t;
-+
-+typedef struct privcmd_mmapbatch {
-+ int num; /* number of pages to populate */
-+ domid_t dom; /* target domain */
-+ unsigned long addr; /* virtual address */
-+ unsigned long __user *arr; /* array of mfns - top nibble set on err */
-+} privcmd_mmapbatch_t;
-+
-+typedef struct privcmd_blkmsg
-+{
-+ unsigned long op;
-+ void *buf;
-+ int buf_size;
-+} privcmd_blkmsg_t;
-+
-+/*
-+ * @cmd: IOCTL_PRIVCMD_HYPERCALL
-+ * @arg: &privcmd_hypercall_t
-+ * Return: Value returned from execution of the specified hypercall.
-+ */
-+#define IOCTL_PRIVCMD_HYPERCALL \
-+ _IOC(_IOC_NONE, 'P', 0, sizeof(privcmd_hypercall_t))
-+#define IOCTL_PRIVCMD_MMAP \
-+ _IOC(_IOC_NONE, 'P', 2, sizeof(privcmd_mmap_t))
-+#define IOCTL_PRIVCMD_MMAPBATCH \
-+ _IOC(_IOC_NONE, 'P', 3, sizeof(privcmd_mmapbatch_t))
-+
-+#endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
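
/*
 * Illustrative sketch, not part of the original patch: driving
 * IOCTL_PRIVCMD_HYPERCALL from a privileged tool. The hypercall number
 * and arguments are placeholders; real callers fill op and arg[] from
 * the Xen interface headers, and the include path is an assumption.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xen/public/privcmd.h>

static long hypercall_example(unsigned long op,
			      unsigned long arg0, unsigned long arg1)
{
	privcmd_hypercall_t call = {
		.op  = op,
		.arg = { arg0, arg1, 0, 0, 0 },
	};
	long ret;
	int fd;

	fd = open("/proc/xen/privcmd", O_RDWR);
	if (fd < 0)
		return -1;

	/* Per the comment above, the ioctl's return value is whatever
	 * the hypercall itself returned. */
	ret = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
	close(fd);
	return ret;
}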
-diff --git a/include/xen/tpmfe.h b/include/xen/tpmfe.h
-new file mode 100644
-index 0000000..9b8ea7c
---- /dev/null
-+++ b/include/xen/tpmfe.h
-@@ -0,0 +1,40 @@
-+#ifndef TPM_FE_H
-+#define TPM_FE_H
-+
-+struct tpm_private;
-+
-+struct tpmfe_device {
-+ /*
-+ * Let upper layer receive data from front-end
-+ */
-+ int (*receive)(const u8 *buffer, size_t count, const void *ptr);
-+ /*
-+ * Indicate the status of the front-end to the upper
-+ * layer.
-+ */
-+ void (*status)(unsigned int flags);
-+
-+ /*
-+ * This field indicates the maximum size the driver can
-+ * transfer in one chunk. It is filled out by the front-end
-+ * driver and should be propagated to the generic tpm driver
-+ * for allocation of buffers.
-+ */
-+ unsigned int max_tx_size;
-+ /*
-+ * The following is a private structure of the underlying
-+ * driver. It's expected as first parameter in the send function.
-+ */
-+ struct tpm_private *tpm_private;
-+};
-+
-+enum {
-+ TPMFE_STATUS_DISCONNECTED = 0x0,
-+ TPMFE_STATUS_CONNECTED = 0x1
-+};
-+
-+int tpm_fe_send(struct tpm_private * tp, const u8 * buf, size_t count, void *ptr);
-+int tpm_fe_register_receiver(struct tpmfe_device *);
-+void tpm_fe_unregister_receiver(void);
-+
-+#endif
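
/*
 * Illustrative sketch, not part of the original patch: the handshake a
 * generic TPM driver performs against the tpmfe interface above. The
 * callbacks and names here are hypothetical placeholders.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <xen/tpmfe.h>

static int example_receive(const u8 *buffer, size_t count, const void *ptr)
{
	/* Hand the TPM response buffer up to the TPM core here. */
	return 0;
}

static void example_status(unsigned int flags)
{
	if (flags & TPMFE_STATUS_CONNECTED)
		printk(KERN_INFO "tpmfe example: front-end connected\n");
}

static struct tpmfe_device example_tpmfe = {
	.receive = example_receive,
	.status  = example_status,
};

static int __init example_init(void)
{
	/* After registration the front-end has filled in max_tx_size
	 * (to size buffers) and tpm_private (first tpm_fe_send() arg). */
	return tpm_fe_register_receiver(&example_tpmfe);
}
module_init(example_init);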
-diff --git a/include/xen/xen_proc.h b/include/xen/xen_proc.h
-new file mode 100644
-index 0000000..dc89521
---- /dev/null
-+++ b/include/xen/xen_proc.h
-@@ -0,0 +1,23 @@
-+
-+#ifndef __ASM_XEN_PROC_H__
-+#define __ASM_XEN_PROC_H__
-+
-+#include <linux/config.h>
-+#include <linux/proc_fs.h>
-+
-+extern struct proc_dir_entry *create_xen_proc_entry(
-+ const char *name, mode_t mode);
-+extern void remove_xen_proc_entry(
-+ const char *name);
-+
-+#endif /* __ASM_XEN_PROC_H__ */
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
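
/*
 * A minimal sketch, not part of the original patch, pairing
 * create_xen_proc_entry() with remove_xen_proc_entry(); the entry name
 * is hypothetical.
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <xen/xen_proc.h>

static int __init example_proc_init(void)
{
	/* Creates a read-only /proc/xen/example entry. */
	if (!create_xen_proc_entry("example", 0400))
		return -ENOMEM;
	return 0;
}

static void __exit example_proc_exit(void)
{
	remove_xen_proc_entry("example");
}

module_init(example_proc_init);
module_exit(example_proc_exit);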
-diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
-new file mode 100644
-index 0000000..27b08d5
---- /dev/null
-+++ b/include/xen/xenbus.h
-@@ -0,0 +1,298 @@
-+/******************************************************************************
-+ * xenbus.h
-+ *
-+ * Talks to Xen Store to figure out what devices we have.
-+ *
-+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
-+ * Copyright (C) 2005 XenSource Ltd.
-+ *
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef _XEN_XENBUS_H
-+#define _XEN_XENBUS_H
-+
-+#include <linux/device.h>
-+#include <linux/notifier.h>
-+#include <asm/semaphore.h>
-+#include <xen/interface/xen.h>
-+#include <xen/interface/grant_table.h>
-+#include <xen/interface/io/xenbus.h>
-+#include <xen/interface/io/xs_wire.h>
-+
-+#define XBT_NULL 0
-+
-+/* Register callback to watch this node. */
-+struct xenbus_watch
-+{
-+ struct list_head list;
-+
-+ /* Path being watched. */
-+ const char *node;
-+
-+ /* Callback (executed in a process context with no locks held). */
-+ void (*callback)(struct xenbus_watch *,
-+ const char **vec, unsigned int len);
-+};
-+
-+
-+/* A xenbus device. */
-+struct xenbus_device {
-+ const char *devicetype;
-+ const char *nodename;
-+ const char *otherend;
-+ int otherend_id;
-+ struct xenbus_watch otherend_watch;
-+ struct device dev;
-+ XenbusState state;
-+ void *data;
-+};
-+
-+static inline struct xenbus_device *to_xenbus_device(struct device *dev)
-+{
-+ return container_of(dev, struct xenbus_device, dev);
-+}
-+
-+struct xenbus_device_id
-+{
-+ /* .../device/<device_type>/<identifier> */
-+ char devicetype[32]; /* General class of device. */
-+};
-+
-+/* A xenbus driver. */
-+struct xenbus_driver {
-+ char *name;
-+ struct module *owner;
-+ const struct xenbus_device_id *ids;
-+ int (*probe)(struct xenbus_device *dev,
-+ const struct xenbus_device_id *id);
-+ void (*otherend_changed)(struct xenbus_device *dev,
-+ XenbusState backend_state);
-+ int (*remove)(struct xenbus_device *dev);
-+ int (*suspend)(struct xenbus_device *dev);
-+ int (*resume)(struct xenbus_device *dev);
-+ int (*uevent)(struct xenbus_device *, char **, int, char *, int);
-+ struct device_driver driver;
-+ int (*read_otherend_details)(struct xenbus_device *dev);
-+};
-+
-+static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv)
-+{
-+ return container_of(drv, struct xenbus_driver, driver);
-+}
-+
-+int xenbus_register_frontend(struct xenbus_driver *drv);
-+int xenbus_register_backend(struct xenbus_driver *drv);
-+void xenbus_unregister_driver(struct xenbus_driver *drv);
-+
-+typedef u32 xenbus_transaction_t;
-+
-+char **xenbus_directory(xenbus_transaction_t t,
-+ const char *dir, const char *node, unsigned int *num);
-+void *xenbus_read(xenbus_transaction_t t,
-+ const char *dir, const char *node, unsigned int *len);
-+int xenbus_write(xenbus_transaction_t t,
-+ const char *dir, const char *node, const char *string);
-+int xenbus_mkdir(xenbus_transaction_t t,
-+ const char *dir, const char *node);
-+int xenbus_exists(xenbus_transaction_t t,
-+ const char *dir, const char *node);
-+int xenbus_rm(xenbus_transaction_t t, const char *dir, const char *node);
-+int xenbus_transaction_start(xenbus_transaction_t *t);
-+int xenbus_transaction_end(xenbus_transaction_t t, int abort);
-+
-+/* Single read and scanf: returns -errno or num scanned if > 0. */
-+int xenbus_scanf(xenbus_transaction_t t,
-+ const char *dir, const char *node, const char *fmt, ...)
-+ __attribute__((format(scanf, 4, 5)));
-+
-+/* Single printf and write: returns -errno or 0. */
-+int xenbus_printf(xenbus_transaction_t t,
-+ const char *dir, const char *node, const char *fmt, ...)
-+ __attribute__((format(printf, 4, 5)));
-+
-+/* Generic read function: NULL-terminated triples of name,
-+ * sprintf-style type string, and pointer. Returns 0 or -errno. */
-+int xenbus_gather(xenbus_transaction_t t, const char *dir, ...);
-+
-+/* notifier routines for when the xenstore comes up */
-+int register_xenstore_notifier(struct notifier_block *nb);
-+void unregister_xenstore_notifier(struct notifier_block *nb);
-+
-+int register_xenbus_watch(struct xenbus_watch *watch);
-+void unregister_xenbus_watch(struct xenbus_watch *watch);
-+void xs_suspend(void);
-+void xs_resume(void);
-+
-+/* Used by xenbus_dev to borrow kernel's store connection. */
-+void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg);
-+
-+/* Called from xen core code. */
-+void xenbus_suspend(void);
-+void xenbus_resume(void);
-+
-+#define XENBUS_IS_ERR_READ(str) ({ \
-+ if (!IS_ERR(str) && strlen(str) == 0) { \
-+ kfree(str); \
-+ str = ERR_PTR(-ERANGE); \
-+ } \
-+ IS_ERR(str); \
-+})
-+
-+#define XENBUS_EXIST_ERR(err) ((err) == -ENOENT || (err) == -ERANGE)
-+
-+
-+/**
-+ * Register a watch on the given path, using the given xenbus_watch structure
-+ * for storage, and the given callback function as the callback. Return 0 on
-+ * success, or -errno on error. On success, the given path will be saved as
-+ * watch->node, and remains the caller's to free. On error, watch->node will
-+ * be NULL, the device will switch to XenbusStateClosing, and the error will
-+ * be saved in the store.
-+ */
-+int xenbus_watch_path(struct xenbus_device *dev, const char *path,
-+ struct xenbus_watch *watch,
-+ void (*callback)(struct xenbus_watch *,
-+ const char **, unsigned int));
-+
-+
-+/**
-+ * Register a watch on the given path/path2, using the given xenbus_watch
-+ * structure for storage, and the given callback function as the callback.
-+ * Return 0 on success, or -errno on error. On success, the watched path
-+ * (path/path2) will be saved as watch->node, and becomes the caller's to
-+ * kfree(). On error, watch->node will be NULL, so the caller has nothing to
-+ * free, the device will switch to XenbusStateClosing, and the error will be
-+ * saved in the store.
-+ */
-+int xenbus_watch_path2(struct xenbus_device *dev, const char *path,
-+ const char *path2, struct xenbus_watch *watch,
-+ void (*callback)(struct xenbus_watch *,
-+ const char **, unsigned int));
-+
-+
-+/**
-+ * Advertise in the store a change of the given driver to the given new_state.
-+ * Perform the change inside the given transaction xbt. xbt may be NULL, in
-+ * which case this is performed inside its own transaction. Return 0 on
-+ * success, or -errno on error. On error, the device will switch to
-+ * XenbusStateClosing, and the error will be saved in the store.
-+ */
-+int xenbus_switch_state(struct xenbus_device *dev,
-+ xenbus_transaction_t xbt,
-+ XenbusState new_state);
-+
-+
-+/**
-+ * Grant access to the given ring_mfn to the peer of the given device. Return
-+ * 0 on success, or -errno on error. On error, the device will switch to
-+ * XenbusStateClosing, and the error will be saved in the store.
-+ */
-+int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn);
-+
-+
-+/**
-+ * Map a page of memory into this domain from another domain's grant table.
-+ * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
-+ * page to that address, and sets *vaddr to that address.
-+ * xenbus_map_ring does not allocate the virtual address space (you must do
-+ * this yourself!). It only maps in the page to the specified address.
-+ * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
-+ * or -ENOMEM on error. If an error is returned, device will switch to
-+ * XenbusStateClosing and the error message will be saved in XenStore.
-+ */
-+int xenbus_map_ring_valloc(struct xenbus_device *dev,
-+ int gnt_ref, void **vaddr);
-+int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
-+ grant_handle_t *handle, void *vaddr);
-+
-+
-+/**
-+ * Unmap a page of memory in this domain that was imported from another domain.
-+ * Use xenbus_unmap_ring_vfree if you mapped in your memory with
-+ * xenbus_map_ring_valloc (it will free the virtual address space).
-+ * Returns 0 on success and returns GNTST_* on error
-+ * (see xen/include/interface/grant_table.h).
-+ */
-+int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr);
-+int xenbus_unmap_ring(struct xenbus_device *dev,
-+ grant_handle_t handle, void *vaddr);
-+
-+
-+/**
-+ * Allocate an event channel for the given xenbus_device, assigning the newly
-+ * created local port to *port. Return 0 on success, or -errno on error. On
-+ * error, the device will switch to XenbusStateClosing, and the error will be
-+ * saved in the store.
-+ */
-+int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port);
-+
-+
-+/**
-+ * Bind to an existing interdomain event channel in another domain. Returns 0
-+ * on success and stores the local port in *port. On error, returns -errno,
-+ * switches the device to XenbusStateClosing, and saves the error in XenStore.
-+ */
-+int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port);
-+
-+
-+/**
-+ * Free an existing event channel. Returns 0 on success or -errno on error.
-+ */
-+int xenbus_free_evtchn(struct xenbus_device *dev, int port);
-+
-+
-+/**
-+ * Return the state of the driver rooted at the given store path, or
-+ * XenbusStateClosed if no state can be read.
-+ */
-+XenbusState xenbus_read_driver_state(const char *path);
-+
-+
-+/**
-+ * Report the given negative errno into the store, along with the given
-+ * formatted message.
-+ */
-+void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt,
-+ ...);
-+
-+
-+/**
-+ * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
-+ * xenbus_switch_state(dev, NULL, XenbusStateClosing) to schedule an orderly
-+ * closedown of this driver and its peer.
-+ */
-+void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt,
-+ ...);
-+
-+
-+#endif /* _XEN_XENBUS_H */
-+
-+/*
-+ * Local variables:
-+ * c-file-style: "linux"
-+ * indent-tabs-mode: t
-+ * c-indent-level: 8
-+ * c-basic-offset: 8
-+ * tab-width: 8
-+ * End:
-+ */
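
/*
 * Illustrative front-end probe sketch, not part of the original patch,
 * tying together several helpers above: xenbus_scanf() to read the
 * backend, xenbus_alloc_evtchn(), xenbus_printf() to advertise our
 * half, and xenbus_switch_state(). Node names are hypothetical.
 */
#include <linux/errno.h>
#include <xen/xenbus.h>

static int example_probe(struct xenbus_device *dev,
			 const struct xenbus_device_id *id)
{
	int ring_ref, port, err;

	/* Single read + scanf: returns the number of items scanned. */
	err = xenbus_scanf(XBT_NULL, dev->otherend, "ring-ref",
			   "%d", &ring_ref);
	if (err != 1) {
		xenbus_dev_fatal(dev, err, "reading %s/ring-ref",
				 dev->otherend);
		return err < 0 ? err : -EINVAL;
	}

	err = xenbus_alloc_evtchn(dev, &port);
	if (err)
		return err;	/* device already switched to Closing */

	err = xenbus_printf(XBT_NULL, dev->nodename,
			    "event-channel", "%d", port);
	if (err)
		return err;

	return xenbus_switch_state(dev, XBT_NULL, XenbusStateInitialised);
}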
-diff --git a/include/xen/xencons.h b/include/xen/xencons.h
-new file mode 100644
-index 0000000..fa2160d
---- /dev/null
-+++ b/include/xen/xencons.h
-@@ -0,0 +1,14 @@
-+#ifndef __ASM_XENCONS_H__
-+#define __ASM_XENCONS_H__
-+
-+void xencons_force_flush(void);
-+void xencons_resume(void);
-+
-+/* Interrupt work hooks. Receive data, or kick data out. */
-+void xencons_rx(char *buf, unsigned len, struct pt_regs *regs);
-+void xencons_tx(void);
-+
-+int xencons_ring_init(void);
-+int xencons_ring_send(const char *data, unsigned len);
-+
-+#endif /* __ASM_XENCONS_H__ */
-diff --git a/init/Kconfig b/init/Kconfig
-index 38416a1..2c3ece9 100644
---- a/init/Kconfig
-+++ b/init/Kconfig
-@@ -224,7 +224,7 @@ config UID16
- This enables the legacy 16-bit UID syscall wrappers.
-
- config VM86
-- depends X86
-+ depends on X86
- default y
- bool "Enable VM86 support" if EMBEDDED
- help
-diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
-index 0b46a5d..17ab322 100644
---- a/kernel/Kconfig.preempt
-+++ b/kernel/Kconfig.preempt
-@@ -35,6 +35,7 @@ config PREEMPT_VOLUNTARY
-
- config PREEMPT
- bool "Preemptible Kernel (Low-Latency Desktop)"
-+ depends on !XEN
- help
- This option reduces the latency of the kernel by making
- all kernel code (that is not executing in a critical section)
-diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
-index 97d5559..0139444 100644
---- a/kernel/irq/manage.c
-+++ b/kernel/irq/manage.c
-@@ -164,9 +164,14 @@ int can_request_irq(unsigned int irq, un
- return !action;
- }
-
--/*
-- * Internal function to register an irqaction - typically used to
-- * allocate special interrupts that are part of the architecture.
-+/**
-+ * setup_irq - register an irqaction structure
-+ * @irq: Interrupt to register
-+ * @irqaction: The irqaction structure to be registered
-+ *
-+ * Normally called by request_irq, this function can be used
-+ * directly to allocate special interrupts that are part of the
-+ * architecture.
- */
- int setup_irq(unsigned int irq, struct irqaction * new)
- {
-@@ -238,28 +243,30 @@ int setup_irq(unsigned int irq, struct i
- return 0;
- }
-
--/**
-- * free_irq - free an interrupt
-- * @irq: Interrupt line to free
-- * @dev_id: Device identity to free
-+/*
-+ * teardown_irq - unregister an irqaction
-+ * @irq: Interrupt line being freed
-+ * @old: Pointer to the irqaction that is to be unregistered
- *
-- * Remove an interrupt handler. The handler is removed and if the
-- * interrupt line is no longer in use by any driver it is disabled.
-- * On a shared IRQ the caller must ensure the interrupt is disabled
-- * on the card it drives before calling this function. The function
-- * does not return until any executing interrupts for this IRQ
-- * have completed.
-+ * This function is called by free_irq and does the actual
-+ * business of unregistering the handler. It exists as a
-+ * separate function to enable handlers to be unregistered
-+ * for irqactions that have been allocated statically at
-+ * boot time.
- *
- * This function must not be called from interrupt context.
- */
--void free_irq(unsigned int irq, void *dev_id)
-+#ifndef CONFIG_XEN
-+static
-+#endif
-+int teardown_irq(unsigned int irq, struct irqaction * old)
- {
- struct irq_desc *desc;
- struct irqaction **p;
- unsigned long flags;
-
- if (irq >= NR_IRQS)
-- return;
-+ return -ENOENT;
-
- desc = irq_desc + irq;
- spin_lock_irqsave(&desc->lock,flags);
-@@ -271,7 +278,7 @@ void free_irq(unsigned int irq, void *de
- struct irqaction **pp = p;
-
- p = &action->next;
-- if (action->dev_id != dev_id)
-+ if (action != old)
- continue;
-
- /* Found it - now remove it from the list of entries */
-@@ -295,13 +302,52 @@ void free_irq(unsigned int irq, void *de
-
- /* Make sure it's not being used on another CPU */
- synchronize_irq(irq);
-- kfree(action);
-- return;
-+ return 0;
- }
-- printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
-+ printk(KERN_ERR "Trying to teardown free IRQ%d\n",irq);
-+ spin_unlock_irqrestore(&desc->lock,flags);
-+ return -ENOENT;
-+ }
-+}
-+
-+/**
-+ * free_irq - free an interrupt
-+ * @irq: Interrupt line to free
-+ * @dev_id: Device identity to free
-+ *
-+ * Remove an interrupt handler. The handler is removed and if the
-+ * interrupt line is no longer in use by any driver it is disabled.
-+ * On a shared IRQ the caller must ensure the interrupt is disabled
-+ * on the card it drives before calling this function. The function
-+ * does not return until any executing interrupts for this IRQ
-+ * have completed.
-+ *
-+ * This function must not be called from interrupt context.
-+ */
-+void free_irq(unsigned int irq, void *dev_id)
-+{
-+ struct irq_desc *desc;
-+ struct irqaction *action;
-+ unsigned long flags;
-+
-+ if (irq >= NR_IRQS)
-+ return;
-+
-+ desc = irq_desc + irq;
-+ spin_lock_irqsave(&desc->lock,flags);
-+ for (action = desc->action; action != NULL; action = action->next) {
-+ if (action->dev_id != dev_id)
-+ continue;
-+
- spin_unlock_irqrestore(&desc->lock,flags);
-+
-+ if (teardown_irq(irq, action) == 0)
-+ kfree(action);
- return;
- }
-+ printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
-+ spin_unlock_irqrestore(&desc->lock,flags);
-+ return;
- }
-
- EXPORT_SYMBOL(free_irq);
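
/*
 * A minimal sketch, not part of the original patch, of why teardown_irq()
 * loses its "static" under CONFIG_XEN: it lets an irqaction that was
 * never kmalloc()ed - e.g. one embedded in a static structure - be
 * unregistered without free_irq()'s kfree(). Names are hypothetical,
 * and the extern mirrors the definition above.
 */
#include <linux/interrupt.h>
#include <linux/kernel.h>

extern int teardown_irq(unsigned int irq, struct irqaction *old);

static irqreturn_t example_handler(int irq, void *dev_id,
				   struct pt_regs *regs)
{
	return IRQ_HANDLED;
}

static struct irqaction example_action = {
	.handler = example_handler,
	.name    = "example",
};

static void example_unbind(unsigned int irq)
{
	/* Unregister the statically allocated action; nothing to kfree. */
	if (teardown_irq(irq, &example_action))
		printk(KERN_ERR "example: IRQ %u was not bound\n", irq);
}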
-diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
-index a314e66..f50aa3a 100644
---- a/lib/Kconfig.debug
-+++ b/lib/Kconfig.debug
-@@ -145,7 +145,7 @@ config DEBUG_BUGVERBOSE
-
- config DEBUG_INFO
- bool "Compile the kernel with debug info"
-- depends on DEBUG_KERNEL
-+ depends on DEBUG_KERNEL && !X86_64_XEN
- help
- If you say Y here the resulting kernel image will include
- debugging info resulting in a larger kernel image.
-diff --git a/lib/Makefile b/lib/Makefile
-index 648b2c1..21cf76f 100644
---- a/lib/Makefile
-+++ b/lib/Makefile
-@@ -45,6 +45,9 @@ obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
- obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
-
- obj-$(CONFIG_SWIOTLB) += swiotlb.o
-+ifneq ($(CONFIG_IA64),y)
-+swiotlb-$(CONFIG_XEN) := ../arch/i386/kernel/swiotlb.o
-+endif
-
- hostprogs-y := gen_crc32table
- clean-files := crc32table.h
-diff --git a/mm/Kconfig b/mm/Kconfig
-index a9cb80a..909b2a0 100644
---- a/mm/Kconfig
-+++ b/mm/Kconfig
-@@ -126,11 +126,14 @@ comment "Memory hotplug is currently inc
- # Default to 4 for wider testing, though 8 might be more appropriate.
- # ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
- # PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
-+# XEN uses the mapping field on pagetable pages to store a pointer to
-+# the destructor.
- #
- config SPLIT_PTLOCK_CPUS
- int
- default "4096" if ARM && !CPU_CACHE_VIPT
- default "4096" if PARISC && !PA20
-+ default "4096" if XEN
- default "4"
-
- #
-diff --git a/mm/highmem.c b/mm/highmem.c
-index ce2e7e8..b29bf62 100644
---- a/mm/highmem.c
-+++ b/mm/highmem.c
-@@ -152,6 +152,17 @@ start:
- return vaddr;
- }
-
-+#ifdef CONFIG_XEN
-+void kmap_flush_unused(void)
-+{
-+ spin_lock(&kmap_lock);
-+ flush_all_zero_pkmaps();
-+ spin_unlock(&kmap_lock);
-+}
-+
-+EXPORT_SYMBOL(kmap_flush_unused);
-+#endif
-+
- void fastcall *kmap_high(struct page *page)
- {
- unsigned long vaddr;
-diff --git a/mm/memory.c b/mm/memory.c
-index 9abc600..351b316 100644
---- a/mm/memory.c
-+++ b/mm/memory.c
-@@ -405,7 +405,8 @@ struct page *vm_normal_page(struct vm_ar
- * Remove this test eventually!
- */
- if (unlikely(!pfn_valid(pfn))) {
-- print_bad_pte(vma, pte, addr);
-+ if (!(vma->vm_flags & VM_RESERVED))
-+ print_bad_pte(vma, pte, addr);
- return NULL;
- }
-
-@@ -1019,6 +1020,23 @@ int get_user_pages(struct task_struct *t
- continue;
- }
-
-+#ifdef CONFIG_XEN
-+ if (vma && (vma->vm_flags & VM_FOREIGN)) {
-+ struct page **map = vma->vm_private_data;
-+ int offset = (start - vma->vm_start) >> PAGE_SHIFT;
-+
-+ if (map[offset] != NULL) {
-+ if (pages)
-+ pages[i] = map[offset];
-+ if (vmas)
-+ vmas[i] = vma;
-+ i++;
-+ start += PAGE_SIZE;
-+ len--;
-+ continue;
-+ }
-+ }
-+#endif
- if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
- || !(vm_flags & vma->vm_flags))
- return i ? : -EFAULT;
-@@ -1358,6 +1376,98 @@ int remap_pfn_range(struct vm_area_struc
- }
- EXPORT_SYMBOL(remap_pfn_range);
-
-+#ifdef CONFIG_XEN
-+static inline int generic_pte_range(struct mm_struct *mm, pmd_t *pmd,
-+ unsigned long addr, unsigned long end,
-+ pte_fn_t fn, void *data)
-+{
-+ pte_t *pte;
-+ int err;
-+ struct page *pte_page;
-+
-+ pte = (mm == &init_mm) ?
-+ pte_alloc_kernel(pmd, addr) :
-+ pte_alloc_map(mm, pmd, addr);
-+ if (!pte)
-+ return -ENOMEM;
-+
-+ pte_page = pmd_page(*pmd);
-+
-+ do {
-+ err = fn(pte, pte_page, addr, data);
-+ if (err)
-+ break;
-+ } while (pte++, addr += PAGE_SIZE, addr != end);
-+
-+ if (mm != &init_mm)
-+ pte_unmap(pte-1);
-+ return err;
-+}
-+
-+static inline int generic_pmd_range(struct mm_struct *mm, pud_t *pud,
-+ unsigned long addr, unsigned long end,
-+ pte_fn_t fn, void *data)
-+{
-+ pmd_t *pmd;
-+ unsigned long next;
-+ int err;
-+
-+ pmd = pmd_alloc(mm, pud, addr);
-+ if (!pmd)
-+ return -ENOMEM;
-+ do {
-+ next = pmd_addr_end(addr, end);
-+ err = generic_pte_range(mm, pmd, addr, next, fn, data);
-+ if (err)
-+ break;
-+ } while (pmd++, addr = next, addr != end);
-+ return err;
-+}
-+
-+static inline int generic_pud_range(struct mm_struct *mm, pgd_t *pgd,
-+ unsigned long addr, unsigned long end,
-+ pte_fn_t fn, void *data)
-+{
-+ pud_t *pud;
-+ unsigned long next;
-+ int err;
-+
-+ pud = pud_alloc(mm, pgd, addr);
-+ if (!pud)
-+ return -ENOMEM;
-+ do {
-+ next = pud_addr_end(addr, end);
-+ err = generic_pmd_range(mm, pud, addr, next, fn, data);
-+ if (err)
-+ break;
-+ } while (pud++, addr = next, addr != end);
-+ return err;
-+}
-+
-+/*
-+ * Scan a region of virtual memory, filling in page tables as necessary
-+ * and calling a provided function on each leaf page table.
-+ */
-+int generic_page_range(struct mm_struct *mm, unsigned long addr,
-+ unsigned long size, pte_fn_t fn, void *data)
-+{
-+ pgd_t *pgd;
-+ unsigned long next;
-+ unsigned long end = addr + size;
-+ int err;
-+
-+ BUG_ON(addr >= end);
-+ pgd = pgd_offset(mm, addr);
-+ do {
-+ next = pgd_addr_end(addr, end);
-+ err = generic_pud_range(mm, pgd, addr, next, fn, data);
-+ if (err)
-+ break;
-+ } while (pgd++, addr = next, addr != end);
-+ return err;
-+}
-+#endif
-+
- /*
- * handle_pte_fault chooses page fault handler according to an entry
- * which was read non-atomically. Before making any commitment, on
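
/*
 * Illustrative caller, not part of the original patch, for the
 * generic_page_range() walker added above: the callback runs once per
 * leaf PTE with intermediate tables filled in. The callback body and
 * the assumption that a declaration is visible (e.g. via <linux/mm.h>)
 * are hypothetical.
 */
#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/pgtable.h>

static int example_clear_pte(pte_t *pte, struct page *pte_page,
			     unsigned long addr, void *data)
{
	pte_clear(&init_mm, addr, pte);
	return 0;	/* non-zero would abort the walk */
}

static int example_clear_range(unsigned long addr, unsigned long size)
{
	return generic_page_range(&init_mm, addr, size,
				  example_clear_pte, NULL);
}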
-diff --git a/mm/mmap.c b/mm/mmap.c
-index 47556d2..a01e3ff 100644
---- a/mm/mmap.c
-+++ b/mm/mmap.c
-@@ -1937,6 +1937,10 @@ void exit_mmap(struct mm_struct *mm)
- unsigned long nr_accounted = 0;
- unsigned long end;
-
-+#ifdef arch_exit_mmap
-+ arch_exit_mmap(mm);
-+#endif
-+
- lru_add_drain();
- flush_cache_mm(mm);
- tlb = tlb_gather_mmu(mm, 1);
-diff --git a/mm/page_alloc.c b/mm/page_alloc.c
-index 234bd48..3ef8362 100644
---- a/mm/page_alloc.c
-+++ b/mm/page_alloc.c
-@@ -418,7 +418,8 @@ static void __free_pages_ok(struct page
- int i;
- int reserved = 0;
-
-- arch_free_page(page, order);
-+ if (arch_free_page(page, order))
-+ return;
- if (!PageHighMem(page))
- mutex_debug_check_no_locks_freed(page_address(page),
- PAGE_SIZE<<order);
-@@ -711,7 +712,8 @@ static void fastcall free_hot_cold_page(
- struct per_cpu_pages *pcp;
- unsigned long flags;
-
-- arch_free_page(page, 0);
-+ if (arch_free_page(page, 0))
-+ return;
-
- if (PageAnon(page))
- page->mapping = NULL;
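
/*
 * A sketch, not part of the original patch, of the contract the two
 * hunks above introduce: arch_free_page() now returns non-zero when the
 * architecture has consumed the page itself (for Xen, a foreign page
 * returned to the hypervisor), so the generic free paths must bail out.
 * This default stub is hypothetical; the Xen version lives in the
 * architecture headers.
 */
#ifndef HAVE_ARCH_FREE_PAGE
static inline int arch_free_page(struct page *page, int order)
{
	return 0;	/* page not consumed: generic allocator frees it */
}
#endif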
-diff --git a/net/core/dev.c b/net/core/dev.c
-index 2afb0de..30baa34 100644
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -116,6 +116,12 @@
- #endif /* CONFIG_NET_RADIO */
- #include <asm/current.h>
-
-+#ifdef CONFIG_XEN
-+#include <net/ip.h>
-+#include <linux/tcp.h>
-+#include <linux/udp.h>
-+#endif
-+
- /*
- * The list of packet types we will receive (as opposed to discard)
- * and the routines to invoke.
-@@ -1260,6 +1266,37 @@ int dev_queue_xmit(struct sk_buff *skb)
- __skb_linearize(skb, GFP_ATOMIC))
- goto out_kfree_skb;
-
-+#ifdef CONFIG_XEN
-+ /* If a checksum-deferred packet is forwarded to a device that needs a
-+ * checksum, correct the pointers and force checksumming.
-+ */
-+ if (skb->proto_csum_blank) {
-+ if (skb->protocol != htons(ETH_P_IP))
-+ goto out_kfree_skb;
-+ skb->h.raw = (unsigned char *)skb->nh.iph + 4*skb->nh.iph->ihl;
-+ if (skb->h.raw >= skb->tail)
-+ goto out_kfree_skb;
-+ switch (skb->nh.iph->protocol) {
-+ case IPPROTO_TCP:
-+ skb->csum = offsetof(struct tcphdr, check);
-+ break;
-+ case IPPROTO_UDP:
-+ skb->csum = offsetof(struct udphdr, check);
-+ break;
-+ default:
-+ if (net_ratelimit())
-+ printk(KERN_ERR "Attempting to checksum a non-"
-+ "TCP/UDP packet, dropping a protocol"
-+ " %d packet", skb->nh.iph->protocol);
-+ rc = -EPROTO;
-+ goto out_kfree_skb;
-+ }
-+ if ((skb->h.raw + skb->csum + 2) > skb->tail)
-+ goto out_kfree_skb;
-+ skb->ip_summed = CHECKSUM_HW;
-+ }
-+#endif
-+
- /* If packet is not checksummed and device does not support
- * checksumming for this protocol, complete checksumming here.
- */
-@@ -1609,6 +1646,19 @@ int netif_receive_skb(struct sk_buff *sk
- }
- #endif
-
-+#ifdef CONFIG_XEN
-+ switch (skb->ip_summed) {
-+ case CHECKSUM_UNNECESSARY:
-+ skb->proto_csum_valid = 1;
-+ break;
-+ case CHECKSUM_HW:
-+ /* XXX Implement me. */
-+ default:
-+ skb->proto_csum_valid = 0;
-+ break;
-+ }
-+#endif
-+
- list_for_each_entry_rcu(ptype, &ptype_all, list) {
- if (!ptype->dev || ptype->dev == skb->dev) {
- if (pt_prev)
-diff --git a/net/core/skbuff.c b/net/core/skbuff.c
-index 2144952..dadc9cf 100644
---- a/net/core/skbuff.c
-+++ b/net/core/skbuff.c
-@@ -132,6 +132,7 @@ void skb_under_panic(struct sk_buff *skb
- * Buffers may only be allocated from interrupts using a @gfp_mask of
- * %GFP_ATOMIC.
- */
-+#ifndef CONFIG_HAVE_ARCH_ALLOC_SKB
- struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
- int fclone)
- {
-@@ -186,6 +187,7 @@ nodata:
- skb = NULL;
- goto out;
- }
-+#endif /* !CONFIG_HAVE_ARCH_ALLOC_SKB */
-
- /**
- * alloc_skb_from_cache - allocate a network buffer
-@@ -203,14 +205,18 @@ nodata:
- */
- struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
- unsigned int size,
-- gfp_t gfp_mask)
-+ gfp_t gfp_mask,
-+ int fclone)
- {
-+ kmem_cache_t *cache;
-+ struct skb_shared_info *shinfo;
- struct sk_buff *skb;
- u8 *data;
-
-+ cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
-+
- /* Get the HEAD */
-- skb = kmem_cache_alloc(skbuff_head_cache,
-- gfp_mask & ~__GFP_DMA);
-+ skb = kmem_cache_alloc(cache, gfp_mask & ~__GFP_DMA);
- if (!skb)
- goto out;
-
-@@ -227,16 +233,29 @@ struct sk_buff *alloc_skb_from_cache(kme
- skb->data = data;
- skb->tail = data;
- skb->end = data + size;
-+ /* make sure we initialize shinfo sequentially */
-+ shinfo = skb_shinfo(skb);
-+ atomic_set(&shinfo->dataref, 1);
-+ shinfo->nr_frags = 0;
-+ shinfo->tso_size = 0;
-+ shinfo->tso_segs = 0;
-+ shinfo->ufo_size = 0;
-+ shinfo->ip6_frag_id = 0;
-+ shinfo->frag_list = NULL;
-
-- atomic_set(&(skb_shinfo(skb)->dataref), 1);
-- skb_shinfo(skb)->nr_frags = 0;
-- skb_shinfo(skb)->tso_size = 0;
-- skb_shinfo(skb)->tso_segs = 0;
-- skb_shinfo(skb)->frag_list = NULL;
-+ if (fclone) {
-+ struct sk_buff *child = skb + 1;
-+ atomic_t *fclone_ref = (atomic_t *) (child + 1);
-+
-+ skb->fclone = SKB_FCLONE_ORIG;
-+ atomic_set(fclone_ref, 1);
-+
-+ child->fclone = SKB_FCLONE_UNAVAILABLE;
-+ }
- out:
- return skb;
- nodata:
-- kmem_cache_free(skbuff_head_cache, skb);
-+ kmem_cache_free(cache, skb);
- skb = NULL;
- goto out;
- }
-@@ -408,6 +427,10 @@ struct sk_buff *skb_clone(struct sk_buff
- C(local_df);
- n->cloned = 1;
- n->nohdr = 0;
-+#ifdef CONFIG_XEN
-+ C(proto_csum_valid);
-+ C(proto_csum_blank);
-+#endif
- C(pkt_type);
- C(ip_summed);
- C(priority);
-diff --git a/scripts/Makefile.xen b/scripts/Makefile.xen
-new file mode 100644
-index 0000000..b3ec53a
---- /dev/null
-+++ b/scripts/Makefile.xen
-@@ -0,0 +1,14 @@
-+
-+# cherrypickxen($1 = allobj)
-+cherrypickxen = $(foreach var, $(1), \
-+ $(shell o=$(var); \
-+ c=$${o/%.o/-xen.c}; \
-+ s=$${o/%.o/-xen.S}; \
-+ oxen=$${o/%.o/-xen.o}; \
-+ [ -f $(srctree)/$(src)/$${c} ] || \
-+ [ -f $(srctree)/$(src)/$${s} ] \
-+ && echo $$oxen \
-+ || echo $(var) ) \
-+ )
-+# filterxen($1 = allobj, $2 = noobjs)
-+filterxen = $(filter-out $(2), $(1))
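
# Illustrative (hypothetical) use of the helpers above from a Kbuild
# Makefile, not part of the original patch. For each object with a
# -xen.c or -xen.S counterpart in the tree, cherrypickxen substitutes
# the -xen.o name; filterxen first drops objects that must not be built
# under Xen at all:
#
#   obj-y := setup.o traps.o quirks.o
#   disabled-obj-$(CONFIG_XEN) := quirks.o
#
#   obj-y := $(call filterxen, $(obj-y), $(disabled-obj-y))
#   obj-y := $(call cherrypickxen, $(obj-y))
#
# With setup-xen.c present this yields setup-xen.o traps.o, and
# quirks.o is dropped.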