From: Jaemin Ryu <jm77.ryu@samsung.com>
To: Jae-Min Ryu <jm77.ryu@samsung.com>,
	Lars Kurth <lars.kurth@citrix.com>,
	Ian Campbell <Ian.Campbell@citrix.com>,
	Stefano Stabellini <Stefano.Stabellini@eu.citrix.com>,
	"Keir (Xen.org)" <keir@xen.org>,
	Ian Jackson <Ian.Jackson@eu.citrix.com>,
	"xen-arm@lists.xensource.com" <xen-arm@lists.xensource.com>,
	"xen-devel@lists.xensource.com" <xen-devel@lists.xensource.com>
Cc: Sang-bum Suh <sbuk.suh@samsung.com>
Subject: [PATCH 02/14] arm: import the files required for the "arm" port.
Date: Mon, 13 Feb 2012 07:53:10 +0000 (GMT)
Message-ID: <0LZB0032AN8MRKB0@mailout2.samsung.com>

[-- Attachment #1: Type: text/plain, Size: 5990 bytes --]

arm: import the files required for the "arm" port.

 config/arm.mk                      |   28 +++
 xen/arch/arm/Makefile              |   47 +++++
 xen/arch/arm/Rules.mk              |   25 +++
 xen/arch/arm/lib/Makefile          |   11 +
 xen/arch/arm/lib/ashldi3.S         |   45 +++++
 xen/arch/arm/lib/ashrdi3.S         |   46 +++++
 xen/arch/arm/lib/bpabi-asm.S       |   55 ++++++
 xen/arch/arm/lib/bpabi.c           |   51 ++++++
 xen/arch/arm/lib/clearbit.S        |   24 ++
 xen/arch/arm/lib/copy_template.S   |  255 ++++++++++++++++++++++++++++++
 xen/arch/arm/lib/delay.S           |    7 +
 xen/arch/arm/lib/div64.S           |  199 ++++++++++++++++++++++++
 xen/arch/arm/lib/findbit.S         |   81 +++++++++
 xen/arch/arm/lib/gcclib.h          |   33 ++++
 xen/arch/arm/lib/getuser.S         |   77 +++++++++
 xen/arch/arm/lib/lib1funcs.S       |  256 +++++++++++++++++++++++++++++++
 xen/arch/arm/lib/longlong.h        |  183 ++++++++++++++++++++++
 xen/arch/arm/lib/lshrdi3.S         |   17 ++
 xen/arch/arm/lib/math.c            |    3 +
 xen/arch/arm/lib/memchr.S          |   14 +
 xen/arch/arm/lib/memcpy.S          |   60 +++++++
 xen/arch/arm/lib/memmove.S         |  207 +++++++++++++++++++++++++
 xen/arch/arm/lib/memory.S          |  421 +++++++++++++++++++++++++++++++++++++++++++++++++++
 xen/arch/arm/lib/memset.S          |   69 ++++++++
 xen/arch/arm/lib/memzero.S         |   71 ++++++++
 xen/arch/arm/lib/muldi3.c          |   86 ++++++++++
 xen/arch/arm/lib/putuser.S         |   75 +++++++++
 xen/arch/arm/lib/setbit.S          |   22 ++
 xen/arch/arm/lib/strchr.S          |   15 +
 xen/arch/arm/lib/testchangebit.S   |   22 ++
 xen/arch/arm/lib/testclearbit.S    |   22 ++
 xen/arch/arm/lib/testsetbit.S      |   20 ++
 xen/arch/arm/lib/uaccess.S         |  684 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 xen/arch/arm/lib/udivdi3.c         |  242 +++++++++++++++++++++++++++++
 xen/arch/arm/lib/uldivmod.S        |  148 +++++++++++++++++
 xen/arch/arm/tegra/Makefile        |    1 +
 xen/arch/arm/tegra/Rules.mk        |    1 +
 xen/arch/arm/tegra/dummy.c         |    3 +
 xen/arch/arm/xen/Makefile          |   19 ++
 xen/arch/arm/xen/arch_domain.c     |  212 +++++++++++++++++++++++++
 xen/arch/arm/xen/arch_domctl.c     |   43 +++++
 xen/arch/arm/xen/arch_sysctl.c     |   38 ++++
 xen/arch/arm/xen/asm-offsets.c     |   40 ++++
 xen/arch/arm/xen/bug.c             |   32 +++
 xen/arch/arm/xen/cpu.c             |   97 +++++++++++
 xen/arch/arm/xen/crash.c           |   25 +++
 xen/arch/arm/xen/domain_build.c    |   47 +++++
 xen/arch/arm/xen/domain_page.c     |   22 ++
 xen/arch/arm/xen/fault.c           |  123 ++++++++++++++
 xen/arch/arm/xen/grant_table.c     |   53 ++++++
 xen/arch/arm/xen/iommu.c           |   24 ++
 xen/arch/arm/xen/irq.c             |   84 ++++++++++
 xen/arch/arm/xen/machine_kexec.c   |   31 +++
 xen/arch/arm/xen/mm.c              |  194 +++++++++++++++++++++++
 xen/arch/arm/xen/p2m.c             |   44 +++++
 xen/arch/arm/xen/pci.c             |   74 ++++++++
 xen/arch/arm/xen/perfmon.c         |   26 +++
 xen/arch/arm/xen/setup.c           |   64 +++++++
 xen/arch/arm/xen/shutdown.c        |   38 ++++
 xen/arch/arm/xen/time.c            |   83 ++++++++++
 xen/arch/arm/xen/tlb.c             |   26 +++
 xen/arch/arm/xen/xen.lds.S         |  159 +++++++++++++++++++
 xen/include/asm-arm/acpi.h         |    8 +
 xen/include/asm-arm/asm-macros.h   |  106 ++++++++++++
 xen/include/asm-arm/atomic.h       |  179 +++++++++++++++++++++
 xen/include/asm-arm/bitops.h       |  193 +++++++++++++++++++++++
 xen/include/asm-arm/bug.h          |   32 +++
 xen/include/asm-arm/byteorder.h    |    9 +
 xen/include/asm-arm/cache.h        |   11 +
 xen/include/asm-arm/config.h       |   61 +++++++
 xen/include/asm-arm/cpu-domain.h   |   39 ++++
 xen/include/asm-arm/current.h      |   73 ++++++++
 xen/include/asm-arm/debugger.h     |   24 ++
 xen/include/asm-arm/delay.h        |    6 +
 xen/include/asm-arm/div64.h        |   43 +++++
 xen/include/asm-arm/domain.h       |   79 +++++++++
 xen/include/asm-arm/elf.h          |   53 ++++++
 xen/include/asm-arm/event.h        |   39 ++++
 xen/include/asm-arm/flushtlb.h     |   25 +++
 xen/include/asm-arm/grant_table.h  |   62 +++++++
 xen/include/asm-arm/guest_access.h |  136 ++++++++++++++++
 xen/include/asm-arm/hardirq.h      |   21 ++
 xen/include/asm-arm/hypercall.h    |   68 ++++++++
 xen/include/asm-arm/init.h         |    4 +
 xen/include/asm-arm/io.h           |   32 +++
 xen/include/asm-arm/iocap.h        |   15 +
 xen/include/asm-arm/iommu.h        |   14 +
 xen/include/asm-arm/irq.h          |   50 ++++++
 xen/include/asm-arm/mm.h           |  237 ++++++++++++++++++++++++++++
 xen/include/asm-arm/mmu.h          |   11 +
 xen/include/asm-arm/multicall.h    |    9 +
 xen/include/asm-arm/numa.h         |   21 ++
 xen/include/asm-arm/p2m.h          |   10 +
 xen/include/asm-arm/page.h         |   95 +++++++++++
 xen/include/asm-arm/pci.h          |    9 +
 xen/include/asm-arm/percpu.h       |   16 +
 xen/include/asm-arm/processor.h    |  219 ++++++++++++++++++++++++++
 xen/include/asm-arm/regs.h         |   17 ++
 xen/include/asm-arm/smp.h          |   28 +++
 xen/include/asm-arm/softirq.h      |   11 +
 xen/include/asm-arm/spinlock.h     |  200 ++++++++++++++++++++++++
 xen/include/asm-arm/string.h       |   49 +++++
 xen/include/asm-arm/system.h       |  148 +++++++++++++++++
 xen/include/asm-arm/tegra/config.h |   11 +
 xen/include/asm-arm/time.h         |   24 ++
 xen/include/asm-arm/trace.h        |    6 +
 xen/include/asm-arm/types.h        |   58 +++++++
 xen/include/asm-arm/xenoprof.h     |   43 +++++
 xen/include/public/arch-arm.h      |  180 +++++++++++++++++++++
 109 files changed, 8008 insertions(+), 0 deletions(-)

Signed-off-by: Jaemin Ryu <jm77.ryu@samsung.com>


[-- Attachment #2: patch02.diff --]
[-- Type: application/octet-stream, Size: 225168 bytes --]


diff -r e701461b1251 config/arm.mk
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/config/arm.mk	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,28 @@
+#
+# Cross Tool chain configuration
+#
+TOOLCHAIN_PREFIX = /opt/arm-none-linux-gnueabi-old/bin/arm-none-linux-gnueabi-
+
+#
+# Toolchain configuration
+#
+AS              = $(TOOLCHAIN_PREFIX)as
+LD              = $(TOOLCHAIN_PREFIX)ld
+CC              = $(TOOLCHAIN_PREFIX)gcc
+CPP             = $(TOOLCHAIN_PREFIX)gcc -E
+AR              = $(TOOLCHAIN_PREFIX)ar
+RANLIB          = $(TOOLCHAIN_PREFIX)ranlib
+NM              = $(TOOLCHAIN_PREFIX)nm
+STRIP           = $(TOOLCHAIN_PREFIX)strip
+OBJCOPY         = $(TOOLCHAIN_PREFIX)objcopy
+OBJDUMP         = $(TOOLCHAIN_PREFIX)objdump
+
+DISTDIR         ?= $(XEN_ROOT)/dist
+DESTDIR         ?= $(DISTDIR)/install
+
+INSTALL         = install
+INSTALL_DIR     = $(INSTALL) -d -m0755
+INSTALL_DATA    = $(INSTALL) -m0644
+INSTALL_PROG    = $(INSTALL) -m0755
+
+CONFIG_ARM	:= y
diff -r e701461b1251 xen/arch/arm/Makefile
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/Makefile	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,47 @@
+#
+# xen/arch/arm/Makefile
+#
+
+ifndef TARGET_SUBARCH
+$(error XEN_TARGET_SUBARCH must be supplied. See Config.mk file)
+endif
+
+subdir-y += $(TARGET_SUBARCH) xen lib
+
+OBJCOPYFLAGS    :=-O binary -R .note -R .comment -S
+
+$(TARGET): $(TARGET)-syms
+	$(NM) -n $< | grep -v ' [aUw] ' > $(@D)/System.map
+	$(OBJCOPY) -O binary -R .note -R .comment -S $< $@
+
+$(TARGET)-syms: xen.lds $(ALL_OBJS) 
+	$(MAKE) -f $(BASEDIR)/Rules.mk $(BASEDIR)/common/symbols-dummy.o
+	$(LD) $(LDFLAGS) -T xen.lds -N -Map $(@D)/.$(@F).0.map $(ALL_OBJS) \
+	$(BASEDIR)/common/symbols-dummy.o -o $(@D)/.$(@F).0
+	$(NM) -n $(@D)/.$(@F).0 | $(BASEDIR)/tools/symbols >$(@D)/.$(@F).0.S
+	$(MAKE) -f $(BASEDIR)/Rules.mk $(@D)/.$(@F).0.o
+	$(LD) $(LDFLAGS) -T xen.lds -N -Map $(@D)/.$(@F).1.map $(ALL_OBJS) \
+	$(@D)/.$(@F).0.o -o $(@D)/.$(@F).1
+	$(NM) -n $(@D)/.$(@F).1 | $(BASEDIR)/tools/symbols >$(@D)/.$(@F).1.S
+	$(MAKE) -f $(BASEDIR)/Rules.mk $(@D)/.$(@F).1.o
+	$(LD) $(LDFLAGS) -T xen.lds -N -Map $@.map $(ALL_OBJS) \
+	$(@D)/.$(@F).1.o -o $@
+	rm -f $(@D)/.$(@F).[0-9]*
+
+
+xen.lds: $(BASEDIR)/include/asm/arch
+	$(CC) -E $(CFLAGS) -P $(AFLAGS) -o $@ xen/xen.lds.S
+
+clean:: FORCE
+	rm -f xen-bin xen-syms xen.lds asm-offsets.s
+	rm -f *.o $(TARGET_SUBARCH)/*.o lib/*.o xen/*.o xen.lds
+	rm -f $(BASEDIR)/include/asm-arm/arch
+	rm -f $(BASEDIR)/include/asm
+
+asm-offsets.s: $(BASEDIR)/include/asm/arch
+	$(CC) $(CFLAGS) -S -o $@ xen/asm-offsets.c
+
+$(BASEDIR)/include/asm/arch:
+	[ -e $(BASEDIR)/include/asm/arch ] || \
+	ln -sf $(BASEDIR)/include/asm/$(TARGET_SUBARCH) $(BASEDIR)/include/asm/arch
+
diff -r e701461b1251 xen/arch/arm/Rules.mk
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/Rules.mk	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,25 @@
+########################################
+# arm-specific definitions
+
+#
+# If you change any of these configuration options then you must
+# 'make clean' before rebuilding.
+#
+
+ifeq ($(TARGET_SUBARCH),)
+$(error "XEN_TARGET_SUBARCH must be supplied.")
+endif
+
+xenoprof := y
+
+# Each SoC may have its own build rules
+-include $(BASEDIR)/arch/$(TARGET_ARCH)/$(TARGET_SUBARCH)/Rules.mk
+
+CFLAGS	+= -mabi=aapcs-linux -mno-thumb-interwork -fno-builtin -fno-common
+CFLAGS  += -nostdinc -fno-strict-aliasing
+CFLAGS  += -iwithprefix include -Wno-pointer-arith -pipe
+CFLAGS  += -I$(BASEDIR)/include -I$(BASEDIR)/include/security -I$(BASEDIR)/include/security/crypto
+CFLAGS	+= $(CFLAGS-y)
+
+
+
diff -r e701461b1251 xen/arch/arm/lib/Makefile
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/lib/Makefile	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,11 @@
+obj-y += div64.o
+obj-y += memzero.o memset.o memcpy.o memchr.o memmove.o
+obj-y += strchr.o lib1funcs.o 
+obj-y += clearbit.o testchangebit.o testclearbit.o testsetbit.o setbit.o findbit.o
+obj-y += getuser.o putuser.o uaccess.o
+obj-y += ashldi3.o ashrdi3.o
+
+obj-y += muldi3.o
+obj-y += delay.o
+obj-y += lshrdi3.o bpabi.o bpabi-asm.o
+
diff -r e701461b1251 xen/arch/arm/lib/ashldi3.S
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/lib/ashldi3.S	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,45 @@
+/* Copyright 1995, 1996, 1998, 1999, 2000, 2003, 2004, 2005
+   Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file.  (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING.  If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA.  */
+
+
+#include <xen/config.h>
+#include <asm/asm-macros.h>
+
+#define al r0
+#define ah r1
+
+ENTRY(__ashldi3)
+ENTRY(__aeabi_llsl)
+
+	subs	r3, r2, #32
+	rsb	ip, r2, #32
+	movmi	ah, ah, lsl r2
+	movpl	ah, al, lsl r3
+	orrmi	ah, ah, al, lsr ip
+	mov	al, al, lsl r2
+	mov	pc, lr
+
diff -r e701461b1251 xen/arch/arm/lib/ashrdi3.S
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/lib/ashrdi3.S	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,46 @@
+/* Copyright 1995, 1996, 1998, 1999, 2000, 2003, 2004, 2005
+   Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file.  (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING.  If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA.  */
+
+
+#include <xen/config.h>
+#include <xen/errno.h>
+#include <asm/asm-macros.h>
+
+#define al r0
+#define ah r1
+
+ENTRY(__ashrdi3)
+ENTRY(__aeabi_lasr)
+
+	subs	r3, r2, #32
+	rsb	ip, r2, #32
+	movmi	al, al, lsr r2
+	movpl	al, ah, asr r3
+	orrmi	al, al, ah, lsl ip
+	mov	ah, ah, asr r2
+	mov	pc, lr
+
diff -r e701461b1251 xen/arch/arm/lib/bpabi-asm.S
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/lib/bpabi-asm.S	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,55 @@
+#include <xen/config.h>
+#include <asm/asm-macros.h>
+
+#ifdef __ARMEB__
+#define xxh r0
+#define xxl r1
+#define yyh r2
+#define yyl r3
+#else
+#define xxh r1
+#define xxl r0
+#define yyh r3
+#define yyl r2
+#endif	
+	
+#if 0
+ENTRY(__aeabi_ldivmod)
+	stmfd	sp!, {r4-r7, r11, r14}
+	mov	r6, r0
+	mov	r7, r1
+	mov	r5, r2
+	mov	r4, r3
+
+	bl	__divdi3
+
+	mul	r4, r0, r4
+	mla	r12, r5, r1, r4
+
+	umull	r2, r3, r0, r5
+	add	r3, r12, r3
+	subs	r2, r5, r2
+	sbc	r3, r7, r3
+	ldmfd	sp!, {r4-r7, r11, r14}
+
+	bx	r14
+#endif
+
+ENTRY(__aeabi_ldivmod)
+	sub	sp, sp, #8
+	stmfd	sp!, {sp, lr}
+	bl	__gnu_ldivmod_helper (PLT)
+	ldr	lr, [sp, #4]
+	add	sp, sp, #8
+	ldmfd	sp!, {r2, r3}
+	bx	lr
+	
+ENTRY(__aeabi_uldivmod)
+	sub	sp, sp, #8
+	stmfd	sp!, {sp, lr}
+	bl	__gnu_uldivmod_helper (PLT)
+	ldr	lr, [sp, #4]
+	add	sp, sp, #8
+	ldmfd	sp!, {r2, r3}
+	bx	lr
+	
diff -r e701461b1251 xen/arch/arm/lib/bpabi.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/lib/bpabi.c	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,51 @@
+/* Miscellaneous BPABI functions.
+
+   Copyright (C) 2003, 2004  Free Software Foundation, Inc.
+   Contributed by CodeSourcery, LLC.
+
+   This file is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published by the
+   Free Software Foundation; either version 2, or (at your option) any
+   later version.
+
+   In addition to the permissions in the GNU General Public License, the
+   Free Software Foundation gives you unlimited permission to link the
+   compiled version of this file into combinations with other programs,
+   and to distribute those combinations without any restriction coming
+   from the use of this file.  (The General Public License restrictions
+   do apply in other respects; for example, they cover modification of
+   the file, and distribution when not linked into a combine
+   executable.)
+
+   This file is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; see the file COPYING.  If not, write to
+   the Free Software Foundation, 59 Temple Place - Suite 330,
+   Boston, MA 02111-1307, USA.  */
+
+extern long long __divdi3 (long long, long long);
+extern unsigned long long __udivdi3 (unsigned long long, unsigned long long);
+
+long long __gnu_ldivmod_helper (long long a, long long b, long long *remainder)
+{
+	long long quotient;
+
+	quotient = __divdi3 (a, b);
+	*remainder = a - b * quotient;
+	return quotient;
+}
+
+unsigned long long __gnu_uldivmod_helper (unsigned long long a, unsigned long long b, unsigned long long *remainder)
+{
+	unsigned long long quotient;
+
+	quotient = __udivdi3 (a, b);
+	*remainder = a - b * quotient;
+
+	return quotient;
+}
+
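With these helpers and the __aeabi_ldivmod/__aeabi_uldivmod wrappers in
bpabi-asm.S linked in, 64-bit division in C needs no special handling: on
ARM EABI the compiler lowers the operators to those calls itself. A minimal
sketch:

    /* Sketch: GCC compiles these operators into calls to __aeabi_ldivmod /
     * __aeabi_uldivmod, which in turn reach the helpers above. */
    long long div64(long long a, long long b)
    {
        return a / b;       /* __aeabi_ldivmod */
    }

    unsigned long long mod64(unsigned long long a, unsigned long long b)
    {
        return a % b;       /* __aeabi_uldivmod */
    }
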
diff -r e701461b1251 xen/arch/arm/lib/clearbit.S
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/lib/clearbit.S	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,24 @@
+#include <xen/config.h>
+#include <asm/processor.h>
+#include <asm/asm-macros.h>
+
+                .text
+
+/*
+ * Purpose  : Function to clear a bit
+ * Prototype: int clear_bit(int bit, void *addr)
+ */
+ENTRY(_clear_bit_be)
+		eor	r0, r0, #0x18		@ big endian byte ordering
+ENTRY(_clear_bit_le)
+		and	r2, r0, #7
+		mov	r3, #1
+		mov	r3, r3, lsl r2
+		save_and_disable_irqs ip, r2
+		ldrb	r2, [r1, r0, lsr #3]
+		bic	r2, r2, r3
+		strb	r2, [r1, r0, lsr #3]
+		restore_irqs ip
+		mov	pc,lr
+
+
diff -r e701461b1251 xen/arch/arm/lib/copy_template.S
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/lib/copy_template.S	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,255 @@
+/*
+ *  linux/arch/arm/lib/copy_template.s
+ *
+ *  Code template for optimized memory copy functions
+ *
+ *  Author:	Nicolas Pitre
+ *  Created:	Sep 28, 2005
+ *  Copyright:	MontaVista Software, Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+
+/*
+ * This can be used to enable code to cacheline align the source pointer.
+ * Experiments on tested architectures (StrongARM and XScale) didn't show
+ * this a worthwhile thing to do.  That might be different in the future.
+ */
+//#define CALGN(code...)	code
+#define CALGN(code...)
+
+/*
+ * Theory of operation
+ * -------------------
+ *
+ * This file provides the core code for a forward memory copy used in
+ * the implementation of memcpy(), copy_to_user() and copy_from_user().
+ *
+ * The including file must define the following accessor macros
+ * according to the need of the given function:
+ *
+ * ldr1w ptr reg abort
+ *
+ *	This loads one word from 'ptr', stores it in 'reg' and increments
+ *	'ptr' to the next word. The 'abort' argument is used for fixup tables.
+ *
+ * ldr4w ptr reg1 reg2 reg3 reg4 abort
+ * ldr8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
+ *
+ *	This loads four or eight words starting from 'ptr', stores them
+ *	in provided registers and increments 'ptr' past those words.
+ *	The 'abort' argument is used for fixup tables.
+ *
+ * ldr1b ptr reg cond abort
+ *
+ *	Similar to ldr1w, but it loads a byte and increments 'ptr' one byte.
+ *	It also must apply the condition code if provided, otherwise the
+ *	"al" condition is assumed by default.
+ *
+ * str1w ptr reg abort
+ * str8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
+ * str1b ptr reg cond abort
+ *
+ *	Same as their ldr* counterparts, but data is stored to 'ptr' location
+ *	rather than being loaded.
+ *
+ * enter reg1 reg2
+ *
+ *	Preserve the provided registers on the stack plus any additional
+ *	data as needed by the implementation including this code. Called
+ *	upon code entry.
+ *
+ * exit reg1 reg2
+ *
+ *	Restore registers with the values previously saved with the
+ *	'enter' macro. Called upon code termination.
+ */
+
+
+		enter	r4, lr
+
+		subs	r2, r2, #4
+		blt	8f
+		ands	ip, r0, #3
+		pld	[r1, #0]
+		bne	9f
+		ands	ip, r1, #3
+		bne	10f
+
+1:		subs	r2, r2, #(28)
+		stmfd	sp!, {r5 - r8}
+		blt	5f
+
+	CALGN(	ands	ip, r1, #31		)
+	CALGN(	rsb	r3, ip, #32		)
+	CALGN(	sbcnes	r4, r3, r2		)  @ C is always set here
+	CALGN(	bcs	2f			)
+	CALGN(	adr	r4, 6f			)
+	CALGN(	subs	r2, r2, r3		)  @ C gets set
+	CALGN(	add	pc, r4, ip		)
+
+		pld	[r1, #0]
+2:		subs	r2, r2, #96
+		pld	[r1, #28]
+		blt	4f
+		pld	[r1, #60]
+		pld	[r1, #92]
+
+3:		pld	[r1, #124]
+4:		ldr8w	r1, r3, r4, r5, r6, r7, r8, ip, lr, abort=20f
+		subs	r2, r2, #32
+		str8w	r0, r3, r4, r5, r6, r7, r8, ip, lr, abort=20f
+		bge	3b
+		cmn	r2, #96	
+		bge	4b
+
+5:		ands	ip, r2, #28
+		rsb	ip, ip, #32
+		addne	pc, pc, ip		@ C is always clear here
+		b	7f
+6:		nop
+		ldr1w	r1, r3, abort=20f
+		ldr1w	r1, r4, abort=20f
+		ldr1w	r1, r5, abort=20f
+		ldr1w	r1, r6, abort=20f
+		ldr1w	r1, r7, abort=20f
+		ldr1w	r1, r8, abort=20f
+		ldr1w	r1, lr, abort=20f
+
+		add	pc, pc, ip
+		nop
+		nop
+		str1w	r0, r3, abort=20f
+		str1w	r0, r4, abort=20f
+		str1w	r0, r5, abort=20f
+		str1w	r0, r6, abort=20f
+		str1w	r0, r7, abort=20f
+		str1w	r0, r8, abort=20f
+		str1w	r0, lr, abort=20f
+
+	CALGN(	bcs	2b			)
+
+7:		ldmfd	sp!, {r5 - r8}
+
+8:		movs	r2, r2, lsl #31
+		ldr1b	r1, r3, ne, abort=21f
+		ldr1b	r1, r4, cs, abort=21f
+		ldr1b	r1, ip, cs, abort=21f
+		str1b	r0, r3, ne, abort=21f
+		str1b	r0, r4, cs, abort=21f
+		str1b	r0, ip, cs, abort=21f
+
+		exit	r4, pc
+
+9:		rsb	ip, ip, #4
+		cmp	ip, #2
+		ldr1b	r1, r3, gt, abort=21f
+		ldr1b	r1, r4, ge, abort=21f
+		ldr1b	r1, lr, abort=21f
+		str1b	r0, r3, gt, abort=21f
+		str1b	r0, r4, ge, abort=21f
+		subs	r2, r2, ip
+		str1b	r0, lr, abort=21f
+		blt	8b
+		ands	ip, r1, #3
+		beq	1b
+
+10:		bic	r1, r1, #3
+		cmp	ip, #2
+		ldr1w	r1, lr, abort=21f
+		beq	17f
+		bgt	18f
+
+
+		.macro	forward_copy_shift pull push
+
+		subs	r2, r2, #28
+		blt	14f
+
+	CALGN(	ands	ip, r1, #31		)
+	CALGN(	rsb	ip, ip, #32		)
+	CALGN(	sbcnes	r4, ip, r2		)  @ C is always set here
+	CALGN(	subcc	r2, r2, ip		)
+	CALGN(	bcc	15f			)
+
+11:		stmfd	sp!, {r5 - r9}
+
+		pld	[r1, #0]
+		subs	r2, r2, #96
+		pld	[r1, #28]
+		blt	13f
+		pld	[r1, #60]
+		pld	[r1, #92]
+
+12:		pld	[r1, #124]
+13:		ldr4w	r1, r4, r5, r6, r7, abort=19f
+		mov	r3, lr, pull #\pull
+		subs	r2, r2, #32
+		ldr4w	r1, r8, r9, ip, lr, abort=19f
+		orr	r3, r3, r4, push #\push
+		mov	r4, r4, pull #\pull
+		orr	r4, r4, r5, push #\push
+		mov	r5, r5, pull #\pull
+		orr	r5, r5, r6, push #\push
+		mov	r6, r6, pull #\pull
+		orr	r6, r6, r7, push #\push
+		mov	r7, r7, pull #\pull
+		orr	r7, r7, r8, push #\push
+		mov	r8, r8, pull #\pull
+		orr	r8, r8, r9, push #\push
+		mov	r9, r9, pull #\pull
+		orr	r9, r9, ip, push #\push
+		mov	ip, ip, pull #\pull
+		orr	ip, ip, lr, push #\push
+		str8w	r0, r3, r4, r5, r6, r7, r8, r9, ip, , abort=19f
+		bge	12b
+		cmn	r2, #96	
+		bge	13b
+
+		ldmfd	sp!, {r5 - r9}
+
+14:		ands	ip, r2, #28
+		beq	16f
+
+15:		mov	r3, lr, pull #\pull
+		ldr1w	r1, lr, abort=21f
+		subs	ip, ip, #4
+		orr	r3, r3, lr, push #\push
+		str1w	r0, r3, abort=21f
+		bgt	15b
+	CALGN(	cmp	r2, #0			)
+	CALGN(	bge	11b			)
+
+16:		sub	r1, r1, #(\push / 8)
+		b	8b
+
+		.endm
+
+
+		forward_copy_shift	pull=8	push=24
+
+17:		forward_copy_shift	pull=16	push=16
+
+18:		forward_copy_shift	pull=24	push=8
+
+
+/*
+ * Abort preamble and completion macros.
+ * If a fixup handler is required then those macros must surround it.
+ * It is assumed that the fixup code will handle the private part of
+ * the exit macro.
+ */
+
+	.macro	copy_abort_preamble
+19:	ldmfd	sp!, {r5 - r9}
+	b	21f
+20:	ldmfd	sp!, {r5 - r8}
+21:
+	.endm
+
+	.macro	copy_abort_end
+	ldmfd	sp!, {r4, pc}
+	.endm
+
diff -r e701461b1251 xen/arch/arm/lib/delay.S
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/lib/delay.S	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,7 @@
+#include <xen/config.h>
+#include <asm/asm-macros.h>
+
+		.text
+
+ENTRY(_udelay)
+	mov	pc,lr
diff -r e701461b1251 xen/arch/arm/lib/div64.S
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/lib/div64.S	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,199 @@
+/*
+ *  linux/arch/arm/lib/div64.S
+ *
+ *  Optimized computation of 64-bit dividend / 32-bit divisor  
+ *
+ *  Author:	Nicolas Pitre
+ *  Created:	Oct 5, 2003
+ *  Copyright:	Monta Vista Software, Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#include <xen/config.h>
+#include <asm/asm-macros.h>
+#ifdef __ARMEB__
+#define xh r0
+#define xl r1
+#define yh r2
+#define yl r3
+#else
+#define xl r0
+#define xh r1
+#define yl r2
+#define yh r3
+#endif
+
+/*
+ * __do_div64: perform a division with 64-bit dividend and 32-bit divisor.
+ *
+ * Note: Calling convention is totally non standard for optimal code.
+ *       This is meant to be used by do_div() from include/asm/div64.h only.
+ *
+ * Input parameters:
+ * 	xh-xl	= dividend (clobbered)
+ * 	r4	= divisor (preserved)
+ *
+ * Output values:
+ * 	yh-yl	= result
+ * 	xh	= remainder
+ *
+ * Clobbered regs: xl, ip
+ */
+
+ENTRY(__do_div64)
+
+	@ Test for easy paths first.
+	subs	ip, r4, #1
+	bls	9f			@ divisor is 0 or 1
+	tst	ip, r4
+	beq	8f			@ divisor is power of 2
+
+	@ See if we need to handle upper 32-bit result.
+	cmp	xh, r4
+	mov	yh, #0
+	blo	3f
+
+	@ Align divisor with upper part of dividend.
+	@ The aligned divisor is stored in yl preserving the original.
+	@ The bit position is stored in ip.
+
+#if __LINUX_ARM_ARCH__ >= 5
+
+	clz	yl, r4
+	clz	ip, xh
+	sub	yl, yl, ip
+	mov	ip, #1
+	mov	ip, ip, lsl yl
+	mov	yl, r4, lsl yl
+
+#else
+
+	mov	yl, r4
+	mov	ip, #1
+1:	cmp	yl, #0x80000000
+	cmpcc	yl, xh
+	movcc	yl, yl, lsl #1
+	movcc	ip, ip, lsl #1
+	bcc	1b
+
+#endif
+
+	@ The division loop for needed upper bit positions.
+ 	@ Break out early if dividend reaches 0.
+2:	cmp	xh, yl
+	orrcs	yh, yh, ip
+	subcss	xh, xh, yl
+	movnes	ip, ip, lsr #1
+	mov	yl, yl, lsr #1
+	bne	2b
+
+	@ See if we need to handle lower 32-bit result.
+3:	cmp	xh, #0
+	mov	yl, #0
+	cmpeq	xl, r4
+	movlo	xh, xl
+	movlo	pc, lr
+
+	@ The division loop for lower bit positions.
+	@ Here we shift remainder bits leftwards rather than moving the
+	@ divisor for comparisons, considering the carry-out bit as well.
+	mov	ip, #0x80000000
+4:	movs	xl, xl, lsl #1
+	adcs	xh, xh, xh
+	beq	6f
+	cmpcc	xh, r4
+5:	orrcs	yl, yl, ip
+	subcs	xh, xh, r4
+	movs	ip, ip, lsr #1
+	bne	4b
+	mov	pc, lr
+
+	@ The top part of remainder became zero.  If carry is set
+	@ (the 33rd bit) this is a false positive so resume the loop.
+	@ Otherwise, if lower part is also null then we are done.
+6:	bcs	5b
+	cmp	xl, #0
+	moveq	pc, lr
+
+	@ We still have remainder bits in the low part.  Bring them up.
+
+#if __LINUX_ARM_ARCH__ >= 5
+
+	clz	xh, xl			@ we know xh is zero here so...
+	add	xh, xh, #1
+	mov	xl, xl, lsl xh
+	mov	ip, ip, lsr xh
+
+#else
+
+7:	movs	xl, xl, lsl #1
+	mov	ip, ip, lsr #1
+	bcc	7b
+
+#endif
+
+	@ Current remainder is now 1.  It is worthless to compare with
+	@ divisor at this point since the divisor cannot be smaller than 3 here.
+	@ If possible, branch for another shift in the division loop.
+	@ If no bit position left then we are done.
+	movs	ip, ip, lsr #1
+	mov	xh, #1
+	bne	4b
+	mov	pc, lr
+
+8:	@ Division by a power of 2: determine what that divisor order is
+	@ then simply shift values around
+
+#if __LINUX_ARM_ARCH__ >= 5
+
+	clz	ip, r4
+	rsb	ip, ip, #31
+
+#else
+
+	mov	yl, r4
+	cmp	r4, #(1 << 16)
+	mov	ip, #0
+	movhs	yl, yl, lsr #16
+	movhs	ip, #16
+
+	cmp	yl, #(1 << 8)
+	movhs	yl, yl, lsr #8
+	addhs	ip, ip, #8
+
+	cmp	yl, #(1 << 4)
+	movhs	yl, yl, lsr #4
+	addhs	ip, ip, #4
+
+	cmp	yl, #(1 << 2)
+	addhi	ip, ip, #3
+	addls	ip, ip, yl, lsr #1
+
+#endif
+
+	mov	yh, xh, lsr ip
+	mov	yl, xl, lsr ip
+	rsb	ip, ip, #32
+	orr	yl, yl, xh, lsl ip
+	mov	xh, xl, lsl ip
+	mov	xh, xh, lsr ip
+	mov	pc, lr
+
+	@ eq -> division by 1: obvious enough...
+9:	moveq	yl, xl
+	moveq	yh, xh
+	moveq	xh, #0
+	moveq	pc, lr
+
+	@ Division by 0:
+	str	lr, [sp, #-8]!
+	bl	__div0
+
+	@ as wrong as it could be...
+	mov	yl, #0
+	mov	yh, #0
+	mov	xh, #0
+	ldr	pc, [sp], #8
+
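__do_div64 is not called directly; it is reached through the do_div() macro
(see xen/include/asm-arm/div64.h in this series). A sketch of the usual
Linux-style convention this file follows:

    /* Sketch: do_div(n, base) divides the 64-bit n in place and
     * yields the 32-bit remainder. */
    unsigned long ns_remainder(void)
    {
        unsigned long long ticks = 1000000123ULL;
        unsigned long rem = do_div(ticks, 1000000000UL);
        /* now ticks == 1 (quotient) and rem == 123 */
        return rem;
    }
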
diff -r e701461b1251 xen/arch/arm/lib/findbit.S
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/lib/findbit.S	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,81 @@
+#include <xen/config.h>
+#include <asm/asm-macros.h>
+
+                .text
+
+/*
+ * Purpose  : Find a 'zero' bit
+ * Prototype: int find_first_zero_bit(void *addr, unsigned int maxbit);
+ */
+ENTRY(_find_first_zero_bit)
+		teq	r1, #0	
+		beq	3f
+		mov	r2, #0
+1:		ldrb	r3, [r0, r2, lsr #3]
+		eors	r3, r3, #0xff		@ invert bits
+		bne	.found			@ any now set - found zero bit
+		add	r2, r2, #8		@ next bit pointer
+2:		cmp	r2, r1			@ any more?
+		blo	1b
+3:		mov	r0, r1			@ no free bits
+		mov	pc,lr
+
+/*
+ * Purpose  : Find next 'zero' bit
+ * Prototype: int find_next_zero_bit(void *addr, unsigned int maxbit, int offset)
+ */
+ENTRY(_find_next_zero_bit)
+		teq	r1, #0
+		beq	3b
+		ands	ip, r2, #7
+		beq	1b			@ If new byte, goto old routine
+		ldrb	r3, [r0, r2, lsr #3]
+		eor	r3, r3, #0xff		@ now looking for a 1 bit
+		movs	r3, r3, lsr ip		@ shift off unused bits
+		bne	.found
+		orr	r2, r2, #7		@ if zero, then no bits here
+		add	r2, r2, #1		@ align bit pointer
+		b	2b			@ loop for next bit
+
+/*
+ * Purpose  : Find a 'one' bit
+ * Prototype: int find_first_bit(const unsigned long *addr, unsigned int maxbit);
+ */
+ENTRY(_find_first_bit)
+		teq	r1, #0	
+		beq	3f
+		mov	r2, #0
+1:		ldrb	r3, [r0, r2, lsr #3]
+		movs	r3, r3
+		bne	.found			@ any now set - found a set bit
+		add	r2, r2, #8		@ next bit pointer
+2:		cmp	r2, r1			@ any more?
+		blo	1b
+3:		mov	r0, r1			@ no free bits
+		mov	pc,lr
+
+/*
+ * Purpose  : Find next 'one' bit
+ * Prototype: int find_next_bit(const unsigned long *addr, unsigned int maxbit, int offset)
+ */
+ENTRY(_find_next_bit)
+		teq	r1, #0
+		beq	3b
+		ands	ip, r2, #7
+		beq	1b			@ If new byte, goto old routine
+		ldrb	r3, [r0, r2, lsr #3]
+		movs	r3, r3, lsr ip		@ shift off unused bits
+		bne	.found
+		orr	r2, r2, #7		@ if zero, then no bits here
+		add	r2, r2, #1		@ align bit pointer
+		b	2b			@ loop for next bit
+
+  
+.found:
+		rsb	r1, r3, #0
+		and	r3, r3, r1
+		clz	r3, r3
+		rsb	r3, r3, #31
+		add	r0, r2, r3
+		mov	pc,lr
+
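These entry points are normally reached through the wrappers in
xen/include/asm-arm/bitops.h (also part of this patch). A sketch, assuming
the usual find_first_zero_bit() wrapper over _find_first_zero_bit:

    /* Sketch: little-endian bit numbering, as the routines above use. */
    int first_free(void)
    {
        unsigned long map[2] = { ~0UL, ~1UL }; /* bits 0-31 set, bit 32 clear */
        return find_first_zero_bit(map, 64);   /* returns 32 */
    }
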
diff -r e701461b1251 xen/arch/arm/lib/gcclib.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/lib/gcclib.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,33 @@
+/* gcclib.h -- definitions for various functions 'borrowed' from gcc-2.95.3 */
+/* I Molton     29/07/01 */
+
+#ifndef __GCCLIB_H__
+#define __GCCLIB_H__
+#define BITS_PER_UNIT  8
+#define SI_TYPE_SIZE (sizeof (SItype) * BITS_PER_UNIT)
+
+typedef unsigned int UQItype    __attribute__ ((mode (QI)));
+typedef          int SItype     __attribute__ ((mode (SI)));
+typedef unsigned int USItype    __attribute__ ((mode (SI)));
+typedef          int DItype     __attribute__ ((mode (DI)));
+typedef          int word_type 	__attribute__ ((mode (__word__)));
+typedef unsigned int UDItype    __attribute__ ((mode (DI)));
+
+#ifdef __ARMEB__
+  struct DIstruct {SItype high, low;};
+#else
+  struct DIstruct {SItype low, high;};
+#endif
+
+typedef union
+{
+  struct DIstruct s;
+  DItype ll;
+} DIunion;
+
+typedef struct __attribute__((reg_return))
+{
+        long long quot;
+        long long rem;
+} lldiv_t_rr;
+#endif
diff -r e701461b1251 xen/arch/arm/lib/getuser.S
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/lib/getuser.S	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,77 @@
+/*
+ *  linux/arch/arm/lib/getuser.S
+ *
+ *  Copyright (C) 2001 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Idea from x86 version, (C) Copyright 1998 Linus Torvalds
+ *
+ * These functions have a non-standard call interface to make them more
+ * efficient, especially as they return an error value in addition to
+ * the "real" return value.
+ *
+ * __get_user_X
+ *
+ * Inputs:	r0 contains the address
+ * Outputs:	r0 is the error code
+ *		r2, r3 contains the zero-extended value
+ *		lr corrupted
+ *
+ * No other registers must be altered.  (see include/asm-arm/uaccess.h
+ * for specific ASM register usage).
+ *
+ * Note that ADDR_LIMIT is either 0 or 0xc0000000.
+ * Note also that it is intended that __get_user_bad is not global.
+ */
+#include <xen/errno.h>
+
+	.global	__get_user_1
+__get_user_1:
+1:	ldrbt	r2, [r0]
+	mov	r0, #0
+	mov	pc, lr
+
+	.global	__get_user_2
+__get_user_2:
+2:	ldrbt	r2, [r0], #1
+3:	ldrbt	r3, [r0]
+#ifndef __ARMEB__
+	orr	r2, r2, r3, lsl #8
+#else
+	orr	r2, r3, r2, lsl #8
+#endif
+	mov	r0, #0
+	mov	pc, lr
+
+	.global	__get_user_4
+__get_user_4:
+4:	ldrt	r2, [r0]
+	mov	r0, #0
+	mov	pc, lr
+
+	.global	__get_user_8
+__get_user_8:
+5:	ldrt	r2, [r0], #4
+6:	ldrt	r3, [r0]
+	mov	r0, #0
+	mov	pc, lr
+
+	.global __get_user_bad
+__get_user_bad_8:
+	mov	r3, #0
+__get_user_bad:
+	mov	r2, #0
+	mov	r0, #-EFAULT
+	mov	pc, lr
+
+.section __ex_table, "a"
+	.long	1b, __get_user_bad
+	.long	2b, __get_user_bad
+	.long	3b, __get_user_bad
+	.long	4b, __get_user_bad
+	.long	5b, __get_user_bad_8
+	.long	6b, __get_user_bad_8
+.previous
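From C these stubs are wrapped by a get_user()-style macro (the wrapper
itself lives in the uaccess/guest-access headers, not in this file). A
hedged sketch of the intended use, with guest_addr as a placeholder:

    /* Sketch: get_user() selects __get_user_{1,2,4,8} by access size;
     * a faulting load is redirected through the __ex_table entries above
     * and surfaces as -EFAULT. */
    int read_guest_word(unsigned long guest_addr, unsigned int *out)
    {
        unsigned int val;
        if (get_user(val, (unsigned int *)guest_addr))
            return -EFAULT;
        *out = val;
        return 0;
    }
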
diff -r e701461b1251 xen/arch/arm/lib/lib1funcs.S
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/lib/lib1funcs.S	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,256 @@
+#include <xen/config.h>
+#include <asm/asm-macros.h>
+
+
+.macro ARM_DIV_BODY dividend, divisor, result, curbit
+
+	@ Initially shift the divisor left 3 bits if possible,
+	@ set curbit accordingly.  This allows for curbit to be located
+	@ at the left end of each 4 bit nibbles in the division loop
+	@ to save one loop in most cases.
+	tst	\divisor, #0xe0000000
+	moveq	\divisor, \divisor, lsl #3
+	moveq	\curbit, #8
+	movne	\curbit, #1
+
+	@ Unless the divisor is very big, shift it up in multiples of
+	@ four bits, since this is the amount of unwinding in the main
+	@ division loop.  Continue shifting until the divisor is 
+	@ larger than the dividend.
+1:	cmp	\divisor, #0x10000000
+	cmplo	\divisor, \dividend
+	movlo	\divisor, \divisor, lsl #4
+	movlo	\curbit, \curbit, lsl #4
+	blo	1b
+
+	@ For very big divisors, we must shift it a bit at a time, or
+	@ we will be in danger of overflowing.
+1:	cmp	\divisor, #0x80000000
+	cmplo	\divisor, \dividend
+	movlo	\divisor, \divisor, lsl #1
+	movlo	\curbit, \curbit, lsl #1
+	blo	1b
+
+	mov	\result, #0
+
+	@ Division loop
+1:	cmp	\dividend, \divisor
+	subhs	\dividend, \dividend, \divisor
+	orrhs	\result,   \result,   \curbit
+	cmp	\dividend, \divisor,  lsr #1
+	subhs	\dividend, \dividend, \divisor, lsr #1
+	orrhs	\result,   \result,   \curbit,  lsr #1
+	cmp	\dividend, \divisor,  lsr #2
+	subhs	\dividend, \dividend, \divisor, lsr #2
+	orrhs	\result,   \result,   \curbit,  lsr #2
+	cmp	\dividend, \divisor,  lsr #3
+	subhs	\dividend, \dividend, \divisor, lsr #3
+	orrhs	\result,   \result,   \curbit,  lsr #3
+	cmp	\dividend, #0			@ Early termination?
+	movnes	\curbit,   \curbit,  lsr #4	@ No, any more bits to do?
+	movne	\divisor,  \divisor, lsr #4
+	bne	1b
+
+.endm
+
+
+.macro ARM_DIV2_ORDER divisor, order
+	cmp	\divisor, #(1 << 16)
+	movhs	\divisor, \divisor, lsr #16
+	movhs	\order, #16
+	movlo	\order, #0
+
+	cmp	\divisor, #(1 << 8)
+	movhs	\divisor, \divisor, lsr #8
+	addhs	\order, \order, #8
+
+	cmp	\divisor, #(1 << 4)
+	movhs	\divisor, \divisor, lsr #4
+	addhs	\order, \order, #4
+
+	cmp	\divisor, #(1 << 2)
+	addhi	\order, \order, #3
+	addls	\order, \order, \divisor, lsr #1
+.endm
+
+
+.macro ARM_MOD_BODY dividend, divisor, order, spare
+	mov	\order, #0
+
+	@ Unless the divisor is very big, shift it up in multiples of
+	@ four bits, since this is the amount of unwinding in the main
+	@ division loop.  Continue shifting until the divisor is 
+	@ larger than the dividend.
+1:	cmp	\divisor, #0x10000000
+	cmplo	\divisor, \dividend
+	movlo	\divisor, \divisor, lsl #4
+	addlo	\order, \order, #4
+	blo	1b
+
+	@ For very big divisors, we must shift it a bit at a time, or
+	@ we will be in danger of overflowing.
+1:	cmp	\divisor, #0x80000000
+	cmplo	\divisor, \dividend
+	movlo	\divisor, \divisor, lsl #1
+	addlo	\order, \order, #1
+	blo	1b
+
+	@ Perform all needed subtractions to keep only the remainder.
+	@ Do comparisons in batch of 4 first.
+	subs	\order, \order, #3		@ yes, 3 is intended here
+	blt	2f
+
+1:	cmp	\dividend, \divisor
+	subhs	\dividend, \dividend, \divisor
+	cmp	\dividend, \divisor,  lsr #1
+	subhs	\dividend, \dividend, \divisor, lsr #1
+	cmp	\dividend, \divisor,  lsr #2
+	subhs	\dividend, \dividend, \divisor, lsr #2
+	cmp	\dividend, \divisor,  lsr #3
+	subhs	\dividend, \dividend, \divisor, lsr #3
+	cmp	\dividend, #1
+	mov	\divisor, \divisor, lsr #4
+	subges	\order, \order, #4
+	bge	1b
+
+	tst	\order, #3
+	teqne	\dividend, #0
+	beq	5f
+
+	@ Either 1, 2 or 3 comparisons/subtractions are left.
+2:	cmn	\order, #2
+	blt	4f
+	beq	3f
+	cmp	\dividend, \divisor
+	subhs	\dividend, \dividend, \divisor
+	mov	\divisor,  \divisor,  lsr #1
+3:	cmp	\dividend, \divisor
+	subhs	\dividend, \dividend, \divisor
+	mov	\divisor,  \divisor,  lsr #1
+4:	cmp	\dividend, \divisor
+	subhs	\dividend, \dividend, \divisor
+5:
+.endm
+
+
+ENTRY(__udivsi3)
+ENTRY(__aeabi_uidiv)
+	subs	r2, r1, #1
+	moveq	pc, lr
+	bcc	Ldiv0
+	cmp	r0, r1
+	bls	11f
+	tst	r1, r2
+	beq	12f
+
+	ARM_DIV_BODY r0, r1, r2, r3
+
+	mov	r0, r2
+	mov	pc, lr
+
+11:	moveq	r0, #1
+	movne	r0, #0
+	mov	pc, lr
+
+12:	ARM_DIV2_ORDER r1, r2
+
+	mov	r0, r0, lsr r2
+	mov	pc, lr
+
+
+ENTRY(__umodsi3)
+	subs	r2, r1, #1			@ compare divisor with 1
+	bcc	Ldiv0
+	cmpne	r0, r1				@ compare dividend with divisor
+	moveq   r0, #0
+	tsthi	r1, r2				@ see if divisor is power of 2
+	andeq	r0, r0, r2
+	movls	pc, lr
+
+	ARM_MOD_BODY r0, r1, r2, r3
+
+	mov	pc, lr
+
+
+ENTRY(__divsi3)
+ENTRY(__aeabi_idiv)
+	cmp	r1, #0
+	eor	ip, r0, r1			@ save the sign of the result.
+	beq	Ldiv0
+	rsbmi	r1, r1, #0			@ loops below use unsigned.
+	subs	r2, r1, #1			@ division by 1 or -1 ?
+	beq	10f
+	movs	r3, r0
+	rsbmi	r3, r0, #0			@ positive dividend value
+	cmp	r3, r1
+	bls	11f
+	tst	r1, r2				@ divisor is power of 2 ?
+	beq	12f
+
+	ARM_DIV_BODY r3, r1, r0, r2
+
+	cmp	ip, #0
+	rsbmi	r0, r0, #0
+	mov	pc, lr
+
+10:	teq	ip, r0				@ same sign ?
+	rsbmi	r0, r0, #0
+	mov	pc, lr
+
+11:	movlo	r0, #0
+	moveq	r0, ip, asr #31
+	orreq	r0, r0, #1
+	mov	pc, lr
+
+12:	ARM_DIV2_ORDER r1, r2
+
+	cmp	ip, #0
+	mov	r0, r3, lsr r2
+	rsbmi	r0, r0, #0
+	mov	pc, lr
+
+
+ENTRY(__modsi3)
+
+	cmp	r1, #0
+	beq	Ldiv0
+	rsbmi	r1, r1, #0			@ loops below use unsigned.
+	movs	ip, r0				@ preserve sign of dividend
+	rsbmi	r0, r0, #0			@ if negative make positive
+	subs	r2, r1, #1			@ compare divisor with 1
+	cmpne	r0, r1				@ compare dividend with divisor
+	moveq	r0, #0
+	tsthi	r1, r2				@ see if divisor is power of 2
+	andeq	r0, r0, r2
+	bls	10f
+
+	ARM_MOD_BODY r0, r1, r2, r3
+
+10:	cmp	ip, #0
+	rsbmi	r0, r0, #0
+	mov	pc, lr
+
+ENTRY(__aeabi_uidivmod)
+	stmfd   sp!, {r0, r1, ip, lr}
+	bl      __aeabi_uidiv
+	ldmfd   sp!, {r1, r2, ip, lr}
+	mul     r3, r0, r2
+	sub     r1, r1, r3
+	mov     pc, lr
+
+ENTRY(__aeabi_idivmod)
+	stmfd   sp!, {r0, r1, ip, lr}
+	bl      __aeabi_idiv
+	ldmfd   sp!, {r1, r2, ip, lr}
+	mul     r3, r0, r2
+	sub     r1, r1, r3
+	mov     pc, lr
+
+Ldiv0:
+
+	str	lr, [sp, #-8]!
+	bl	__div0
+	mov	r0, #0			@ About as wrong as it could be.
+	ldr	pc, [sp], #8
+
+
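As with the 64-bit helpers, these entry points are emitted by the compiler:
plain 32-bit '/' and '%' on ARM EABI lower to __aeabi_(u)idiv and
__aeabi_(u)idivmod. Sketch:

    /* Sketch: no explicit calls are needed in C. */
    unsigned int udiv(unsigned int a, unsigned int b)
    {
        return a / b;       /* __aeabi_uidiv */
    }

    int imod(int a, int b)
    {
        return a % b;       /* via __aeabi_idivmod */
    }
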
diff -r e701461b1251 xen/arch/arm/lib/longlong.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/lib/longlong.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,183 @@
+/* longlong.h -- based on code from gcc-2.95.3
+
+   definitions for mixed size 32/64 bit arithmetic.
+   Copyright (C) 1991, 92, 94, 95, 96, 1997, 1998 Free Software Foundation, Inc.
+
+   This definition file is free software; you can redistribute it
+   and/or modify it under the terms of the GNU General Public
+   License as published by the Free Software Foundation; either
+   version 2, or (at your option) any later version.
+
+   This definition file is distributed in the hope that it will be
+   useful, but WITHOUT ANY WARRANTY; without even the implied
+   warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+   See the GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place - Suite 330,
+   Boston, MA 02111-1307, USA.  */
+
+/* Borrowed from GCC 2.95.3, I Molton 29/07/01 */
+
+#ifndef SI_TYPE_SIZE
+#define SI_TYPE_SIZE 32
+#endif
+
+#define __BITS4 (SI_TYPE_SIZE / 4)
+#define __ll_B (1L << (SI_TYPE_SIZE / 2))
+#define __ll_lowpart(t) ((USItype) (t) % __ll_B)
+#define __ll_highpart(t) ((USItype) (t) / __ll_B)
+
+/* Define auxiliary asm macros.
+
+   1) umul_ppmm(high_prod, low_prod, multiplier, multiplicand)
+   multiplies two USItype integers MULTIPLIER and MULTIPLICAND,
+   and generates a two-part USItype product in HIGH_PROD and
+   LOW_PROD.
+
+   2) __umulsidi3(a,b) multiplies two USItype integers A and B,
+   and returns a UDItype product.  This is just a variant of umul_ppmm.
+
+   3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
+   denominator) divides a two-word unsigned integer, composed by the
+   integers HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and
+   places the quotient in QUOTIENT and the remainder in REMAINDER.
+   HIGH_NUMERATOR must be less than DENOMINATOR for correct operation.
+   If, in addition, the most significant bit of DENOMINATOR must be 1,
+   then the pre-processor symbol UDIV_NEEDS_NORMALIZATION is defined to 1.
+
+   4) sdiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
+   denominator).  Like udiv_qrnnd but the numbers are signed.  The
+   quotient is rounded towards 0.
+
+   5) count_leading_zeros(count, x) counts the number of zero-bits from
+   the msb to the first non-zero bit.  This is the number of steps X
+   needs to be shifted left to set the msb.  Undefined for X == 0.
+
+   6) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
+   high_addend_2, low_addend_2) adds two two-word unsigned integers,
+   composed by HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and
+   LOW_ADDEND_2 respectively.  The result is placed in HIGH_SUM and
+   LOW_SUM.  Overflow (i.e. carry out) is not stored anywhere, and is
+   lost.
+
+   7) sub_ddmmss(high_difference, low_difference, high_minuend,
+   low_minuend, high_subtrahend, low_subtrahend) subtracts two
+   two-word unsigned integers, composed by HIGH_MINUEND_1 and
+   LOW_MINUEND_1, and HIGH_SUBTRAHEND_2 and LOW_SUBTRAHEND_2
+   respectively.  The result is placed in HIGH_DIFFERENCE and
+   LOW_DIFFERENCE.  Overflow (i.e. carry out) is not stored anywhere,
+   and is lost.
+
+   If any of these macros are left undefined for a particular CPU,
+   C macros are used.  */
+
+#if defined (__arm__)
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("adds	%1, %4, %5					\n\
+	adc	%0, %2, %3"						\
+	   : "=r" ((USItype) (sh)),					\
+	     "=&r" ((USItype) (sl))					\
+	   : "%r" ((USItype) (ah)),					\
+	     "rI" ((USItype) (bh)),					\
+	     "%r" ((USItype) (al)),					\
+	     "rI" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("subs	%1, %4, %5					\n\
+	sbc	%0, %2, %3"						\
+	   : "=r" ((USItype) (sh)),					\
+	     "=&r" ((USItype) (sl))					\
+	   : "r" ((USItype) (ah)),					\
+	     "rI" ((USItype) (bh)),					\
+	     "r" ((USItype) (al)),					\
+	     "rI" ((USItype) (bl)))
+#define umul_ppmm(xh, xl, a, b) \
+{register USItype __t0, __t1, __t2;					\
+  __asm__ ("%@ Inlined umul_ppmm					\n\
+	mov	%2, %5, lsr #16						\n\
+	mov	%0, %6, lsr #16						\n\
+	bic	%3, %5, %2, lsl #16					\n\
+	bic	%4, %6, %0, lsl #16					\n\
+	mul	%1, %3, %4						\n\
+	mul	%4, %2, %4						\n\
+	mul	%3, %0, %3						\n\
+	mul	%0, %2, %0						\n\
+	adds	%3, %4, %3						\n\
+	addcs	%0, %0, #65536						\n\
+	adds	%1, %1, %3, lsl #16					\n\
+	adc	%0, %0, %3, lsr #16"					\
+	   : "=&r" ((USItype) (xh)),					\
+	     "=r" ((USItype) (xl)),					\
+	     "=&r" (__t0), "=&r" (__t1), "=r" (__t2)			\
+	   : "r" ((USItype) (a)),					\
+	     "r" ((USItype) (b)));}
+#define UMUL_TIME 20
+#define UDIV_TIME 100
+#endif /* __arm__ */
+
+#define __umulsidi3(u, v) \
+  ({DIunion __w;							\
+    umul_ppmm (__w.s.high, __w.s.low, u, v);				\
+    __w.ll; })
+
+#define __udiv_qrnnd_c(q, r, n1, n0, d) \
+  do {									\
+    USItype __d1, __d0, __q1, __q0;					\
+    USItype __r1, __r0, __m;						\
+    __d1 = __ll_highpart (d);						\
+    __d0 = __ll_lowpart (d);						\
+									\
+    __r1 = (n1) % __d1;							\
+    __q1 = (n1) / __d1;							\
+    __m = (USItype) __q1 * __d0;					\
+    __r1 = __r1 * __ll_B | __ll_highpart (n0);				\
+    if (__r1 < __m)							\
+      {									\
+	__q1--, __r1 += (d);						\
+	if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
+	  if (__r1 < __m)						\
+	    __q1--, __r1 += (d);					\
+      }									\
+    __r1 -= __m;							\
+									\
+    __r0 = __r1 % __d1;							\
+    __q0 = __r1 / __d1;							\
+    __m = (USItype) __q0 * __d0;					\
+    __r0 = __r0 * __ll_B | __ll_lowpart (n0);				\
+    if (__r0 < __m)							\
+      {									\
+	__q0--, __r0 += (d);						\
+	if (__r0 >= (d))						\
+	  if (__r0 < __m)						\
+	    __q0--, __r0 += (d);					\
+      }									\
+    __r0 -= __m;							\
+									\
+    (q) = (USItype) __q1 * __ll_B | __q0;				\
+    (r) = __r0;								\
+  } while (0)
+
+#define UDIV_NEEDS_NORMALIZATION 1
+#define udiv_qrnnd __udiv_qrnnd_c
+
+#define count_leading_zeros(count, x) \
+  do {									\
+    USItype __xr = (x);							\
+    USItype __a;							\
+									\
+    if (SI_TYPE_SIZE <= 32)						\
+      {									\
+	__a = __xr < ((USItype)1<<2*__BITS4)				\
+	  ? (__xr < ((USItype)1<<__BITS4) ? 0 : __BITS4)		\
+	  : (__xr < ((USItype)1<<3*__BITS4) ?  2*__BITS4 : 3*__BITS4);	\
+      }									\
+    else								\
+      {									\
+	for (__a = SI_TYPE_SIZE - 8; __a > 0; __a -= 8)			\
+	  if (((__xr >> __a) & 0xff) != 0)				\
+	    break;							\
+      }									\
+									\
+    (count) = SI_TYPE_SIZE - (__clz_tab[__xr >> __a] + __a);		\
+  } while (0)
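A short sketch of the umul_ppmm() primitive documented above, using the
types from gcclib.h (both headers are assumed to be included, as the C
helpers in this directory do):

    /* Sketch: 32x32 -> 64 multiply via the inline-asm primitive. */
    void mul32x32(USItype a, USItype b, USItype *hi, USItype *lo)
    {
        USItype h, l;
        umul_ppmm(h, l, a, b);  /* h:l = a * b */
        *hi = h;
        *lo = l;
    }
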
diff -r e701461b1251 xen/arch/arm/lib/lshrdi3.S
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/lib/lshrdi3.S	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,17 @@
+#include <xen/config.h>
+#include <asm/asm-macros.h>
+
+#define al r0
+#define ah r1
+
+ENTRY(__lshrdi3)
+ENTRY(__aeabi_llsr)
+
+        subs    r3, r2, #32
+        rsb     ip, r2, #32
+        movmi   al, al, lsr r2
+        movpl   al, ah, lsr r3
+        orrmi   al, al, ah, lsl ip
+        mov     ah, ah, lsr r2
+        mov     pc, lr
+
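Together with ashldi3.S and ashrdi3.S above, this covers the 64-bit shift
helpers GCC emits when the shift amount is not a compile-time constant.
Sketch:

    /* Sketch: a variable 64-bit shift lowers to __aeabi_llsr (__lshrdi3);
     * '<<' and signed '>>' use __aeabi_llsl and __aeabi_lasr likewise. */
    unsigned long long shr64(unsigned long long v, int n)
    {
        return v >> n;
    }
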
diff -r e701461b1251 xen/arch/arm/lib/math.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/lib/math.c	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,3 @@
+void mdummy(void)
+{
+}
diff -r e701461b1251 xen/arch/arm/lib/memchr.S
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/lib/memchr.S	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,14 @@
+#include <xen/config.h>
+#include <asm/asm-macros.h>
+
+	.text
+	.align	5
+ENTRY(memchr)
+1:	subs	r2, r2, #1
+	bmi	2f
+	ldrb	r3, [r0], #1
+	teq	r3, r1
+	bne	1b
+	sub	r0, r0, #1
+2:	movne	r0, #0
+	mov	pc,lr
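The return convention matches the C library: a pointer to the first match,
or NULL when the character does not occur in the first n bytes. Sketch:

    #include <string.h>

    /* Sketch: standard memchr() semantics. */
    const char *find_slash(void)
    {
        static const char msg[] = "xen/arm";
        return memchr(msg, '/', sizeof msg - 1);    /* -> &msg[3] */
    }
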
diff -r e701461b1251 xen/arch/arm/lib/memcpy.S
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/lib/memcpy.S	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,60 @@
+/*
+ *  linux/arch/arm/lib/memcpy.S
+ *
+ *  Author:	Nicolas Pitre
+ *  Created:	Sep 28, 2005
+ *  Copyright:	MontaVista Software, Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+
+#include <xen/config.h>
+#include <asm/asm-macros.h>
+
+
+	.macro ldr1w ptr reg abort
+	ldr \reg, [\ptr], #4
+	.endm
+
+	.macro ldr4w ptr reg1 reg2 reg3 reg4 abort
+	ldmia \ptr!, {\reg1, \reg2, \reg3, \reg4}
+	.endm
+
+	.macro ldr8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
+	ldmia \ptr!, {\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8}
+	.endm
+
+	.macro ldr1b ptr reg cond=al abort
+	ldr\cond\()b \reg, [\ptr], #1
+	.endm
+
+	.macro str1w ptr reg abort
+	str \reg, [\ptr], #4
+	.endm
+
+	.macro str8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
+	stmia \ptr!, {\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8}
+	.endm
+
+	.macro str1b ptr reg cond=al abort
+	str\cond\()b \reg, [\ptr], #1
+	.endm
+
+	.macro enter reg1 reg2
+	stmdb sp!, {r0, \reg1, \reg2}
+	.endm
+
+	.macro exit reg1 reg2
+	ldmfd sp!, {r0, \reg1, \reg2}
+	.endm
+
+	.text
+
+/* Prototype: void *memcpy(void *dest, const void *src, size_t n); */
+
+ENTRY(memcpy)
+
+#include "copy_template.S"
+
diff -r e701461b1251 xen/arch/arm/lib/memmove.S
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/lib/memmove.S	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,207 @@
+/*
+ *  linux/arch/arm/lib/memmove.S
+ *
+ *  Author:	Nicolas Pitre
+ *  Created:	Sep 28, 2005
+ *  Copyright:	(C) MontaVista Software Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+
+#include <xen/config.h>
+#include <asm/asm-macros.h>
+
+
+/*
+ * This can be used to enable code to cacheline align the source pointer.
+ * Experiments on tested architectures (StrongARM and XScale) didn't show
+ * this a worthwhile thing to do.  That might be different in the future.
+ */
+//#define CALGN(code...)        code
+#define CALGN(code...)
+
+		.text
+
+/*
+ * Prototype: void *memmove(void *dest, const void *src, size_t n);
+ *
+ * Note:
+ *
+ * If the memory regions don't overlap, we simply branch to memcpy which is
+ * normally a bit faster. Otherwise the copy is done going downwards.  This
+ * is a transposition of the code from copy_template.S but with the copy
+ * occurring in the opposite direction.
+ */
+
+ENTRY(memmove)
+
+		subs	ip, r0, r1
+		cmphi	r2, ip
+		bls	memcpy
+
+		stmfd	sp!, {r0, r4, lr}
+		add	r1, r1, r2
+		add	r0, r0, r2
+		subs	r2, r2, #4
+		blt	8f
+		ands	ip, r0, #3
+	PLD(	pld	[r1, #-4]		)
+		bne	9f
+		ands	ip, r1, #3
+		bne	10f
+
+1:		subs	r2, r2, #(28)
+		stmfd	sp!, {r5 - r8}
+		blt	5f
+
+	CALGN(	ands	ip, r1, #31		)
+	CALGN(	sbcnes	r4, ip, r2		)  @ C is always set here
+	CALGN(	bcs	2f			)
+	CALGN(	adr	r4, 6f			)
+	CALGN(	subs	r2, r2, ip		)  @ C is set here
+	CALGN(	add	pc, r4, ip		)
+
+	PLD(	pld	[r1, #-4]		)
+2:	PLD(	subs	r2, r2, #96		)
+	PLD(	pld	[r1, #-32]		)
+	PLD(	blt	4f			)
+	PLD(	pld	[r1, #-64]		)
+	PLD(	pld	[r1, #-96]		)
+
+3:	PLD(	pld	[r1, #-128]		)
+4:		ldmdb	r1!, {r3, r4, r5, r6, r7, r8, ip, lr}
+		subs	r2, r2, #32
+		stmdb	r0!, {r3, r4, r5, r6, r7, r8, ip, lr}
+		bge	3b
+	PLD(	cmn	r2, #96			)
+	PLD(	bge	4b			)
+
+5:		ands	ip, r2, #28
+		rsb	ip, ip, #32
+		addne	pc, pc, ip		@ C is always clear here
+		b	7f
+6:		nop
+		ldr	r3, [r1, #-4]!
+		ldr	r4, [r1, #-4]!
+		ldr	r5, [r1, #-4]!
+		ldr	r6, [r1, #-4]!
+		ldr	r7, [r1, #-4]!
+		ldr	r8, [r1, #-4]!
+		ldr	lr, [r1, #-4]!
+
+		add	pc, pc, ip
+		nop
+		nop
+		str	r3, [r0, #-4]!
+		str	r4, [r0, #-4]!
+		str	r5, [r0, #-4]!
+		str	r6, [r0, #-4]!
+		str	r7, [r0, #-4]!
+		str	r8, [r0, #-4]!
+		str	lr, [r0, #-4]!
+
+	CALGN(	bcs	2b			)
+
+7:		ldmfd	sp!, {r5 - r8}
+
+8:		movs	r2, r2, lsl #31
+		ldrneb	r3, [r1, #-1]!
+		ldrcsb	r4, [r1, #-1]!
+		ldrcsb	ip, [r1, #-1]
+		strneb	r3, [r0, #-1]!
+		strcsb	r4, [r0, #-1]!
+		strcsb	ip, [r0, #-1]
+		ldmfd	sp!, {r0, r4, pc}
+
+9:		cmp	ip, #2
+		ldrgtb	r3, [r1, #-1]!
+		ldrgeb	r4, [r1, #-1]!
+		ldrb	lr, [r1, #-1]!
+		strgtb	r3, [r0, #-1]!
+		strgeb	r4, [r0, #-1]!
+		subs	r2, r2, ip
+		strb	lr, [r0, #-1]!
+		blt	8b
+		ands	ip, r1, #3
+		beq	1b
+
+10:		bic	r1, r1, #3
+		cmp	ip, #2
+		ldr	r3, [r1, #0]
+		beq	17f
+		blt	18f
+
+
+		.macro	backward_copy_shift push pull
+
+		subs	r2, r2, #28
+		blt	14f
+
+	CALGN(	ands	ip, r1, #31		)
+	CALGN(	rsb	ip, ip, #32		)
+	CALGN(	sbcnes	r4, ip, r2		)  @ C is always set here
+	CALGN(	subcc	r2, r2, ip		)
+	CALGN(	bcc	15f			)
+
+11:		stmfd	sp!, {r5 - r9}
+
+	PLD(	pld	[r1, #-4]		)
+	PLD(	subs	r2, r2, #96		)
+	PLD(	pld	[r1, #-32]		)
+	PLD(	blt	13f			)
+	PLD(	pld	[r1, #-64]		)
+	PLD(	pld	[r1, #-96]		)
+
+12:	PLD(	pld	[r1, #-128]		)
+13:		ldmdb   r1!, {r7, r8, r9, ip}
+		mov     lr, r3, push #\push
+		subs    r2, r2, #32
+		ldmdb   r1!, {r3, r4, r5, r6}
+		orr     lr, lr, ip, pull #\pull
+		mov     ip, ip, push #\push
+		orr     ip, ip, r9, pull #\pull
+		mov     r9, r9, push #\push
+		orr     r9, r9, r8, pull #\pull
+		mov     r8, r8, push #\push
+		orr     r8, r8, r7, pull #\pull
+		mov     r7, r7, push #\push
+		orr     r7, r7, r6, pull #\pull
+		mov     r6, r6, push #\push
+		orr     r6, r6, r5, pull #\pull
+		mov     r5, r5, push #\push
+		orr     r5, r5, r4, pull #\pull
+		mov     r4, r4, push #\push
+		orr     r4, r4, r3, pull #\pull
+		stmdb   r0!, {r4 - r9, ip, lr}
+		bge	12b
+	PLD(	cmn	r2, #96			)
+	PLD(	bge	13b			)
+
+		ldmfd	sp!, {r5 - r9}
+
+14:		ands	ip, r2, #28
+		beq	16f
+
+15:		mov     lr, r3, push #\push
+		ldr	r3, [r1, #-4]!
+		subs	ip, ip, #4
+		orr	lr, lr, r3, pull #\pull
+		str	lr, [r0, #-4]!
+		bgt	15b
+	CALGN(	cmp	r2, #0			)
+	CALGN(	bge	11b			)
+
+16:		add	r1, r1, #(\pull / 8)
+		b	8b
+
+		.endm
+
+
+		backward_copy_shift	push=8	pull=24
+
+17:		backward_copy_shift	push=16	pull=16
+
+18:		backward_copy_shift	push=24	pull=8
+
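
The three-instruction prologue (subs/cmphi/bls) is the usual
unsigned-wraparound overlap test. Roughly, in C (sketch only; the pointer
subtraction is the well-known trick, not strictly portable):

    void *memmove_ref(void *dest, const void *src, unsigned long n)
    {
        /* dest - src wraps to a huge value when dest < src, so one
         * unsigned compare covers both "dest below src" and "dest far
         * enough above src": a forward memcpy is then safe. */
        if ((unsigned long)((char *)dest - (char *)src) >= n)
            return memcpy(dest, src, n);

        /* overlapping with dest above src: copy backwards */
        {
            char *d = (char *)dest + n;
            const char *s = (const char *)src + n;
            while (n--)
                *--d = *--s;
        }
        return dest;
    }
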
diff -r e701461b1251 xen/arch/arm/lib/memory.S
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/lib/memory.S	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,421 @@
+/*
+ *  linux/arch/arm/lib/memcpy.S
+ *
+ *  Author:	Nicolas Pitre
+ *  Created:	Sep 28, 2005
+ *  Copyright:	MontaVista Software, Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+
+#include <xen/config.h>
+#include <asm/asm-macros.h>
+
+
+	.macro ldr1w ptr reg abort
+	ldr \reg, [\ptr], #4
+	.endm
+
+	.macro ldr4w ptr reg1 reg2 reg3 reg4 abort
+	ldmia \ptr!, {\reg1, \reg2, \reg3, \reg4}
+	.endm
+
+	.macro ldr8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
+	ldmia \ptr!, {\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8}
+	.endm
+
+	.macro ldr1b ptr reg cond=al abort
+	ldr\cond\()b \reg, [\ptr], #1
+	.endm
+
+	.macro str1w ptr reg abort
+	str \reg, [\ptr], #4
+	.endm
+
+	.macro str8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
+	stmia \ptr!, {\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8}
+	.endm
+
+	.macro str1b ptr reg cond=al abort
+	str\cond\()b \reg, [\ptr], #1
+	.endm
+
+	.macro enter reg1 reg2
+	stmdb sp!, {r0, \reg1, \reg2}
+	.endm
+
+	.macro exit reg1 reg2
+	ldmfd sp!, {r0, \reg1, \reg2}
+	.endm
+
+	.text
+
+/* Prototype: void *memcpy(void *dest, const void *src, size_t n); */
+
+ENTRY(memcpy)
+
+#include "copy_template.S"
+
+#include <xen/config.h>
+#include <asm/asm-macros.h>
+
+	.text
+	.align	5
+ENTRY(memchr)
+1:	subs	r2, r2, #1
+	bmi	2f
+	ldrb	r3, [r0], #1
+	teq	r3, r1
+	bne	1b
+	sub	r0, r0, #1
+2:	movne	r0, #0
+	mov	pc, lr
+/*
+ *  linux/arch/arm/lib/memmove.S
+ *
+ *  Author:	Nicolas Pitre
+ *  Created:	Sep 28, 2005
+ *  Copyright:	(C) MontaVista Software Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+
+#include <xen/config.h>
+#include <asm/asm-macros.h>
+
+
+/*
+ * This can be used to enable code to cacheline align the source pointer.
+ * Experiments on tested architectures (StrongARM and XScale) didn't show
+ * this a worthwhile thing to do.  That might be different in the future.
+ */
+//#define CALGN(code...)        code
+#define CALGN(code...)
+
+		.text
+
+/*
+ * Prototype: void *memmove(void *dest, const void *src, size_t n);
+ *
+ * Note:
+ *
+ * If the memory regions don't overlap, we simply branch to memcpy which is
+ * normally a bit faster. Otherwise the copy is done going downwards.  This
+ * is a transposition of the code from copy_template.S but with the copy
+ * occurring in the opposite direction.
+ */
+
+ENTRY(memmove)
+
+		subs	ip, r0, r1
+		cmphi	r2, ip
+		bls	memcpy
+
+		stmfd	sp!, {r0, r4, lr}
+		add	r1, r1, r2
+		add	r0, r0, r2
+		subs	r2, r2, #4
+		blt	8f
+		ands	ip, r0, #3
+	PLD(	pld	[r1, #-4]		)
+		bne	9f
+		ands	ip, r1, #3
+		bne	10f
+
+1:		subs	r2, r2, #(28)
+		stmfd	sp!, {r5 - r8}
+		blt	5f
+
+	CALGN(	ands	ip, r1, #31		)
+	CALGN(	sbcnes	r4, ip, r2		)  @ C is always set here
+	CALGN(	bcs	2f			)
+	CALGN(	adr	r4, 6f			)
+	CALGN(	subs	r2, r2, ip		)  @ C is set here
+	CALGN(	add	pc, r4, ip		)
+
+	PLD(	pld	[r1, #-4]		)
+2:	PLD(	subs	r2, r2, #96		)
+	PLD(	pld	[r1, #-32]		)
+	PLD(	blt	4f			)
+	PLD(	pld	[r1, #-64]		)
+	PLD(	pld	[r1, #-96]		)
+
+3:	PLD(	pld	[r1, #-128]		)
+4:		ldmdb	r1!, {r3, r4, r5, r6, r7, r8, ip, lr}
+		subs	r2, r2, #32
+		stmdb	r0!, {r3, r4, r5, r6, r7, r8, ip, lr}
+		bge	3b
+	PLD(	cmn	r2, #96			)
+	PLD(	bge	4b			)
+
+5:		ands	ip, r2, #28
+		rsb	ip, ip, #32
+		addne	pc, pc, ip		@ C is always clear here
+		b	7f
+6:		nop
+		ldr	r3, [r1, #-4]!
+		ldr	r4, [r1, #-4]!
+		ldr	r5, [r1, #-4]!
+		ldr	r6, [r1, #-4]!
+		ldr	r7, [r1, #-4]!
+		ldr	r8, [r1, #-4]!
+		ldr	lr, [r1, #-4]!
+
+		add	pc, pc, ip
+		nop
+		nop
+		str	r3, [r0, #-4]!
+		str	r4, [r0, #-4]!
+		str	r5, [r0, #-4]!
+		str	r6, [r0, #-4]!
+		str	r7, [r0, #-4]!
+		str	r8, [r0, #-4]!
+		str	lr, [r0, #-4]!
+
+	CALGN(	bcs	2b			)
+
+7:		ldmfd	sp!, {r5 - r8}
+
+8:		movs	r2, r2, lsl #31
+		ldrneb	r3, [r1, #-1]!
+		ldrcsb	r4, [r1, #-1]!
+		ldrcsb	ip, [r1, #-1]
+		strneb	r3, [r0, #-1]!
+		strcsb	r4, [r0, #-1]!
+		strcsb	ip, [r0, #-1]
+		ldmfd	sp!, {r0, r4, pc}
+
+9:		cmp	ip, #2
+		ldrgtb	r3, [r1, #-1]!
+		ldrgeb	r4, [r1, #-1]!
+		ldrb	lr, [r1, #-1]!
+		strgtb	r3, [r0, #-1]!
+		strgeb	r4, [r0, #-1]!
+		subs	r2, r2, ip
+		strb	lr, [r0, #-1]!
+		blt	8b
+		ands	ip, r1, #3
+		beq	1b
+
+10:		bic	r1, r1, #3
+		cmp	ip, #2
+		ldr	r3, [r1, #0]
+		beq	17f
+		blt	18f
+
+
+		.macro	backward_copy_shift push pull
+
+		subs	r2, r2, #28
+		blt	14f
+
+	CALGN(	ands	ip, r1, #31		)
+	CALGN(	rsb	ip, ip, #32		)
+	CALGN(	sbcnes	r4, ip, r2		)  @ C is always set here
+	CALGN(	subcc	r2, r2, ip		)
+	CALGN(	bcc	15f			)
+
+11:		stmfd	sp!, {r5 - r9}
+
+	PLD(	pld	[r1, #-4]		)
+	PLD(	subs	r2, r2, #96		)
+	PLD(	pld	[r1, #-32]		)
+	PLD(	blt	13f			)
+	PLD(	pld	[r1, #-64]		)
+	PLD(	pld	[r1, #-96]		)
+
+12:	PLD(	pld	[r1, #-128]		)
+13:		ldmdb   r1!, {r7, r8, r9, ip}
+		mov     lr, r3, push #\push
+		subs    r2, r2, #32
+		ldmdb   r1!, {r3, r4, r5, r6}
+		orr     lr, lr, ip, pull #\pull
+		mov     ip, ip, push #\push
+		orr     ip, ip, r9, pull #\pull
+		mov     r9, r9, push #\push
+		orr     r9, r9, r8, pull #\pull
+		mov     r8, r8, push #\push
+		orr     r8, r8, r7, pull #\pull
+		mov     r7, r7, push #\push
+		orr     r7, r7, r6, pull #\pull
+		mov     r6, r6, push #\push
+		orr     r6, r6, r5, pull #\pull
+		mov     r5, r5, push #\push
+		orr     r5, r5, r4, pull #\pull
+		mov     r4, r4, push #\push
+		orr     r4, r4, r3, pull #\pull
+		stmdb   r0!, {r4 - r9, ip, lr}
+		bge	12b
+	PLD(	cmn	r2, #96			)
+	PLD(	bge	13b			)
+
+		ldmfd	sp!, {r5 - r9}
+
+14:		ands	ip, r2, #28
+		beq	16f
+
+15:		mov     lr, r3, push #\push
+		ldr	r3, [r1, #-4]!
+		subs	ip, ip, #4
+		orr	lr, lr, r3, pull #\pull
+		str	lr, [r0, #-4]!
+		bgt	15b
+	CALGN(	cmp	r2, #0			)
+	CALGN(	bge	11b			)
+
+16:		add	r1, r1, #(\pull / 8)
+		b	8b
+
+		.endm
+
+
+		backward_copy_shift	push=8	pull=24
+
+17:		backward_copy_shift	push=16	pull=16
+
+18:		backward_copy_shift	push=24	pull=8
+
+#include <xen/config.h>
+#include <asm/asm-macros.h>
+
+	.text
+	.align	5
+	.word	0
+
+1:	subs	r2, r2, #4		@ 1 do we have enough
+	blt	5f			@ 1 bytes to align with?
+	cmp	r3, #2			@ 1
+	strltb	r1, [r0], #1		@ 1
+	strleb	r1, [r0], #1		@ 1
+	strb	r1, [r0], #1		@ 1
+	add	r2, r2, r3		@ 1 (r2 = r2 - (4 - r3))
+/*
+ * The pointer is now aligned and the length is adjusted.  Try doing the
+ * memset again.
+ */
+
+ENTRY(memset)
+	ands	r3, r0, #3		@ 1 unaligned?
+	bne	1b			@ 1
+/*
+ * we know that the pointer in r0 is aligned to a word boundary.
+ */
+	orr	r1, r1, r1, lsl #8
+	orr	r1, r1, r1, lsl #16
+	mov	r3, r1
+	cmp	r2, #16
+	blt	4f
+/*
+ * We need an extra register for this loop - save the return address and
+ * use the LR
+ */
+	str	lr, [sp, #-4]!
+	mov	ip, r1
+	mov	lr, r1
+
+2:	subs	r2, r2, #64
+	stmgeia	r0!, {r1, r3, ip, lr}	@ 64 bytes at a time.
+	stmgeia	r0!, {r1, r3, ip, lr}
+	stmgeia	r0!, {r1, r3, ip, lr}
+	stmgeia	r0!, {r1, r3, ip, lr}
+	bgt	2b
+	ldmeqfd sp!, {pc}	@ Now <64 bytes to go.
+/*
+ * No need to correct the count; we're only testing bits from now on
+ */
+	tst	r2, #32
+	stmneia	r0!, {r1, r3, ip, lr}
+	stmneia	r0!, {r1, r3, ip, lr}
+	tst	r2, #16
+	stmneia	r0!, {r1, r3, ip, lr}
+	ldr	lr, [sp], #4
+
+4:	tst	r2, #8
+	stmneia	r0!, {r1, r3}
+	tst	r2, #4
+	strne	r1, [r0], #4
+/*
+ * When we get here, we've got less than 4 bytes to set.  We
+ * may have an unaligned pointer as well.
+ */
+5:	tst	r2, #2
+	strneb	r1, [r0], #1
+	strneb	r1, [r0], #1
+	tst	r2, #1
+	strneb	r1, [r0], #1
+	mov	pc, lr
+#include <xen/config.h>
+#include <asm/asm-macros.h>
+
+	.text
+	.align	5
+	.word	0
+/*
+ * Align the pointer in r0.  r3 contains the number of bytes that we are
+ * mis-aligned by, and r1 is the number of bytes.  If r1 < 4, then we
+ * don't bother; we use byte stores instead.
+ */
+1:	subs	r1, r1, #4		@ 1 do we have enough
+	blt	5f			@ 1 bytes to align with?
+	cmp	r3, #2			@ 1
+	strltb	r2, [r0], #1		@ 1
+	strleb	r2, [r0], #1		@ 1
+	strb	r2, [r0], #1		@ 1
+	add	r1, r1, r3		@ 1 (r1 = r1 - (4 - r3))
+/*
+ * The pointer is now aligned and the length is adjusted.  Try doing the
+ * memzero again.
+ */
+
+ENTRY(__memzero)
+	mov	r2, #0			@ 1
+	ands	r3, r0, #3		@ 1 unaligned?
+	bne	1b			@ 1
+/*
+ * r3 = 0, and we know that the pointer in r0 is aligned to a word boundary.
+ */
+	cmp	r1, #16			@ 1 we can skip this chunk if we
+	blt	4f			@ 1 have < 16 bytes
+/*
+ * We need an extra register for this loop - save the return address and
+ * use the LR
+ */
+	str	lr, [sp, #-4]!		@ 1
+	mov	ip, r2			@ 1
+	mov	lr, r2			@ 1
+
+3:	subs	r1, r1, #64		@ 1 write 64 bytes out per loop
+	stmgeia	r0!, {r2, r3, ip, lr}	@ 4
+	stmgeia	r0!, {r2, r3, ip, lr}	@ 4
+	stmgeia	r0!, {r2, r3, ip, lr}	@ 4
+	stmgeia	r0!, {r2, r3, ip, lr}	@ 4
+	bgt	3b			@ 1
+	ldmeqfd sp!, {pc}	@ 1/2 quick exit
+/*
+ * No need to correct the count; we're only testing bits from now on
+ */
+	tst	r1, #32			@ 1
+	stmneia	r0!, {r2, r3, ip, lr}	@ 4
+	stmneia	r0!, {r2, r3, ip, lr}	@ 4
+	tst	r1, #16			@ 1 16 bytes or more?
+	stmneia	r0!, {r2, r3, ip, lr}	@ 4
+	ldr	lr, [sp], #4		@ 1
+
+4:	tst	r1, #8			@ 1 8 bytes or more?
+	stmneia	r0!, {r2, r3}		@ 2
+	tst	r1, #4			@ 1 4 bytes or more?
+	strne	r2, [r0], #4		@ 1
+/*
+ * When we get here, we've got less than 4 bytes to zero.  We
+ * may have an unaligned pointer as well.
+ */
+5:	tst	r1, #2			@ 1 2 bytes or more?
+	strneb	r2, [r0], #1		@ 1
+	strneb	r2, [r0], #1		@ 1
+	tst	r1, #1			@ 1 a byte left over
+	strneb	r2, [r0], #1		@ 1
+	mov	pc, lr		@ 1
diff -r e701461b1251 xen/arch/arm/lib/memset.S
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/lib/memset.S	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,69 @@
+#include <xen/config.h>
+#include <asm/asm-macros.h>
+
+	.text
+	.align	5
+	.word	0
+
+1:	subs	r2, r2, #4		@ 1 do we have enough
+	blt	5f			@ 1 bytes to align with?
+	cmp	r3, #2			@ 1
+	strltb	r1, [r0], #1		@ 1
+	strleb	r1, [r0], #1		@ 1
+	strb	r1, [r0], #1		@ 1
+	add	r2, r2, r3		@ 1 (r2 = r2 - (4 - r3))
+/*
+ * The pointer is now aligned and the length is adjusted.  Try doing the
+ * memset again.
+ */
+
+ENTRY(memset)
+	ands	r3, r0, #3		@ 1 unaligned?
+	bne	1b			@ 1
+/*
+ * we know that the pointer in r0 is aligned to a word boundary.
+ */
+	orr	r1, r1, r1, lsl #8
+	orr	r1, r1, r1, lsl #16
+	mov	r3, r1
+	cmp	r2, #16
+	blt	4f
+/*
+ * We need an extra register for this loop - save the return address and
+ * use the LR
+ */
+	str	lr, [sp, #-4]!
+	mov	ip, r1
+	mov	lr, r1
+
+2:	subs	r2, r2, #64
+	stmgeia	r0!, {r1, r3, ip, lr}	@ 64 bytes at a time.
+	stmgeia	r0!, {r1, r3, ip, lr}
+	stmgeia	r0!, {r1, r3, ip, lr}
+	stmgeia	r0!, {r1, r3, ip, lr}
+	bgt	2b
+	ldmeqfd sp!, {pc}	@ Now <64 bytes to go.
+/*
+ * No need to correct the count; we're only testing bits from now on
+ */
+	tst	r2, #32
+	stmneia	r0!, {r1, r3, ip, lr}
+	stmneia	r0!, {r1, r3, ip, lr}
+	tst	r2, #16
+	stmneia	r0!, {r1, r3, ip, lr}
+	ldr	lr, [sp], #4
+
+4:	tst	r2, #8
+	stmneia	r0!, {r1, r3}
+	tst	r2, #4
+	strne	r1, [r0], #4
+/*
+ * When we get here, we've got less than 4 bytes to set.  We
+ * may have an unaligned pointer as well.
+ */
+5:	tst	r2, #2
+	strneb	r1, [r0], #1
+	strneb	r1, [r0], #1
+	tst	r2, #1
+	strneb	r1, [r0], #1
+	mov	pc, lr
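
The pair of orr instructions after the alignment check is the standard
byte-replication trick, so the 64-bytes-per-iteration loop can store whole
words instead of bytes. In C (sketch):

    static unsigned long replicate_byte(unsigned int c)
    {
        unsigned long w = c & 0xff;
        w |= w << 8;     /* orr r1, r1, r1, lsl #8  */
        w |= w << 16;    /* orr r1, r1, r1, lsl #16 */
        return w;        /* 0xAB -> 0xABABABAB      */
    }
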
diff -r e701461b1251 xen/arch/arm/lib/memzero.S
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/lib/memzero.S	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,71 @@
+#include <xen/config.h>
+#include <asm/asm-macros.h>
+
+	.text
+	.align	5
+	.word	0
+/*
+ * Align the pointer in r0.  r3 contains the number of bytes that we are
+ * mis-aligned by, and r1 is the number of bytes.  If r1 < 4, then we
+ * don't bother; we use byte stores instead.
+ */
+1:	subs	r1, r1, #4		@ 1 do we have enough
+	blt	5f			@ 1 bytes to align with?
+	cmp	r3, #2			@ 1
+	strltb	r2, [r0], #1		@ 1
+	strleb	r2, [r0], #1		@ 1
+	strb	r2, [r0], #1		@ 1
+	add	r1, r1, r3		@ 1 (r1 = r1 - (4 - r3))
+/*
+ * The pointer is now aligned and the length is adjusted.  Try doing the
+ * memzero again.
+ */
+
+ENTRY(__memzero)
+	mov	r2, #0			@ 1
+	ands	r3, r0, #3		@ 1 unaligned?
+	bne	1b			@ 1
+/*
+ * r3 = 0, and we know that the pointer in r0 is aligned to a word boundary.
+ */
+	cmp	r1, #16			@ 1 we can skip this chunk if we
+	blt	4f			@ 1 have < 16 bytes
+/*
+ * We need an extra register for this loop - save the return address and
+ * use the LR
+ */
+	str	lr, [sp, #-4]!		@ 1
+	mov	ip, r2			@ 1
+	mov	lr, r2			@ 1
+
+3:	subs	r1, r1, #64		@ 1 write 64 bytes out per loop
+	stmgeia	r0!, {r2, r3, ip, lr}	@ 4
+	stmgeia	r0!, {r2, r3, ip, lr}	@ 4
+	stmgeia	r0!, {r2, r3, ip, lr}	@ 4
+	stmgeia	r0!, {r2, r3, ip, lr}	@ 4
+	bgt	3b			@ 1
+	ldmeqfd sp!, {pc}	@ 1/2 quick exit
+/*
+ * No need to correct the count; we're only testing bits from now on
+ */
+	tst	r1, #32			@ 1
+	stmneia	r0!, {r2, r3, ip, lr}	@ 4
+	stmneia	r0!, {r2, r3, ip, lr}	@ 4
+	tst	r1, #16			@ 1 16 bytes or more?
+	stmneia	r0!, {r2, r3, ip, lr}	@ 4
+	ldr	lr, [sp], #4		@ 1
+
+4:	tst	r1, #8			@ 1 8 bytes or more?
+	stmneia	r0!, {r2, r3}		@ 2
+	tst	r1, #4			@ 1 4 bytes or more?
+	strne	r2, [r0], #4		@ 1
+/*
+ * When we get here, we've got less than 4 bytes to zero.  We
+ * may have an unaligned pointer as well.
+ */
+5:	tst	r1, #2			@ 1 2 bytes or more?
+	strneb	r2, [r0], #1		@ 1
+	strneb	r2, [r0], #1		@ 1
+	tst	r1, #1			@ 1 a byte left over
+	strneb	r2, [r0], #1		@ 1
+	mov	pc, lr		@ 1
diff -r e701461b1251 xen/arch/arm/lib/muldi3.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/lib/muldi3.c	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,86 @@
+/* More subroutines needed by GCC output code on some machines.  */
+/* Compile this one with gcc.  */
+/* Copyright (C) 1989, 92-98, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING.  If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.  */
+
+/* As a special exception, if you link this library with other files,
+   some of which are compiled with GCC, to produce an executable,
+   this library does not by itself cause the resulting executable
+   to be covered by the GNU General Public License.
+   This exception does not however invalidate any other reasons why
+   the executable file might be covered by the GNU General Public License.
+ */
+/* support functions required by the kernel. based on code from gcc-2.95.3 */
+/* I Molton     29/07/01 */
+
+#include "gcclib.h"
+
+#define umul_ppmm(xh, xl, a, b) \
+{register USItype __t0, __t1, __t2;                                     \
+  __asm__ ("%@ Inlined umul_ppmm					\n\
+        mov     %2, %5, lsr #16						\n\
+        mov     %0, %6, lsr #16						\n\
+        bic     %3, %5, %2, lsl #16					\n\
+        bic     %4, %6, %0, lsl #16					\n\
+        mul     %1, %3, %4						\n\
+        mul     %4, %2, %4						\n\
+        mul     %3, %0, %3						\n\
+        mul     %0, %2, %0						\n\
+        adds    %3, %4, %3						\n\
+        addcs   %0, %0, #65536						\n\
+        adds    %1, %1, %3, lsl #16					\n\
+        adc     %0, %0, %3, lsr #16"                                    \
+           : "=&r" ((USItype) (xh)),                                    \
+             "=r" ((USItype) (xl)),                                     \
+             "=&r" (__t0), "=&r" (__t1), "=r" (__t2)                    \
+           : "r" ((USItype) (a)),                                       \
+             "r" ((USItype) (b)));}
+
+
+#define __umulsidi3(u, v) \
+  ({DIunion __w;                                                        \
+    umul_ppmm (__w.s.high, __w.s.low, u, v);                            \
+    __w.ll; })
+
+
+DItype
+__muldi3 (DItype u, DItype v)
+{
+  DIunion w;
+  DIunion uu, vv;
+
+  uu.ll = u;
+  vv.ll = v;
+
+  w.ll = __umulsidi3 (uu.s.low, vv.s.low);
+  w.s.high += ((USItype) uu.s.low * (USItype) vv.s.high
+               + (USItype) uu.s.high * (USItype) vv.s.low);
+
+  return w.ll;
+}
+
+#if 0
+lldiv_t_rr __aeabi_ldivmod (long long a, long long b) 
+{ 
+	lldiv_t_rr r; 
+	r.quot =__divdi3 (a, b); 
+	r.rem = a - b * r.quot; 
+	return r; 
+}
+#endif
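
umul_ppmm produces the full 64-bit product of two 32-bit halves; __muldi3
then adds the two cross products into the high word. The identity it relies
on, sketched in C (assuming 32-bit unsigned int; muldi3_ref is illustrative):

    unsigned long long muldi3_ref(unsigned long long u, unsigned long long v)
    {
        unsigned int ul = (unsigned int)u, uh = (unsigned int)(u >> 32);
        unsigned int vl = (unsigned int)v, vh = (unsigned int)(v >> 32);

        unsigned long long w = (unsigned long long)ul * vl; /* umul_ppmm   */
        w += (unsigned long long)(ul * vh + uh * vl) << 32; /* cross terms */
        return w;   /* the uh * vh term shifts entirely out of 64 bits */
    }
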
diff -r e701461b1251 xen/arch/arm/lib/putuser.S
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/lib/putuser.S	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,75 @@
+/*
+ *  linux/arch/arm/lib/putuser.S
+ *
+ *  Copyright (C) 2001 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Idea from x86 version, (C) Copyright 1998 Linus Torvalds
+ *
+ * These functions have a non-standard call interface to make
+ * them more efficient, especially as they return an error
+ * value in addition to the "real" return value.
+ *
+ * __put_user_X
+ *
+ * Inputs:	r0 contains the address
+ *		r2, r3 contains the value
+ * Outputs:	r0 is the error code
+ *		lr corrupted
+ *
+ * No other registers must be altered.  (see include/asm-arm/uaccess.h
+ * for specific ASM register usage).
+ *
+ * Note that ADDR_LIMIT is either 0 or 0xc0000000
+ * Note also that it is intended that __put_user_bad is not global.
+ */
+#include <xen/errno.h>
+
+	.global	__put_user_1
+__put_user_1:
+1:	strbt	r2, [r0]
+	mov	r0, #0
+	mov	pc, lr
+
+	.global	__put_user_2
+__put_user_2:
+	mov	ip, r2, lsr #8
+#ifndef __ARMEB__
+2:	strbt	r2, [r0], #1
+3:	strbt	ip, [r0]
+#else
+2:	strbt	ip, [r0], #1
+3:	strbt	r2, [r0]
+#endif
+	mov	r0, #0
+	mov	pc, lr
+
+	.global	__put_user_4
+__put_user_4:
+4:	strt	r2, [r0]
+	mov	r0, #0
+	mov	pc, lr
+
+	.global	__put_user_8
+__put_user_8:
+5:	strt	r2, [r0], #4
+6:	strt	r3, [r0]
+	mov	r0, #0
+	mov	pc, lr
+
+	.global __put_user_bad
+__put_user_bad:
+	mov	r0, #-EFAULT
+	mov	pc, lr
+
+.section .extable, "a"
+	.long	1b, __put_user_bad
+	.long	2b, __put_user_bad
+	.long	3b, __put_user_bad
+	.long	4b, __put_user_bad
+	.long	5b, __put_user_bad
+	.long	6b, __put_user_bad
+.previous
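
Each numbered strbt/strt above has a matching entry in .extable: on a
faulting store the fault handler looks up the trapping PC in that table and
resumes at the fixup, so every __put_user_X fails over to __put_user_bad and
returns -EFAULT in r0. The records are simply address pairs (struct name
here is illustrative, following the usual Linux-style layout):

    struct exception_table_entry {
        unsigned long insn;    /* address of the strbt/strt that may fault */
        unsigned long fixup;   /* where to resume, i.e. __put_user_bad     */
    };
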
diff -r e701461b1251 xen/arch/arm/lib/setbit.S
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/lib/setbit.S	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,22 @@
+#include <xen/config.h>
+#include <asm/processor.h>
+#include <asm/asm-macros.h>
+
+		.text
+
+/*
+ * Purpose  : Function to set a bit
+ * Prototype: int set_bit(int bit, void *addr)
+ */
+ENTRY(_set_bit_be)
+	eor	r0, r0, #0x18		@ big endian byte ordering
+ENTRY(_set_bit_le)
+	and	r2, r0, #7
+	mov	r3, #1
+	mov	r3, r3, lsl r2
+	save_and_disable_irqs ip, r2
+	ldrb	r2, [r1, r0, lsr #3]
+	orr	r2, r2, r3
+	strb	r2, [r1, r0, lsr #3]
+	restore_irqs ip
+	mov	pc, lr
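
With no load/store-exclusive in use, atomicity comes from masking interrupts
around the read-modify-write. _set_bit_le is equivalent to this C (sketch;
the IRQ masking done by save_and_disable_irqs/restore_irqs is elided):

    static void set_bit_le_ref(int bit, unsigned char *addr)
    {
        unsigned char mask = 1u << (bit & 7);  /* and r2, r0, #7 / lsl  */
        addr[bit >> 3] |= mask;                /* ldrb / orr / strb     */
    }
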
diff -r e701461b1251 xen/arch/arm/lib/strchr.S
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/lib/strchr.S	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,15 @@
+#include <xen/config.h>
+#include <asm/asm-macros.h>
+
+		.text
+		.align	5
+ENTRY(strchr)
+		and	r1, r1, #0xff
+1:		ldrb	r2, [r0], #1
+		teq	r2, r1
+		teqne	r2, #0
+		bne	1b
+		teq	r2, r1
+		movne	r0, #0
+		subeq	r0, r0, #1
+		mov	pc, lr
diff -r e701461b1251 xen/arch/arm/lib/testchangebit.S
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/lib/testchangebit.S	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,22 @@
+#include <xen/config.h>
+#include <asm/processor.h>
+#include <asm/asm-macros.h>
+
+                .text
+
+ENTRY(_test_and_change_bit_be)
+		eor	r0, r0, #0x18		@ big endian byte ordering
+ENTRY(_test_and_change_bit_le)
+		add	r1, r1, r0, lsr #3
+		and	r3, r0, #7
+		mov	r0, #1
+		save_and_disable_irqs ip, r2
+		ldrb	r2, [r1]
+		tst	r2, r0, lsl r3
+		eor	r2, r2, r0, lsl r3
+		strb	r2, [r1]
+		restore_irqs ip
+		moveq	r0, #0
+		mov	pc, lr
+
+
diff -r e701461b1251 xen/arch/arm/lib/testclearbit.S
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/lib/testclearbit.S	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,22 @@
+#include <xen/config.h>
+#include <asm/processor.h>
+#include <asm/asm-macros.h>
+
+                .text
+
+ENTRY(_test_and_clear_bit_be)
+		eor	r0, r0, #0x18		@ big endian byte ordering
+ENTRY(_test_and_clear_bit_le)
+		add	r1, r1, r0, lsr #3	@ Get byte offset
+		and	r3, r0, #7		@ Get bit offset
+		mov	r0, #1
+		save_and_disable_irqs ip, r2
+		ldrb	r2, [r1]
+		tst	r2, r0, lsl r3
+		bicne	r2, r2, r0, lsl r3
+		strneb	r2, [r1]
+		restore_irqs ip
+		moveq	r0, #0
+		mov	pc, lr
+
+
diff -r e701461b1251 xen/arch/arm/lib/testsetbit.S
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/lib/testsetbit.S	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,20 @@
+#include <xen/config.h>
+#include <asm/processor.h>
+#include <asm/asm-macros.h>
+
+                .text
+
+ENTRY(_test_and_set_bit_le)
+		add	r1, r1, r0, lsr #3	@ Get byte offset
+		and	r3, r0, #7		@ Get bit offset
+		mov	r0, #1
+		save_and_disable_irqs ip, r2
+		ldrb	r2, [r1]
+		tst	r2, r0, lsl r3
+		orreq	r2, r2, r0, lsl r3
+		streqb	r2, [r1]
+		restore_irqs ip
+		moveq	r0, #0
+		mov	pc, lr
+
+
diff -r e701461b1251 xen/arch/arm/lib/uaccess.S
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/lib/uaccess.S	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,684 @@
+#include <xen/config.h>
+#include <xen/errno.h>
+#include <asm/asm-macros.h>
+
+		.text
+
+#define PAGE_SHIFT 12
+
+/* Prototype: int __arch_copy_to_user(void *to, const char *from, size_t n)
+ * Purpose  : copy a block to user memory from kernel memory
+ * Params   : to   - user memory
+ *          : from - kernel memory
+ *          : n    - number of bytes to copy
+ * Returns  : Number of bytes NOT copied.
+ */
+
+.c2u_dest_not_aligned:
+		rsb	ip, ip, #4
+		cmp	ip, #2
+		ldrb	r3, [r1], #1
+USER(		strbt	r3, [r0], #1)			@ May fault
+		ldrgeb	r3, [r1], #1
+USER(		strgebt	r3, [r0], #1)			@ May fault
+		ldrgtb	r3, [r1], #1
+USER(		strgtbt	r3, [r0], #1)			@ May fault
+		sub	r2, r2, ip
+		b	.c2u_dest_aligned
+
+ENTRY(__arch_copy_to_user)
+		stmfd	sp!, {r2, r4 - r7, lr}
+		cmp	r2, #4
+		blt	.c2u_not_enough
+	PLD(	pld	[r1, #0]		)
+	PLD(	pld	[r0, #0]		)
+		ands	ip, r0, #3
+		bne	.c2u_dest_not_aligned
+.c2u_dest_aligned:
+
+		ands	ip, r1, #3
+		bne	.c2u_src_not_aligned
+/*
+ * Seeing as there has to be at least 8 bytes to copy, we can
+ * copy one word, and force a user-mode page fault...
+ */
+
+.c2u_0fupi:	subs	r2, r2, #4
+		addmi	ip, r2, #4
+		bmi	.c2u_0nowords
+		ldr	r3, [r1], #4
+USER(		strt	r3, [r0], #4)			@ May fault
+		mov	ip, r0, lsl #32 - PAGE_SHIFT	@ On each page, use a ld/st??t instruction
+		rsb	ip, ip, #0
+		movs	ip, ip, lsr #32 - PAGE_SHIFT
+		beq	.c2u_0fupi
+/*
+ * ip = max no. of bytes to copy before needing another "strt" insn
+ */
+		cmp	r2, ip
+		movlt	ip, r2
+		sub	r2, r2, ip
+		subs	ip, ip, #32
+		blt	.c2u_0rem8lp
+	PLD(	pld	[r1, #28]		)
+	PLD(	pld	[r0, #28]		)
+	PLD(	subs	ip, ip, #64			)
+	PLD(	blt	.c2u_0cpynopld		)
+	PLD(	pld	[r1, #60]		)
+	PLD(	pld	[r0, #60]		)
+
+.c2u_0cpy8lp:
+	PLD(	pld	[r1, #92]		)
+	PLD(	pld	[r0, #92]		)
+.c2u_0cpynopld:	ldmia	r1!, {r3 - r6}
+		stmia	r0!, {r3 - r6}			@ Shouldnt fault
+		ldmia	r1!, {r3 - r6}
+		subs	ip, ip, #32
+		stmia	r0!, {r3 - r6}			@ Shouldnt fault
+		bpl	.c2u_0cpy8lp
+	PLD(	cmn	ip, #64			)
+	PLD(	bge	.c2u_0cpynopld		)
+	PLD(	add	ip, ip, #64		)
+
+.c2u_0rem8lp:	cmn	ip, #16
+		ldmgeia	r1!, {r3 - r6}
+		stmgeia	r0!, {r3 - r6}			@ Shouldnt fault
+		tst	ip, #8
+		ldmneia	r1!, {r3 - r4}
+		stmneia	r0!, {r3 - r4}			@ Shouldnt fault
+		tst	ip, #4
+		ldrne	r3, [r1], #4
+		strnet	r3, [r0], #4			@ Shouldnt fault
+		ands	ip, ip, #3
+		beq	.c2u_0fupi
+.c2u_0nowords:	teq	ip, #0
+		beq	.c2u_finished
+.c2u_nowords:	cmp	ip, #2
+		ldrb	r3, [r1], #1
+USER(		strbt	r3, [r0], #1)			@ May fault
+		ldrgeb	r3, [r1], #1
+USER(		strgebt	r3, [r0], #1)			@ May fault
+		ldrgtb	r3, [r1], #1
+USER(		strgtbt	r3, [r0], #1)			@ May fault
+		b	.c2u_finished
+
+.c2u_not_enough:
+		movs	ip, r2
+		bne	.c2u_nowords
+.c2u_finished:	mov	r0, #0
+		ldmfd	sp!,{r2, r4 - r7, pc}
+
+.c2u_src_not_aligned:
+		bic	r1, r1, #3
+		ldr	r7, [r1], #4
+		cmp	ip, #2
+		bgt	.c2u_3fupi
+		beq	.c2u_2fupi
+.c2u_1fupi:	subs	r2, r2, #4
+		addmi	ip, r2, #4
+		bmi	.c2u_1nowords
+		mov	r3, r7, pull #8
+		ldr	r7, [r1], #4
+		orr	r3, r3, r7, push #24
+USER(		strt	r3, [r0], #4)			@ May fault
+		mov	ip, r0, lsl #32 - PAGE_SHIFT
+		rsb	ip, ip, #0
+		movs	ip, ip, lsr #32 - PAGE_SHIFT
+		beq	.c2u_1fupi
+		cmp	r2, ip
+		movlt	ip, r2
+		sub	r2, r2, ip
+		subs	ip, ip, #16
+		blt	.c2u_1rem8lp
+	PLD(	pld	[r1, #12]		)
+	PLD(	pld	[r0, #12]		)
+	PLD(	subs	ip, ip, #32		)
+	PLD(	blt	.c2u_1cpynopld		)
+	PLD(	pld	[r1, #28]		)
+	PLD(	pld	[r0, #28]		)
+
+.c2u_1cpy8lp:
+	PLD(	pld	[r1, #44]		)
+	PLD(	pld	[r0, #44]		)
+.c2u_1cpynopld:	mov	r3, r7, pull #8
+		ldmia	r1!, {r4 - r7}
+		subs	ip, ip, #16
+		orr	r3, r3, r4, push #24
+		mov	r4, r4, pull #8
+		orr	r4, r4, r5, push #24
+		mov	r5, r5, pull #8
+		orr	r5, r5, r6, push #24
+		mov	r6, r6, pull #8
+		orr	r6, r6, r7, push #24
+		stmia	r0!, {r3 - r6}			@ Shouldnt fault
+		bpl	.c2u_1cpy8lp
+	PLD(	cmn	ip, #32			)
+	PLD(	bge	.c2u_1cpynopld		)
+	PLD(	add	ip, ip, #32		)
+
+.c2u_1rem8lp:	tst	ip, #8
+		movne	r3, r7, pull #8
+		ldmneia	r1!, {r4, r7}
+		orrne	r3, r3, r4, push #24
+		movne	r4, r4, pull #8
+		orrne	r4, r4, r7, push #24
+		stmneia	r0!, {r3 - r4}			@ Shouldnt fault
+		tst	ip, #4
+		movne	r3, r7, pull #8
+		ldrne	r7, [r1], #4
+		orrne	r3, r3, r7, push #24
+		strnet	r3, [r0], #4			@ Shouldnt fault
+		ands	ip, ip, #3
+		beq	.c2u_1fupi
+.c2u_1nowords:	mov	r3, r7, get_byte_1
+		teq	ip, #0
+		beq	.c2u_finished
+		cmp	ip, #2
+USER(		strbt	r3, [r0], #1)			@ May fault
+		movge	r3, r7, get_byte_2
+USER(		strgebt	r3, [r0], #1)			@ May fault
+		movgt	r3, r7, get_byte_3
+USER(		strgtbt	r3, [r0], #1)			@ May fault
+		b	.c2u_finished
+
+.c2u_2fupi:	subs	r2, r2, #4
+		addmi	ip, r2, #4
+		bmi	.c2u_2nowords
+		mov	r3, r7, pull #16
+		ldr	r7, [r1], #4
+		orr	r3, r3, r7, push #16
+USER(		strt	r3, [r0], #4)			@ May fault
+		mov	ip, r0, lsl #32 - PAGE_SHIFT
+		rsb	ip, ip, #0
+		movs	ip, ip, lsr #32 - PAGE_SHIFT
+		beq	.c2u_2fupi
+		cmp	r2, ip
+		movlt	ip, r2
+		sub	r2, r2, ip
+		subs	ip, ip, #16
+		blt	.c2u_2rem8lp
+	PLD(	pld	[r1, #12]		)
+	PLD(	pld	[r0, #12]		)
+	PLD(	subs	ip, ip, #32		)
+	PLD(	blt	.c2u_2cpynopld		)
+	PLD(	pld	[r1, #28]		)
+	PLD(	pld	[r0, #28]		)
+
+.c2u_2cpy8lp:
+	PLD(	pld	[r1, #44]		)
+	PLD(	pld	[r0, #44]		)
+.c2u_2cpynopld:	mov	r3, r7, pull #16
+		ldmia	r1!, {r4 - r7}
+		subs	ip, ip, #16
+		orr	r3, r3, r4, push #16
+		mov	r4, r4, pull #16
+		orr	r4, r4, r5, push #16
+		mov	r5, r5, pull #16
+		orr	r5, r5, r6, push #16
+		mov	r6, r6, pull #16
+		orr	r6, r6, r7, push #16
+		stmia	r0!, {r3 - r6}			@ Shouldnt fault
+		bpl	.c2u_2cpy8lp
+	PLD(	cmn	ip, #32			)
+	PLD(	bge	.c2u_2cpynopld		)
+	PLD(	add	ip, ip, #32		)
+
+.c2u_2rem8lp:	tst	ip, #8
+		movne	r3, r7, pull #16
+		ldmneia	r1!, {r4, r7}
+		orrne	r3, r3, r4, push #16
+		movne	r4, r4, pull #16
+		orrne	r4, r4, r7, push #16
+		stmneia	r0!, {r3 - r4}			@ Shouldnt fault
+		tst	ip, #4
+		movne	r3, r7, pull #16
+		ldrne	r7, [r1], #4
+		orrne	r3, r3, r7, push #16
+		strnet	r3, [r0], #4			@ Shouldnt fault
+		ands	ip, ip, #3
+		beq	.c2u_2fupi
+.c2u_2nowords:	mov	r3, r7, get_byte_2
+		teq	ip, #0
+		beq	.c2u_finished
+		cmp	ip, #2
+USER(		strbt	r3, [r0], #1)			@ May fault
+		movge	r3, r7, get_byte_3
+USER(		strgebt	r3, [r0], #1)			@ May fault
+		ldrgtb	r3, [r1], #0
+USER(		strgtbt	r3, [r0], #1)			@ May fault
+		b	.c2u_finished
+
+.c2u_3fupi:	subs	r2, r2, #4
+		addmi	ip, r2, #4
+		bmi	.c2u_3nowords
+		mov	r3, r7, pull #24
+		ldr	r7, [r1], #4
+		orr	r3, r3, r7, push #8
+USER(		strt	r3, [r0], #4)			@ May fault
+		mov	ip, r0, lsl #32 - PAGE_SHIFT
+		rsb	ip, ip, #0
+		movs	ip, ip, lsr #32 - PAGE_SHIFT
+		beq	.c2u_3fupi
+		cmp	r2, ip
+		movlt	ip, r2
+		sub	r2, r2, ip
+		subs	ip, ip, #16
+		blt	.c2u_3rem8lp
+	PLD(	pld	[r1, #12]		)
+	PLD(	pld	[r0, #12]		)
+	PLD(	subs	ip, ip, #32		)
+	PLD(	blt	.c2u_3cpynopld		)
+	PLD(	pld	[r1, #28]		)
+	PLD(	pld	[r0, #28]		)
+
+.c2u_3cpy8lp:
+	PLD(	pld	[r1, #44]		)
+	PLD(	pld	[r0, #44]		)
+.c2u_3cpynopld:	mov	r3, r7, pull #24
+		ldmia	r1!, {r4 - r7}
+		subs	ip, ip, #16
+		orr	r3, r3, r4, push #8
+		mov	r4, r4, pull #24
+		orr	r4, r4, r5, push #8
+		mov	r5, r5, pull #24
+		orr	r5, r5, r6, push #8
+		mov	r6, r6, pull #24
+		orr	r6, r6, r7, push #8
+		stmia	r0!, {r3 - r6}			@ Shouldnt fault
+		bpl	.c2u_3cpy8lp
+	PLD(	cmn	ip, #32			)
+	PLD(	bge	.c2u_3cpynopld		)
+	PLD(	add	ip, ip, #32		)
+
+.c2u_3rem8lp:	tst	ip, #8
+		movne	r3, r7, pull #24
+		ldmneia	r1!, {r4, r7}
+		orrne	r3, r3, r4, push #8
+		movne	r4, r4, pull #24
+		orrne	r4, r4, r7, push #8
+		stmneia	r0!, {r3 - r4}			@ Shouldnt fault
+		tst	ip, #4
+		movne	r3, r7, pull #24
+		ldrne	r7, [r1], #4
+		orrne	r3, r3, r7, push #8
+		strnet	r3, [r0], #4			@ Shouldnt fault
+		ands	ip, ip, #3
+		beq	.c2u_3fupi
+.c2u_3nowords:	mov	r3, r7, get_byte_3
+		teq	ip, #0
+		beq	.c2u_finished
+		cmp	ip, #2
+USER(		strbt	r3, [r0], #1)			@ May fault
+		ldrgeb	r3, [r1], #1
+USER(		strgebt	r3, [r0], #1)			@ May fault
+		ldrgtb	r3, [r1], #0
+USER(		strgtbt	r3, [r0], #1)			@ May fault
+		b	.c2u_finished
+
+		.section .fixup,"ax"
+		.align	0
+9001:		ldmfd	sp!, {r0, r4 - r7, pc}
+		.previous
+
+/* Prototype: unsigned long __arch_copy_from_user(void *to,const void *from,unsigned long n);
+ * Purpose  : copy a block from user memory to kernel memory
+ * Params   : to   - kernel memory
+ *          : from - user memory
+ *          : n    - number of bytes to copy
+ * Returns  : Number of bytes NOT copied.
+ */
+.cfu_dest_not_aligned:
+		rsb	ip, ip, #4
+		cmp	ip, #2
+USER(		ldrbt	r3, [r1], #1)			@ May fault
+		strb	r3, [r0], #1
+USER(		ldrgebt	r3, [r1], #1)			@ May fault
+		strgeb	r3, [r0], #1
+USER(		ldrgtbt	r3, [r1], #1)			@ May fault
+		strgtb	r3, [r0], #1
+		sub	r2, r2, ip
+		b	.cfu_dest_aligned
+
+ENTRY(__arch_copy_from_user)
+		stmfd	sp!, {r0, r2, r4 - r7, lr}
+		cmp	r2, #4
+		blt	.cfu_not_enough
+	PLD(	pld	[r1, #0]		)
+	PLD(	pld	[r0, #0]		)
+		ands	ip, r0, #3
+		bne	.cfu_dest_not_aligned
+.cfu_dest_aligned:
+		ands	ip, r1, #3
+		bne	.cfu_src_not_aligned
+/*
+ * Seeing as there has to be at least 8 bytes to copy, we can
+ * copy one word, and force a user-mode page fault...
+ */
+
+.cfu_0fupi:	subs	r2, r2, #4
+		addmi	ip, r2, #4
+		bmi	.cfu_0nowords
+USER(		ldrt	r3, [r1], #4)
+		str	r3, [r0], #4
+		mov	ip, r1, lsl #32 - PAGE_SHIFT	@ On each page, use a ld/st??t instruction
+		rsb	ip, ip, #0
+		movs	ip, ip, lsr #32 - PAGE_SHIFT
+		beq	.cfu_0fupi
+/*
+ * ip = max no. of bytes to copy before needing another "strt" insn
+ */
+		cmp	r2, ip
+		movlt	ip, r2
+		sub	r2, r2, ip
+		subs	ip, ip, #32
+		blt	.cfu_0rem8lp
+	PLD(	pld	[r1, #28]		)
+	PLD(	pld	[r0, #28]		)
+	PLD(	subs	ip, ip, #64			)
+	PLD(	blt	.cfu_0cpynopld		)
+	PLD(	pld	[r1, #60]		)
+	PLD(	pld	[r0, #60]		)
+
+.cfu_0cpy8lp:
+	PLD(	pld	[r1, #92]		)
+	PLD(	pld	[r0, #92]		)
+.cfu_0cpynopld:	ldmia	r1!, {r3 - r6}			@ Shouldnt fault
+		stmia	r0!, {r3 - r6}
+		ldmia	r1!, {r3 - r6}			@ Shouldnt fault
+		subs	ip, ip, #32
+		stmia	r0!, {r3 - r6}
+		bpl	.cfu_0cpy8lp
+	PLD(	cmn	ip, #64			)
+	PLD(	bge	.cfu_0cpynopld		)
+	PLD(	add	ip, ip, #64		)
+
+.cfu_0rem8lp:	cmn	ip, #16
+		ldmgeia	r1!, {r3 - r6}			@ Shouldnt fault
+		stmgeia	r0!, {r3 - r6}
+		tst	ip, #8
+		ldmneia	r1!, {r3 - r4}			@ Shouldnt fault
+		stmneia	r0!, {r3 - r4}
+		tst	ip, #4
+		ldrnet	r3, [r1], #4			@ Shouldnt fault
+		strne	r3, [r0], #4
+		ands	ip, ip, #3
+		beq	.cfu_0fupi
+.cfu_0nowords:	teq	ip, #0
+		beq	.cfu_finished
+.cfu_nowords:	cmp	ip, #2
+USER(		ldrbt	r3, [r1], #1)			@ May fault
+		strb	r3, [r0], #1
+USER(		ldrgebt	r3, [r1], #1)			@ May fault
+		strgeb	r3, [r0], #1
+USER(		ldrgtbt	r3, [r1], #1)			@ May fault
+		strgtb	r3, [r0], #1
+		b	.cfu_finished
+
+.cfu_not_enough:
+		movs	ip, r2
+		bne	.cfu_nowords
+.cfu_finished:	mov	r0, #0
+		add	sp, sp, #8
+		ldmfd	sp!,{r4 - r7, pc}
+
+.cfu_src_not_aligned:
+		bic	r1, r1, #3
+USER(		ldrt	r7, [r1], #4)			@ May fault
+		cmp	ip, #2
+		bgt	.cfu_3fupi
+		beq	.cfu_2fupi
+.cfu_1fupi:	subs	r2, r2, #4
+		addmi	ip, r2, #4
+		bmi	.cfu_1nowords
+		mov	r3, r7, pull #8
+USER(		ldrt	r7, [r1], #4)			@ May fault
+		orr	r3, r3, r7, push #24
+		str	r3, [r0], #4
+		mov	ip, r1, lsl #32 - PAGE_SHIFT
+		rsb	ip, ip, #0
+		movs	ip, ip, lsr #32 - PAGE_SHIFT
+		beq	.cfu_1fupi
+		cmp	r2, ip
+		movlt	ip, r2
+		sub	r2, r2, ip
+		subs	ip, ip, #16
+		blt	.cfu_1rem8lp
+	PLD(	pld	[r1, #12]		)
+	PLD(	pld	[r0, #12]		)
+	PLD(	subs	ip, ip, #32		)
+	PLD(	blt	.cfu_1cpynopld		)
+	PLD(	pld	[r1, #28]		)
+	PLD(	pld	[r0, #28]		)
+
+.cfu_1cpy8lp:
+	PLD(	pld	[r1, #44]		)
+	PLD(	pld	[r0, #44]		)
+.cfu_1cpynopld:	mov	r3, r7, pull #8
+		ldmia	r1!, {r4 - r7}			@ Shouldnt fault
+		subs	ip, ip, #16
+		orr	r3, r3, r4, push #24
+		mov	r4, r4, pull #8
+		orr	r4, r4, r5, push #24
+		mov	r5, r5, pull #8
+		orr	r5, r5, r6, push #24
+		mov	r6, r6, pull #8
+		orr	r6, r6, r7, push #24
+		stmia	r0!, {r3 - r6}
+		bpl	.cfu_1cpy8lp
+	PLD(	cmn	ip, #32			)
+	PLD(	bge	.cfu_1cpynopld		)
+	PLD(	add	ip, ip, #32		)
+
+.cfu_1rem8lp:	tst	ip, #8
+		movne	r3, r7, pull #8
+		ldmneia	r1!, {r4, r7}			@ Shouldnt fault
+		orrne	r3, r3, r4, push #24
+		movne	r4, r4, pull #8
+		orrne	r4, r4, r7, push #24
+		stmneia	r0!, {r3 - r4}
+		tst	ip, #4
+		movne	r3, r7, pull #8
+USER(		ldrnet	r7, [r1], #4)			@ May fault
+		orrne	r3, r3, r7, push #24
+		strne	r3, [r0], #4
+		ands	ip, ip, #3
+		beq	.cfu_1fupi
+.cfu_1nowords:	mov	r3, r7, get_byte_1
+		teq	ip, #0
+		beq	.cfu_finished
+		cmp	ip, #2
+		strb	r3, [r0], #1
+		movge	r3, r7, get_byte_2
+		strgeb	r3, [r0], #1
+		movgt	r3, r7, get_byte_3
+		strgtb	r3, [r0], #1
+		b	.cfu_finished
+
+.cfu_2fupi:	subs	r2, r2, #4
+		addmi	ip, r2, #4
+		bmi	.cfu_2nowords
+		mov	r3, r7, pull #16
+USER(		ldrt	r7, [r1], #4)			@ May fault
+		orr	r3, r3, r7, push #16
+		str	r3, [r0], #4
+		mov	ip, r1, lsl #32 - PAGE_SHIFT
+		rsb	ip, ip, #0
+		movs	ip, ip, lsr #32 - PAGE_SHIFT
+		beq	.cfu_2fupi
+		cmp	r2, ip
+		movlt	ip, r2
+		sub	r2, r2, ip
+		subs	ip, ip, #16
+		blt	.cfu_2rem8lp
+	PLD(	pld	[r1, #12]		)
+	PLD(	pld	[r0, #12]		)
+	PLD(	subs	ip, ip, #32		)
+	PLD(	blt	.cfu_2cpynopld		)
+	PLD(	pld	[r1, #28]		)
+	PLD(	pld	[r0, #28]		)
+
+.cfu_2cpy8lp:
+	PLD(	pld	[r1, #44]		)
+	PLD(	pld	[r0, #44]		)
+.cfu_2cpynopld:	mov	r3, r7, pull #16
+		ldmia	r1!, {r4 - r7}			@ Shouldnt fault
+		subs	ip, ip, #16
+		orr	r3, r3, r4, push #16
+		mov	r4, r4, pull #16
+		orr	r4, r4, r5, push #16
+		mov	r5, r5, pull #16
+		orr	r5, r5, r6, push #16
+		mov	r6, r6, pull #16
+		orr	r6, r6, r7, push #16
+		stmia	r0!, {r3 - r6}
+		bpl	.cfu_2cpy8lp
+	PLD(	cmn	ip, #32			)
+	PLD(	bge	.cfu_2cpynopld		)
+	PLD(	add	ip, ip, #32		)
+
+.cfu_2rem8lp:	tst	ip, #8
+		movne	r3, r7, pull #16
+		ldmneia	r1!, {r4, r7}			@ Shouldnt fault
+		orrne	r3, r3, r4, push #16
+		movne	r4, r4, pull #16
+		orrne	r4, r4, r7, push #16
+		stmneia	r0!, {r3 - r4}
+		tst	ip, #4
+		movne	r3, r7, pull #16
+USER(		ldrnet	r7, [r1], #4)			@ May fault
+		orrne	r3, r3, r7, push #16
+		strne	r3, [r0], #4
+		ands	ip, ip, #3
+		beq	.cfu_2fupi
+.cfu_2nowords:	mov	r3, r7, get_byte_2
+		teq	ip, #0
+		beq	.cfu_finished
+		cmp	ip, #2
+		strb	r3, [r0], #1
+		movge	r3, r7, get_byte_3
+		strgeb	r3, [r0], #1
+USER(		ldrgtbt	r3, [r1], #0)			@ May fault
+		strgtb	r3, [r0], #1
+		b	.cfu_finished
+
+.cfu_3fupi:	subs	r2, r2, #4
+		addmi	ip, r2, #4
+		bmi	.cfu_3nowords
+		mov	r3, r7, pull #24
+USER(		ldrt	r7, [r1], #4)			@ May fault
+		orr	r3, r3, r7, push #8
+		str	r3, [r0], #4
+		mov	ip, r1, lsl #32 - PAGE_SHIFT
+		rsb	ip, ip, #0
+		movs	ip, ip, lsr #32 - PAGE_SHIFT
+		beq	.cfu_3fupi
+		cmp	r2, ip
+		movlt	ip, r2
+		sub	r2, r2, ip
+		subs	ip, ip, #16
+		blt	.cfu_3rem8lp
+	PLD(	pld	[r1, #12]		)
+	PLD(	pld	[r0, #12]		)
+	PLD(	subs	ip, ip, #32		)
+	PLD(	blt	.cfu_3cpynopld		)
+	PLD(	pld	[r1, #28]		)
+	PLD(	pld	[r0, #28]		)
+
+.cfu_3cpy8lp:
+	PLD(	pld	[r1, #44]		)
+	PLD(	pld	[r0, #44]		)
+.cfu_3cpynopld:	mov	r3, r7, pull #24
+		ldmia	r1!, {r4 - r7}			@ Shouldnt fault
+		orr	r3, r3, r4, push #8
+		mov	r4, r4, pull #24
+		orr	r4, r4, r5, push #8
+		mov	r5, r5, pull #24
+		orr	r5, r5, r6, push #8
+		mov	r6, r6, pull #24
+		orr	r6, r6, r7, push #8
+		stmia	r0!, {r3 - r6}
+		subs	ip, ip, #16
+		bpl	.cfu_3cpy8lp
+	PLD(	cmn	ip, #32			)
+	PLD(	bge	.cfu_3cpynopld		)
+	PLD(	add	ip, ip, #32		)
+
+.cfu_3rem8lp:	tst	ip, #8
+		movne	r3, r7, pull #24
+		ldmneia	r1!, {r4, r7}			@ Shouldnt fault
+		orrne	r3, r3, r4, push #8
+		movne	r4, r4, pull #24
+		orrne	r4, r4, r7, push #8
+		stmneia	r0!, {r3 - r4}
+		tst	ip, #4
+		movne	r3, r7, pull #24
+USER(		ldrnet	r7, [r1], #4)			@ May fault
+		orrne	r3, r3, r7, push #8
+		strne	r3, [r0], #4
+		ands	ip, ip, #3
+		beq	.cfu_3fupi
+.cfu_3nowords:	mov	r3, r7, get_byte_3
+		teq	ip, #0
+		beq	.cfu_finished
+		cmp	ip, #2
+		strb	r3, [r0], #1
+USER(		ldrgebt	r3, [r1], #1)			@ May fault
+		strgeb	r3, [r0], #1
+USER(		ldrgtbt	r3, [r1], #1)			@ May fault
+		strgtb	r3, [r0], #1
+		b	.cfu_finished
+
+		.section .fixup,"ax"
+		.align	0
+		/*
+		 * We took an exception.  r0 contains a pointer to
+		 * the byte not copied.
+		 */
+9001:		ldr	r2, [sp], #4			@ void *to
+		sub	r2, r0, r2			@ bytes copied
+		ldr	r1, [sp], #4			@ unsigned long count
+		subs	r4, r1, r2			@ bytes left to copy
+		movne	r1, r4
+		blne	__memzero
+		mov	r0, r4
+		ldmfd	sp!, {r4 - r7, pc}
+		.previous
+
+/* Prototype: int __arch_clear_user(void *addr, size_t sz)
+ * Purpose  : clear some user memory
+ * Params   : addr - user memory address to clear
+ *          : sz   - number of bytes to clear
+ * Returns  : number of bytes NOT cleared
+ */
+ENTRY(__arch_clear_user)
+		stmfd	sp!, {r1, lr}
+		mov	r2, #0
+		cmp	r1, #4
+		blt	2f
+		ands	ip, r0, #3
+		beq	1f
+		cmp	ip, #2
+USER(		strbt	r2, [r0], #1)
+USER(		strlebt	r2, [r0], #1)
+USER(		strltbt	r2, [r0], #1)
+		rsb	ip, ip, #4
+		sub	r1, r1, ip		@  7  6  5  4  3  2  1
+1:		subs	r1, r1, #8		@ -1 -2 -3 -4 -5 -6 -7
+USER(		strplt	r2, [r0], #4)
+USER(		strplt	r2, [r0], #4)
+		bpl	1b
+		adds	r1, r1, #4		@  3  2  1  0 -1 -2 -3
+USER(		strplt	r2, [r0], #4)
+2:		tst	r1, #2			@ 1x 1x 0x 0x 1x 1x 0x
+USER(		strnebt	r2, [r0], #1)
+USER(		strnebt	r2, [r0], #1)
+		tst	r1, #1			@ x1 x0 x1 x0 x1 x0 x1
+USER(		strnebt	r2, [r0], #1)
+		mov	r0, #0
+		ldmfd	sp!, {r1, pc}
+
+		.section .fixup,"ax"
+		.align	0
+9001:		ldmfd	sp!, {r0, pc}
+		.previous
+
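
All three entry points share the same contract: the return value is the
number of bytes NOT copied (or not cleared), with 0 meaning complete
success; the lsl/rsb/lsr sequence on PAGE_SHIFT bounds how many bytes can be
moved before the next ldrt/strt must probe a new user page, and the
copy_from_user fixup additionally zeroes the uncopied tail with __memzero.
Typical caller-side usage (sketch; put_buf is an illustrative name):

    static int put_buf(void *uptr, const void *kbuf, unsigned long len)
    {
        unsigned long left = __arch_copy_to_user(uptr, kbuf, len);
        return left ? -EFAULT : 0;   /* 'left' bytes were NOT copied */
    }
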
diff -r e701461b1251 xen/arch/arm/lib/udivdi3.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/lib/udivdi3.c	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,242 @@
+/* More subroutines needed by GCC output code on some machines.  */
+/* Compile this one with gcc.  */
+/* Copyright (C) 1989, 92-98, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING.  If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.  */
+
+/* As a special exception, if you link this library with other files,
+   some of which are compiled with GCC, to produce an executable,
+   this library does not by itself cause the resulting executable
+   to be covered by the GNU General Public License.
+   This exception does not however invalidate any other reasons why
+   the executable file might be covered by the GNU General Public License.
+ */
+/* support functions required by the kernel. based on code from gcc-2.95.3 */
+/* I Molton     29/07/01 */
+
+#include "gcclib.h"
+#include "longlong.h"
+
+static const UQItype __clz_tab[] =
+{
+  0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
+  6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
+  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+};
+
+UDItype
+__udivmoddi4 (UDItype n, UDItype d, UDItype *rp)
+{
+  DIunion ww;
+  DIunion nn, dd;
+  DIunion rr;
+  USItype d0, d1, n0, n1, n2;
+  USItype q0, q1;
+  USItype b, bm;
+
+  nn.ll = n;
+  dd.ll = d;
+
+  d0 = dd.s.low;
+  d1 = dd.s.high;
+  n0 = nn.s.low;
+  n1 = nn.s.high;
+
+  if (d1 == 0)
+    {
+      if (d0 > n1)
+        {
+          /* 0q = nn / 0D */
+
+          count_leading_zeros (bm, d0);
+
+          if (bm != 0)
+            {
+              /* Normalize, i.e. make the most significant bit of the
+                 denominator set.  */
+
+              d0 = d0 << bm;
+              n1 = (n1 << bm) | (n0 >> (SI_TYPE_SIZE - bm));
+              n0 = n0 << bm;
+            }
+
+          udiv_qrnnd (q0, n0, n1, n0, d0);
+          q1 = 0;
+
+          /* Remainder in n0 >> bm.  */
+        }
+      else
+        {
+          /* qq = NN / 0d */
+
+          if (d0 == 0)
+            d0 = 1 / d0;        /* Divide intentionally by zero.  */
+
+          count_leading_zeros (bm, d0);
+
+          if (bm == 0)
+            {
+              /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
+                 conclude (the most significant bit of n1 is set) /\ (the
+                 leading quotient digit q1 = 1).
+
+                 This special case is necessary, not an optimization.
+                 (Shifts counts of SI_TYPE_SIZE are undefined.)  */
+
+              n1 -= d0;
+              q1 = 1;
+            }
+          else
+            {
+              /* Normalize.  */
+
+              b = SI_TYPE_SIZE - bm;
+
+              d0 = d0 << bm;
+              n2 = n1 >> b;
+              n1 = (n1 << bm) | (n0 >> b);
+              n0 = n0 << bm;
+
+              udiv_qrnnd (q1, n1, n2, n1, d0);
+            }
+
+          /* n1 != d0...  */
+
+          udiv_qrnnd (q0, n0, n1, n0, d0);
+
+          /* Remainder in n0 >> bm.  */
+        }
+
+      if (rp != 0)
+        {
+          rr.s.low = n0 >> bm;
+          rr.s.high = 0;
+          *rp = rr.ll;
+        }
+    }
+  else
+    {
+      if (d1 > n1)
+        {
+          /* 00 = nn / DD */
+
+          q0 = 0;
+          q1 = 0;
+
+          /* Remainder in n1n0.  */
+          if (rp != 0)
+            {
+              rr.s.low = n0;
+              rr.s.high = n1;
+              *rp = rr.ll;
+            }
+        }
+      else
+        {
+          /* 0q = NN / dd */
+
+          count_leading_zeros (bm, d1);
+          if (bm == 0)
+            {
+              /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
+                 conclude (the most significant bit of n1 is set) /\ (the
+                 quotient digit q0 = 0 or 1).
+
+                 This special case is necessary, not an optimization.  */
+
+              /* The condition on the next line takes advantage of that
+                 n1 >= d1 (true due to program flow).  */
+              if (n1 > d1 || n0 >= d0)
+                {
+                  q0 = 1;
+                  sub_ddmmss (n1, n0, n1, n0, d1, d0);
+                }
+              else
+                q0 = 0;
+
+              q1 = 0;
+
+              if (rp != 0)
+                {
+                  rr.s.low = n0;
+                  rr.s.high = n1;
+                  *rp = rr.ll;
+                }
+            }
+          else
+            {
+              USItype m1, m0;
+              /* Normalize.  */
+
+              b = SI_TYPE_SIZE - bm;
+
+              d1 = (d1 << bm) | (d0 >> b);
+              d0 = d0 << bm;
+              n2 = n1 >> b;
+              n1 = (n1 << bm) | (n0 >> b);
+              n0 = n0 << bm;
+
+              udiv_qrnnd (q0, n1, n2, n1, d1);
+              umul_ppmm (m1, m0, q0, d0);
+
+              if (m1 > n1 || (m1 == n1 && m0 > n0))
+                {
+                  q0--;
+                  sub_ddmmss (m1, m0, m1, m0, d1, d0);
+                }
+
+              q1 = 0;
+
+              /* Remainder in (n1n0 - m1m0) >> bm.  */
+              if (rp != 0)
+                {
+                  sub_ddmmss (n1, n0, n1, n0, m1, m0);
+                  rr.s.low = (n1 << b) | (n0 >> bm);
+                  rr.s.high = n1 >> bm;
+                  *rp = rr.ll;
+                }
+            }
+        }
+    }
+
+  ww.s.low = q0;
+  ww.s.high = q1;
+  return ww.ll;
+}
+
+UDItype
+__udivdi3 (UDItype n, UDItype d)
+{
+  return __udivmoddi4 (n, d, (UDItype *) 0);
+}
+
+UDItype
+__umoddi3 (UDItype u, UDItype v)
+{
+  UDItype w;
+
+  (void) __udivmoddi4 (u ,v, &w);
+
+  return w;
+}
+
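
For any divisor d != 0 the three entry points satisfy
n == __udivdi3(n, d) * d + __umoddi3(n, d). A quick sanity check
(values worked out by hand, illustrative only):

    unsigned long long q = __udivdi3(1000000007ULL, 13ULL);  /* 76923077 */
    unsigned long long r = __umoddi3(1000000007ULL, 13ULL);  /* 6        */
    /* 76923077 * 13 + 6 == 1000000007 */
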
diff -r e701461b1251 xen/arch/arm/lib/uldivmod.S
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/lib/uldivmod.S	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,148 @@
+/*
+ * A, Q = r0 + (r1 << 32)
+ * B, R = r2 + (r3 << 32)
+ * A / B = Q ... R
+ */
+
+.text
+.global	__aeabi_uldivmod
+.type	__aeabi_uldivmod, function
+.align	0
+A_0	.req	r0
+A_1	.req	r1
+B_0	.req	r2
+B_1	.req	r3
+C_0	.req	r4
+C_1	.req	r5
+D_0	.req	r6
+D_1	.req	r7
+Q_0	.req	r0
+Q_1	.req	r1
+R_0	.req	r2
+R_1	.req	r3
+
+__aeabi_uldivmod:
+	stmfd	sp!, {r4, r5, r6, r7, lr}
+
+	@ Test if B == 0
+	orrs	ip, B_0, B_1		@ Z set -> B == 0
+	beq	L_div_by_0
+	@ Test if B is power of 2: (B & (B - 1)) == 0
+	subs	C_0, B_0, #1
+	sbc	C_1, B_1, #0
+	tst	C_0, B_0
+	tsteq	B_1, C_1
+	beq	L_pow2
+	@ Test if A_1 == B_1 == 0
+	orrs	ip, A_1, B_1
+	beq	L_div_32_32
+
+L_div_64_64:
+	mov	C_0, #1
+	mov	C_1, #0
+	@ D_0 = clz A
+	teq	A_1, #0
+	clz	D_0, A_1
+	clzeq	ip, A_0
+	addeq	D_0, D_0, ip
+	@ D_1 = clz B
+	teq	B_1, #0
+	clz	D_1, B_1
+	clzeq	ip, B_0
+	addeq	D_1, D_1, ip
+	@ if clz B - clz A > 0
+	subs	D_0, D_1, D_0
+	bls	L_done_shift
+	@ B <<= (clz B - clz A)
+	subs	D_1, D_0, #32
+	rsb	ip, D_0, #32
+	movmi	B_1, B_1, lsl D_0
+	orrmi	B_1, B_1, B_0, lsr ip
+	movpl	B_1, B_0, lsl D_1
+	mov	B_0, B_0, lsl D_0
+	@ C = 1 << (clz B - clz A)
+	movmi	C_1, C_1, lsl D_0
+	orrmi	C_1, C_1, C_0, lsr ip
+	movpl	C_1, C_0, lsl D_1
+	mov	C_0, C_0, lsl D_0
+L_done_shift:
+	mov	D_0, #0
+	mov	D_1, #0
+	@ C: current bit; D: result
+L_subtract:
+	@ if A >= B
+	cmp	A_1, B_1
+	cmpeq	A_0, B_0
+	bcc	L_update
+	@ A -= B
+	subs	A_0, A_0, B_0
+	sbc	A_1, A_1, B_1
+	@ D |= C
+	orr	D_0, D_0, C_0
+	orr	D_1, D_1, C_1
+L_update:
+	@ if A == 0: break
+	orrs	ip, A_1, A_0
+	beq	L_exit
+	@ C >>= 1
+	movs	C_1, C_1, lsr #1
+	movs	C_0, C_0, rrx
+	@ if C == 0: break
+	orrs	ip, C_1, C_0
+	beq	L_exit
+	@ B >>= 1
+	movs	B_1, B_1, lsr #1
+	mov	B_0, B_0, rrx
+	b	L_subtract
+L_exit:
+	@ Note: A, B & Q, R are aliases
+	mov	R_0, A_0
+	mov	R_1, A_1
+	mov	Q_0, D_0
+	mov	Q_1, D_1
+	ldmfd	sp!, {r4, r5, r6, r7, pc}
+
+L_div_32_32:
+	@ Note:	A_0 &	r0 are aliases
+	@	Q_1	r1
+	mov	r1, B_0
+	bl	__aeabi_uidivmod
+	mov	R_0, r1
+	mov	R_1, #0
+	mov	Q_1, #0
+	ldmfd	sp!, {r4, r5, r6, r7, pc}
+
+L_pow2:
+	@ Note: A, B and Q, R are aliases
+	@ R = A & (B - 1)
+	and	C_0, A_0, C_0
+	and	C_1, A_1, C_1
+	@ Q = A >> log2(B)
+	@ Note: B must not be 0 here!
+	clz	D_0, B_0
+	add	D_1, D_0, #1
+	rsbs	D_0, D_0, #31
+	bpl	L_1
+	clz	D_0, B_1
+	rsb	D_0, D_0, #31
+	mov	A_0, A_1, lsr D_0
+	add	D_0, D_0, #32
+L_1:
+	movpl	A_0, A_0, lsr D_0
+	orrpl	A_0, A_0, A_1, lsl D_1
+	mov	A_1, A_1, lsr D_0
+	@ Move C back to R
+	mov	R_0, C_0
+	mov	R_1, C_1
+	ldmfd	sp!, {r4, r5, r6, r7, pc}
+
+L_div_by_0:
+	bl	__div0
+	@ As wrong as it could be
+	mov	Q_0, #0
+	mov	Q_1, #0
+	mov	R_0, #0
+	mov	R_1, #0
+	ldmfd	sp!, {r4, r5, r6, r7, pc}
+
+
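
L_div_64_64 is plain binary long division: align B under A using the clz
results, then repeatedly subtract and shift. The same algorithm in C
(sketch; assumes b != 0, since the L_div_by_0 path already handles zero):

    static unsigned long long uldiv_ref(unsigned long long a,
                                        unsigned long long b,
                                        unsigned long long *rem)
    {
        unsigned long long q = 0, bit = 1;

        while (b < a && !(b >> 63)) {   /* the clz-driven alignment  */
            b <<= 1;
            bit <<= 1;
        }
        while (bit) {                   /* L_subtract / L_update     */
            if (a >= b) {
                a -= b;                 /* subs / sbc                */
                q |= bit;               /* orr D, D, C               */
            }
            b >>= 1;                    /* lsr #1 / rrx              */
            bit >>= 1;
        }
        *rem = a;
        return q;
    }
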
diff -r e701461b1251 xen/arch/arm/tegra/Makefile
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/tegra/Makefile	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,1 @@
+obj-y += dummy.o
diff -r e701461b1251 xen/arch/arm/tegra/Rules.mk
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/tegra/Rules.mk	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,1 @@
+CFLAGS-y += -march=armv7-a
diff -r e701461b1251 xen/arch/arm/tegra/dummy.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/tegra/dummy.c	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,3 @@
+void dummy(void)
+{
+}
diff -r e701461b1251 xen/arch/arm/xen/Makefile
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/xen/Makefile	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,19 @@
+obj-y += setup.o
+obj-y += mm.o
+obj-y += irq.o
+obj-y += arch_domain.o
+obj-y += time.o
+obj-y += domain_build.o
+obj-y += fault.o
+obj-y += tlb.o
+obj-y += shutdown.o
+obj-y += arch_domctl.o
+obj-y += cpu.o
+obj-y += iommu.o
+obj-y += grant_table.o
+obj-y += arch_sysctl.o
+obj-y += machine_kexec.o
+obj-y += crash.o
+obj-y += p2m.o
+obj-y += perfmon.o
+obj-y += pci.o
diff -r e701461b1251 xen/arch/arm/xen/arch_domain.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/xen/arch_domain.c	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,212 @@
+/*
+ * arch_domain.c
+ *
+ * Copyright (C) 2008-2011 Samsung Electronics
+ *          Sang-bum Suh    <sbuk.suh@samsung.com>
+ *          Jaemin Ryu      <jm77.ryu@samsung.com>
+ *          JooYoung Hwang  <jooyoung.hwang@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <stdarg.h>
+#include <xen/config.h>
+#include <xen/lib.h>
+#include <xen/sched.h>
+#include <xen/mm.h>
+#include <xen/domain.h>
+#include <xen/errno.h>
+#include <xen/smp.h>
+#include <xen/irq.h>
+#include <xen/irq_cpustat.h>
+#include <xen/softirq.h>
+
+void arch_dump_domain_info(struct domain *d)
+{
+	NOT_YET();
+}
+
+void arch_dump_vcpu_info(struct vcpu *v)
+{
+	NOT_YET();
+}
+
+unsigned long hypercall_create_continuation(unsigned int op,
+        const char *format, ...)
+{
+	NOT_YET();
+
+	return 0;
+}
+
+int arch_domain_create(struct domain *d, unsigned int domcr_flags)
+{
+	NOT_YET();
+
+	return -EINVAL;
+}
+
+void arch_domain_destroy(struct domain *d)
+{
+	NOT_YET();
+}
+
+struct vcpu_guest_context *alloc_vcpu_guest_context(void)
+{
+	NOT_YET();
+
+	return NULL;
+}
+
+void free_vcpu_guest_context(struct vcpu_guest_context *context)
+{
+	NOT_YET();
+}
+
+
+struct vcpu *alloc_vcpu_struct(void)
+{
+	NOT_YET();
+	return NULL;
+}
+
+void arch_vcpu_reset(struct vcpu *v)
+{
+	NOT_YET();
+}
+
+int vcpu_initialise(struct vcpu *v)
+{
+	NOT_YET();
+	return 0;
+}
+
+void vcpu_destroy(struct vcpu *v)
+{
+	NOT_YET();
+}
+
+void free_vcpu_struct(struct vcpu *v)
+{
+	NOT_YET();
+}
+
+struct domain *alloc_domain_struct(void)
+{
+	NOT_YET();
+
+	return NULL;
+}
+
+
+void free_domain_struct(struct domain *d)
+{
+	NOT_YET();
+}
+
+int arch_set_info_guest(struct vcpu *v, vcpu_guest_context_t *ctx)
+{
+	NOT_YET();
+
+	return 0;
+
+}
+
+void domain_relinquish_memory(struct domain *d)
+{
+	NOT_YET();
+}
+
+void dump_pageframe_info(struct domain *d)
+{
+	NOT_YET();
+}
+
+void context_switch(struct vcpu *prev, struct vcpu *next)
+{
+	NOT_YET();
+}
+
+void continue_running(struct vcpu *same)
+{
+	NOT_YET();
+}
+
+void sync_lazy_execstate_cpu(unsigned int cpu)
+{
+	NOT_YET();
+}
+
+void sync_lazy_execstate_mask(cpumask_t mask)
+{
+	NOT_YET();
+}
+
+void sync_vcpu_execstate(struct vcpu *v)
+{
+	NOT_YET();
+}
+
+void sync_local_execstate(void)
+{
+	NOT_YET();
+}
+
+void relinquish_memory(struct domain *d, struct list_head *list)
+{
+	NOT_YET();
+}
+
+int domain_relinquish_resources(struct domain *d)
+{
+	NOT_YET();
+
+	return -EINVAL;
+}
+
+void startup_cpu_idle_loop(void)
+{
+	NOT_YET();
+}
+
+long arch_do_vcpu_op(int cmd, struct vcpu *v, XEN_GUEST_HANDLE(void) arg)
+{
+	NOT_YET();
+
+	return -ENOSYS;
+}
+
+void vcpu_kick(struct vcpu *v)
+{
+	NOT_YET();
+}
+
+void vcpu_mark_events_pending(struct vcpu *v)
+{
+	NOT_YET();
+}
+
+static void vcpu_kick_softirq(void)
+{
+	NOT_YET();
+}
+
+static int __init vcpu_kick_softirq_init(void)
+{
+	NOT_YET();
+
+	return 0;
+}
+
+__initcall(vcpu_kick_softirq_init);
diff -r e701461b1251 xen/arch/arm/xen/arch_domctl.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/xen/arch_domctl.c	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,43 @@
+/*
+ * arch_domctl.c
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ *          Jaemin Ryu      <jm77.ryu@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <stdarg.h>
+#include <xen/config.h>
+#include <xen/lib.h>
+#include <xen/sched.h>
+#include <xen/mm.h>
+#include <xen/domain.h>
+#include <xen/errno.h>
+#include <xen/smp.h>
+#include <xen/irq_cpustat.h>
+#include <xen/softirq.h>
+
+
+void arch_get_info_guest(struct vcpu *v, struct vcpu_guest_context *ctx)
+{
+	NOT_YET();
+}
+
+long arch_do_domctl(struct xen_domctl *domctl, XEN_GUEST_HANDLE(xen_domctl_t) r_domctl)
+{
+	NOT_YET();
+
+	return -EINVAL;
+}
diff -r e701461b1251 xen/arch/arm/xen/arch_sysctl.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/xen/arch_sysctl.c	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,38 @@
+/*
+ * arch_sysctl.c
+ *
+ * Copyright (C) 2008-2011 Samsung Electronics
+ *          Sang-bum Suh <sbuk.suh@samsung.com>
+ *          Jaemin Ryu   <jm77.ryu@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <stdarg.h>
+#include <xen/config.h>
+#include <xen/lib.h>
+#include <xen/sched.h>
+#include <xen/mm.h>
+#include <xen/domain.h>
+#include <xen/errno.h>
+#include <xen/smp.h>
+#include <xen/irq_cpustat.h>
+#include <xen/softirq.h>
+
+long arch_do_sysctl(struct xen_sysctl *sysctl, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
+{
+	NOT_YET();
+
+	return -EINVAL;
+}
diff -r e701461b1251 xen/arch/arm/xen/asm-offsets.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/xen/asm-offsets.c	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,40 @@
+#include <xen/config.h>
+#include <xen/mm.h>
+#include <xen/perfc.h>
+#include <xen/sched.h>
+#include <asm/hardirq.h>
+#include <asm/current.h>
+
+#if defined(__APCS_26__)
+#error Sorry, your compiler targets APCS-26 but this kernel requires APCS-32
+#endif
+/*
+ * GCC 2.95.1, 2.95.2: ignores register clobber list in asm().
+ * GCC 3.0, 3.1: general bad code generation.
+ * GCC 3.2.0: incorrect function argument offset calculation.
+ * GCC 3.2.x: miscompiles NEW_AUX_ENT in fs/binfmt_elf.c
+ *            (http://gcc.gnu.org/PR8896) and incorrect structure
+ *	      initialisation in fs/jffs2/erase.c
+ */
+#if __GNUC__ < 2 || \
+   (__GNUC__ == 2 && __GNUC_MINOR__ < 95) || \
+   (__GNUC__ == 2 && __GNUC_MINOR__ == 95 && __GNUC_PATCHLEVEL__ != 0 && \
+					     __GNUC_PATCHLEVEL__ < 3) || \
+   (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
+#error Your compiler is too buggy; it is known to miscompile kernels.
+#error    Known good compilers: 2.95.3, 2.95.4, 2.96, 3.3
+#endif
+
+/* Use marker if you need to separate the values later */
+
+#define DEFINE(sym, val) \
+        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+
+#define BLANK() asm volatile("\n->" : : )
+
+int main(void)
+{
+	BLANK();
+
+	return 0; 
+}
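
As an aside on the DEFINE() trick above: asm-offsets.c is only ever compiled to
assembly, never linked, and the build system extracts the "->" markers from that
assembly into a generated asm-offsets.h for use by .S files. A minimal,
hypothetical entry (CTXT_VCPU here is an assumed name, not part of this patch)
would look like:

    /* Emits "->CTXT_VCPU <offset>" into the compiler's assembly output;
     * the build machinery rewrites such markers into
     * "#define CTXT_VCPU <offset>" in the generated asm-offsets.h. */
    DEFINE(CTXT_VCPU, offsetof(struct cpu_info, vcpu));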
diff -r e701461b1251 xen/arch/arm/xen/bug.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/xen/bug.c	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,32 @@
+#include <xen/stdarg.h>
+#include <xen/config.h>
+#include <xen/version.h>
+#include <xen/init.h>
+#include <xen/lib.h>
+#include <xen/errno.h>
+#include <xen/event.h>
+#include <xen/spinlock.h>
+#include <xen/console.h>
+#include <xen/serial.h>
+#include <xen/softirq.h>
+#include <xen/keyhandler.h>
+#include <xen/mm.h>
+#include <xen/delay.h>
+#include <xen/guest_access.h>
+#include <xen/shutdown.h>
+#include <asm/current.h>
+#include <asm/debugger.h>
+
+void bug(char *file, int line)
+{
+	panic("Xen BUG at %s:%d\n", file, line);
+
+	while(1);
+}
+
+void warn(char *file, int line)
+{
+	printk("Xen WARN at %s:%d\n", file, line);
+
+}
+
diff -r e701461b1251 xen/arch/arm/xen/cpu.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/xen/cpu.c	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,97 @@
+/*
+ * cpu.c
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ *          Sang-bum Suh <sbuk.suh@samsung.com>
+ *          JaeMin Ryu   <jm77.ryu@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <xen/config.h>
+#include <xen/spinlock.h>
+#include <xen/cpumask.h>
+#include <xen/smp.h>
+#include <xen/irq.h>
+#include <xen/softirq.h>
+#include <xen/sched.h>
+#include <xen/preempt.h>
+#include <xen/percpu.h>
+
+cpumask_t cpu_online_map;
+cpumask_t cpu_present_map;
+cpumask_t cpu_possible_map;
+
+nodemask_t node_online_map = {{ [0] = 1UL }};
+
+unsigned char cpu_to_node[NR_CPUS] __read_mostly = {
+        [0 ... NR_CPUS-1] = NUMA_NO_NODE
+};
+
+cpumask_t node_to_cpumask[MAX_NUMNODES] __read_mostly;
+
+DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t,cpu_sibling_mask);
+DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t,cpu_core_mask);
+
+int __cpu_up(unsigned int cpu)
+{
+	NOT_YET();
+
+	return 0;
+}
+
+void __cpu_disable(void)
+{
+	NOT_YET();
+}
+
+void __cpu_die(unsigned int cpu)
+{
+	NOT_YET();
+}
+
+void set_cpu_sibling_map(unsigned int cpu)
+{
+	NOT_YET();
+}
+
+void smp_prepare_cpus(unsigned int max_cpus)
+{
+	NOT_YET();
+}
+
+void smp_prepare_boot_cpu(void)
+{
+	NOT_YET();
+}
+
+asmlinkage void start_xen_on_slave_cpu(void)
+{
+	NOT_YET();
+}
+
+void smp_send_event_check_mask(const cpumask_t *mask)
+{
+	NOT_YET();
+}
+
+void smp_call_function(void (*f)(void *param), void *param, int wait)
+{
+	NOT_YET();
+}
+
+void smp_send_state_dump(unsigned int cpu)
+{
+	NOT_YET();
+}
diff -r e701461b1251 xen/arch/arm/xen/crash.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/xen/crash.c	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,25 @@
+/*
+ * crash.c
+ *
+ * Copyright (C) 2008 Samsung Electronics
+ *          Sang-bum Suh <sbuk.suh@samsung.com>
+ *          JaeMin Ryu   <jm77.ryu@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+void machine_crash_shutdown(void)
+{
+}
+
diff -r e701461b1251 xen/arch/arm/xen/domain_build.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/xen/domain_build.c	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,47 @@
+/*
+ * domain_build.c
+ *
+ * Copyright (C) 2008-2011 Samsung Electronics
+ *          Sang-bum Suh <sbuk.suh@samsung.com>
+ *          JaeMin Ryu   <jm77.ryu@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#include <xen/config.h>
+#include <xen/types.h>
+#include <xen/errno.h>
+#include <xen/compile.h>
+#include <xen/sched.h>
+#include <xen/elf.h>
+#include <xen/domain.h>
+#include <xen/mm.h>
+#include <xen/iocap.h>
+#include <xen/xmalloc.h>
+#include <xen/preempt.h>
+#include <xen/libelf.h>
+#include <public/xen.h>
+#include <public/version.h>
+
+/*
+ * domain_construct() should always be invoked from the idle domain.
+ */
+int domain_construct(struct domain *d, 
+		     unsigned long img_start, unsigned long img_len, 
+		     unsigned long dom_size, unsigned int vcpus)
+{
+	NOT_YET();
+
+	return -EINVAL;
+}
+
diff -r e701461b1251 xen/arch/arm/xen/domain_page.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/xen/domain_page.c	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,22 @@
+#include <xen/config.h>
+#include <xen/init.h>
+#include <xen/lib.h>
+#include <xen/perfc.h>
+#include <xen/domain_page.h>
+
+#ifdef CONFIG_DOMAIN_PAGE
+
+void *map_domain_page(unsigned long pfn)
+{
+	NOT_YET();
+
+	return NULL;
+}
+
+void unmap_domain_page(void *va)
+{
+	NOT_YET();
+}
+
+#endif
+
diff -r e701461b1251 xen/arch/arm/xen/fault.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/xen/fault.c	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,123 @@
+/*
+ * traps.c
+ *
+ * Copyright (C) 2008-2011 Samsung Electronics
+ *          Sang-bum Suh <sbuk.suh@samsung.com>
+ *          JaeMin Ryu   <jm77.ryu@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <xen/config.h>
+#include <xen/compile.h>
+#include <xen/domain_page.h>
+#include <xen/init.h>
+#include <xen/sched.h>
+#include <xen/lib.h>
+#include <xen/console.h>
+#include <xen/mm.h>
+#include <xen/irq.h>
+#include <xen/symbols.h>
+#include <asm/current.h>
+#include <asm/processor.h>
+#include <asm/guest_access.h>
+#include <asm/system.h>
+#include <asm/memory.h>
+
+asmlinkage void __div0(void)
+{
+        printk("Division by zero in kernel.\n");
+}
+
+int fixup_exception(struct cpu_user_regs *regs)
+{
+	return -EINVAL;
+}
+
+void show_registers(struct cpu_user_regs *ctx)
+{
+}
+
+void dump_execution_state(void)
+{
+}
+
+void show_execution_state(struct cpu_user_regs *regs)
+{
+	printk("Not implemented\n");
+}
+
+static int verify_stack(unsigned long sp)
+{
+	return 0;
+}
+
+static void backtrace(struct cpu_user_regs *ctx)
+{
+}
+
+static void unrecoverable_fault(const char *str, int err, struct vcpu *v, struct cpu_ctx *ctx)
+{
+	printk("Unrecoverable Fault : %s\n", str);
+
+	while(1);
+
+}
+
+long do_set_callbacks(unsigned long event, unsigned long failsafe)
+{
+	return -EINVAL;
+
+}
+
+asmlinkage void do_prefetch_abort(unsigned long pc, struct cpu_ctx *ctx)
+{
+	unrecoverable_fault("prefetch abort", 0, current, ctx);
+	while(1);
+}
+
+asmlinkage void do_data_abort(unsigned long fsr, unsigned long far, struct cpu_ctx *ctx)
+{
+	unrecoverable_fault("data abort", 0, current, ctx);
+	while(1);
+}
+
+asmlinkage void do_undefined_instruction(unsigned long pc, struct cpu_ctx *ctx)
+{
+	unrecoverable_fault("undefined instruction", 0, current, ctx);
+	while(1);
+}
+
+void vcpu_show_execution_state(struct vcpu *v)
+{
+	printk("Not implemented\n");
+}
+
+long register_guest_nmi_callback(unsigned long address)
+{
+	printk("Not implemented yet\n");
+
+	return -1;
+}
+
+void unregister_guest_nmi_callback(void)
+{
+	printk("Not implemented yet\n");
+}
+
+long do_set_trap_table(XEN_GUEST_HANDLE(trap_info_t) traps)
+{
+	return -EFAULT;
+}
+
diff -r e701461b1251 xen/arch/arm/xen/grant_table.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/xen/grant_table.c	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,53 @@
+/*
+ * grant_table.c
+ *
+ * Copyright (C) 2008-2011 Samsung Electronics
+ *          Sang-bum Suh <sbuk.suh@samsung.com>
+ *          SungKwan Heo <sk.heo@samsung.com>
+ *          JaeMin Ryu   <jm77.ryu@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <xen/lib.h>
+#include <xen/types.h>
+#include <xen/cpumask.h>
+#include <xen/list.h>
+#include <xen/kernel.h>
+#include <xen/string.h>
+#include <xen/errno.h>
+#include <xen/sched.h>
+#include <xen/mm.h>
+#include <xen/domain_page.h>
+#include <xen/irq_cpustat.h>
+#include <xen/event.h>
+#include <xen/iocap.h>
+#include <xen/perfc.h>
+#include <xen/guest_access.h>
+
+
+int create_grant_host_mapping(uint64_t addr, unsigned long frame, unsigned int flags, unsigned int cache_flags)
+{
+	NOT_YET();
+	
+	return -EINVAL;
+}
+
+int replace_grant_host_mapping(uint64_t addr, unsigned long frame, uint64_t new_addr, unsigned int flags)
+{
+	NOT_YET();
+
+	return GNTST_general_error;
+}
+
diff -r e701461b1251 xen/arch/arm/xen/iommu.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/xen/iommu.c	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,24 @@
+
+#include <xen/lib.h>
+#include <xen/types.h>
+#include <xen/list.h>
+#include <xen/string.h>
+#include <xen/errno.h>
+#include <xen/sched.h>
+#include <xen/mm.h>
+#include <xen/iocap.h>
+#include <asm/iommu.h>
+
+int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn, unsigned int flags)
+{
+	NOT_YET();
+
+	return -EINVAL;
+}
+
+int iommu_unmap_page(struct domain *d, unsigned long gfn)
+{
+	NOT_YET();
+
+	return -EINVAL;
+}
diff -r e701461b1251 xen/arch/arm/xen/irq.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/xen/irq.c	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,84 @@
+/*
+ * irq.c
+ *
+ * Copyright (C) 2008-2011 Samsung Electronics
+ *          Sang-bum Suh <sbuk.suh@samsung.com>
+ *          JaeMin Ryu   <jm77.ryu@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <xen/config.h>
+#include <xen/types.h>
+#include <xen/init.h>
+#include <xen/lib.h>
+#include <xen/irq.h>
+#include <xen/errno.h>
+#include <xen/spinlock.h>
+#include <xen/sched.h>
+#include <xen/event.h>
+#include <public/event_channel.h>
+#include <public/physdev.h>
+#include <public/arch-arm.h>
+
+hw_irq_controller no_irq_type = {
+	.typename = "none",
+	.startup  = irq_startup_none,
+	.shutdown = irq_shutdown_none,
+	.enable   = irq_enable_none,
+	.disable  = irq_disable_none,
+};
+
+struct irq_desc *irq_desc;
+
+int pirq_guest_unmask(struct domain *d)
+{
+	NOT_YET();
+
+	return 0;
+}
+
+int pirq_guest_bind(struct vcpu *v, struct pirq *pirq, int will_share)
+{
+	NOT_YET();
+
+	return 0;
+}
+
+void pirq_guest_unbind(struct domain *d, struct pirq *pirq)
+{
+	NOT_YET();
+}
+
+
+void pirq_set_affinity(struct domain *d, int pirq, const cpumask_t *mask)
+{
+	NOT_YET();
+}
+
+
+struct pirq *alloc_pirq_struct(struct domain *d)
+{
+	NOT_YET();
+
+	return NULL;
+}
+
+int arch_init_one_irq_desc(struct irq_desc *desc)
+{
+	NOT_YET();
+
+	return 0;
+}
+
diff -r e701461b1251 xen/arch/arm/xen/machine_kexec.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/xen/machine_kexec.c	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,31 @@
+#include <xen/config.h>
+#include <xen/errno.h>
+#include <xen/lib.h>
+#include <xen/smp.h>
+#include <xen/types.h>
+#include <xen/console.h>
+#include <xen/kexec.h>
+#include <xen/domain_page.h>
+
+int machine_kexec_load(int type, int slot, xen_kexec_image_t *image)
+{
+    return -EINVAL;
+}
+
+void machine_kexec_unload(int type, int slot, xen_kexec_image_t *image)
+{
+}
+
+void machine_reboot_kexec(xen_kexec_image_t *image)
+{
+}
+
+void machine_kexec(xen_kexec_image_t *image)
+{
+}
+
+int machine_kexec_get(xen_kexec_range_t *range)
+{
+	return -EINVAL;
+}
+
diff -r e701461b1251 xen/arch/arm/xen/mm.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/xen/mm.c	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,196 @@
+/*
+ * mm.c
+ *
+ * Copyright (C) 2008-2011 Samsung Electronics
+ *          Sang-bum Suh  <sbuk.suh@samsung.com>
+ *          JaeMin Ryu    <jm77.ryu@samsung.com>
+ *          SungKwan Heo  <sk.heo@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <xen/lib.h>
+#include <xen/types.h>
+#include <xen/cpumask.h>
+#include <xen/list.h>
+#include <xen/kernel.h>
+#include <xen/string.h>
+#include <xen/errno.h>
+#include <xen/sched.h>
+#include <xen/mm.h>
+#include <xen/domain_page.h>
+#include <xen/irq_cpustat.h>
+#include <xen/event.h>
+#include <xen/iocap.h>
+#include <xen/perfc.h>
+#include <xen/guest_access.h>
+
+#define VERBOSE 1
+
+#define MMU_UPDATE_PREEMPTED          (~(~0U >> 1))
+
+static unsigned long mpt_size;
+
+/* Frame table and its size in pages. */
+struct page_info *frame_table;
+unsigned long min_page = ~0UL;
+unsigned long max_page = 0UL;
+
+unsigned long xenheap_phys_start = ~0UL;
+unsigned long xenheap_phys_end = 0UL;
+
+unsigned long xen_phys_start = ~0UL;
+unsigned long xen_phys_end = 0UL;
+
+#ifdef MEMORY_GUARD
+void memguard_init(void)
+{
+	NOT_YET();
+}
+
+void memguard_guard_range(void *p, unsigned long l)
+{
+	NOT_YET();
+}
+
+void memguard_unguard_range(void *p, unsigned long l)
+{
+	NOT_YET();
+}
+
+#endif
+
+void put_page(struct page_info *page)
+{
+	NOT_YET();
+}
+
+struct domain *page_get_owner_and_reference(struct page_info *page)
+{
+	NOT_YET();
+
+	return NULL;
+}
+
+int get_page(struct page_info *page, struct domain *domain)
+{
+	NOT_YET();
+
+	return 0;
+}
+
+void share_xen_page_with_guest(struct page_info *page, struct domain *d, int readonly)
+{
+	NOT_YET();
+}
+
+void share_xen_page_with_privileged_guests(struct page_info *page, int readonly)
+{
+	NOT_YET();
+}
+
+static int pin_page_table(u32 mfn, struct domain *d)
+{
+	NOT_YET();
+
+	return 0;
+}
+
+static int unpin_page_table(u32 mfn, struct domain *d)
+{
+	NOT_YET();
+
+	return 0;
+}
+
+void free_page_type(struct page_info *page, unsigned long type)
+{
+	NOT_YET();
+}
+
+void put_page_type(struct page_info *page)
+{
+	NOT_YET();
+}
+
+
+int get_page_type(struct page_info *page, unsigned long type)
+{
+	NOT_YET();
+
+	return 0;
+}
+
+int do_mmuext_op(XEN_GUEST_HANDLE(mmuext_op_t) uops, unsigned int count,
+		 XEN_GUEST_HANDLE(uint) pdone, unsigned int foreigndom)
+{
+	NOT_YET();
+
+	return -EINVAL;
+}
+
+int do_mmu_update(XEN_GUEST_HANDLE(mmu_update_t) ureqs,
+		  unsigned int count, 
+		  XEN_GUEST_HANDLE(uint) pdone,
+		  unsigned int foreigndom)
+{
+	NOT_YET();
+
+	return -EINVAL;
+}
+
+int do_update_va_mapping(u32 va, u32 flags, u64 val64)
+{
+	NOT_YET();
+
+	return -EINVAL;
+}
+
+long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
+{
+	NOT_YET();
+
+	return -EINVAL;
+}
+
+
+
+int steal_page(struct domain *d, struct page_info *page, unsigned int memflags)
+{
+	NOT_YET();
+
+	return -EINVAL;
+}
+
+int donate_page(struct domain *d, struct page_info *page, unsigned int memflags)
+{
+	NOT_YET();
+
+	return -EINVAL;
+}
+
+
+unsigned long domain_get_maximum_gpfn(struct domain *d)
+{
+	NOT_YET();
+
+	return 0xFFFFFFFF;
+}
+
+int page_is_ram_type(unsigned long mfn, unsigned long mem_type)
+{
+	NOT_YET();
+
+	return -EINVAL;
+}
diff -r e701461b1251 xen/arch/arm/xen/p2m.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/xen/p2m.c	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,44 @@
+/*
+ * p2m.c
+ *
+ * Copyright (C) 2008-2011 Samsung Electronics
+ *          Sang-bum Suh  <sbuk.suh@samsung.com>
+ *          JaeMin Ryu    <jm77.ryu@samsung.com>
+ *          SungKwan Heo  <sk.heo@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <asm/domain.h>
+#include <asm/page.h>
+#include <asm/paging.h>
+#include <asm/p2m.h>
+#include <xen/event.h>
+
+int p2m_pod_decrease_reservation(struct domain *d,
+                             xen_pfn_t gpfn,
+                             unsigned int order)
+{
+	NOT_YET();
+
+	return 0;
+}
+
+int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
+                                      unsigned int order)
+{
+	NOT_YET();
+
+	return 0;
+}
diff -r e701461b1251 xen/arch/arm/xen/pci.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/xen/pci.c	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,74 @@
+/******************************************************************************
+ * pci.c
+ * 
+ * Architecture-dependent PCI access functions.
+ */
+
+#include <xen/spinlock.h>
+#include <xen/pci.h>
+#include <asm/io.h>
+
+static DEFINE_SPINLOCK(pci_config_lock);
+
+uint32_t pci_conf_read(uint32_t cf8, uint8_t offset, uint8_t bytes)
+{
+    unsigned long flags;
+    uint32_t value;
+
+    BUG_ON((offset + bytes) > 4);
+
+    spin_lock_irqsave(&pci_config_lock, flags);
+
+    outl(cf8, 0xcf8);
+
+    switch ( bytes )
+    {
+    case 1:
+        value = inb(0xcfc + offset);
+        break;
+    case 2:
+        value = inw(0xcfc + offset);
+        break;
+    case 4:
+        value = inl(0xcfc + offset);
+        break;
+    default:
+        value = 0;
+        BUG();
+    }
+
+    spin_unlock_irqrestore(&pci_config_lock, flags);
+
+    return value;
+}
+
+void pci_conf_write(uint32_t cf8, uint8_t offset, uint8_t bytes, uint32_t data)
+{
+    unsigned long flags;
+
+    BUG_ON((offset + bytes) > 4);
+
+    spin_lock_irqsave(&pci_config_lock, flags);
+
+    outl(cf8, 0xcf8);
+
+    switch ( bytes )
+    {
+    case 1:
+        outb((uint8_t)data, 0xcfc + offset);
+        break;
+    case 2:
+        outw((uint16_t)data, 0xcfc + offset);
+        break;
+    case 4:
+        outl(data, 0xcfc + offset);
+        break;
+    }
+
+    spin_unlock_irqrestore(&pci_config_lock, flags);
+}
+
+
+#define PCI_CONF_ADDRESS(bus, dev, func, reg) \
+    (0x80000000 | (bus << 16) | (dev << 11) | (func << 8) | (reg & ~3))
+
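For illustration, and assuming the inb/outl port accessors declared in asm/io.h
behave as they do on x86, a caller would pair PCI_CONF_ADDRESS() with
pci_conf_read() roughly as follows (hypothetical sketch, not part of the patch):

    /* Read the 16-bit vendor ID (config offset 0x00) of bus 0,
     * device 0, function 0 via configuration mechanism #1. */
    uint32_t cf8 = PCI_CONF_ADDRESS(0, 0, 0, 0x00);
    uint16_t vendor = (uint16_t)pci_conf_read(cf8, 0, 2);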
diff -r e701461b1251 xen/arch/arm/xen/perfmon.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/xen/perfmon.c	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,26 @@
+#include <xen/event.h>
+#include <xen/types.h>
+#include <xen/errno.h>
+#include <xen/init.h>
+#include <xen/nmi.h>
+#include <xen/string.h>
+#include <xen/delay.h>
+#include <xen/xenoprof.h>
+#include <public/xen.h>
+
+
+int xenoprof_arch_counter(XEN_GUEST_HANDLE(void) arg)
+{
+	NOT_YET();
+
+	return 0;
+}
+
+
+int xenoprof_arch_ibs_counter(XEN_GUEST_HANDLE(void) arg)
+{
+	NOT_YET();
+
+	return 0;
+}
+
diff -r e701461b1251 xen/arch/arm/xen/setup.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/xen/setup.c	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,64 @@
+/*
+ * setup.c
+ *
+ * Copyright (C) 2008-2011 Samsung Electronics
+ *          Sang-bum Suh   <sbuk.suh@samsung.com>
+ *     	    Jaemin Ryu     <jm77.ryu@samsung.com>
+ *          JooYoung Hwang <jooyoung.hwang@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <xen/config.h>
+#include <xen/init.h>
+#include <xen/sched.h>
+#include <xen/mm.h>
+#include <xen/compile.h>
+#include <xen/string.h>
+#include <xen/lib.h>
+#include <xen/preempt.h>
+#include <public/version.h>
+#include <public/sched.h>
+
+
+struct domain _dom_xen = {
+        .refcnt = ATOMIC_INIT(1),
+        .domain_id = DOMID_XEN,
+        .domain_lock = SPIN_LOCK_UNLOCKED,
+};
+
+struct domain _dom_io = {
+        .refcnt = ATOMIC_INIT(1),
+        .domain_id = DOMID_IO,
+        .domain_lock = SPIN_LOCK_UNLOCKED,
+};
+
+struct domain _dom_cow = {
+        .refcnt = ATOMIC_INIT(1),
+        .domain_id = DOMID_COW,
+        .domain_lock = SPIN_LOCK_UNLOCKED,
+};
+
+struct domain *dom_xen = &_dom_xen;
+struct domain *dom_io = &_dom_io;
+struct domain *dom_cow = &_dom_cow;
+
+void arch_get_xen_caps(xen_capabilities_info_t *info)
+{
+}
+
+asmlinkage void start_xen(void)
+{
+}
+
diff -r e701461b1251 xen/arch/arm/xen/shutdown.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/xen/shutdown.c	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,38 @@
+/*
+ * shutdown.c
+ *
+ * Copyright (C) 2008-2011 Samsung Electronics
+ *          Sang-bum Suh <sbuk.suh@samsung.com>
+ *          JaeMin Ryu   <jm77.ryu@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <xen/types.h>
+#include <xen/init.h>
+#include <xen/lib.h>
+#include <xen/shutdown.h>
+
+void machine_halt(void)
+{
+	printk("machine_halt called: spinning....\n");
+	while(1);
+}
+
+void machine_restart(unsigned int delay_millisecs)
+{
+	printk("machine_restart called: spinning....\n");
+	while(1);
+}
+
diff -r e701461b1251 xen/arch/arm/xen/time.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/xen/time.c	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,83 @@
+/*
+ * time.c 
+ *
+ * Copyright (C) 2008-2011 Samsung Electronics 
+ *          Sang-bum Suh    <sbuk.suh@samsung.com>
+ *          JooYoung Hwang  <jooyoung.hwang@samsung.com>
+ *          Jaemin Ryu      <jm77.ryu@samsung.com>
+ * 
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <xen/init.h>
+#include <xen/time.h>
+#include <xen/sched.h>
+#include <xen/event.h>
+#include <xen/softirq.h>
+#include <asm/types.h>
+#include <asm/current.h>
+#include <asm/div64.h>
+#include <asm/time.h>
+
+void send_timer_event(struct vcpu *v)
+{
+	NOT_YET();
+}
+
+int reprogram_timer(s_time_t timeout)
+{
+	NOT_YET();
+
+	return 1;
+}
+
+void smp_broadcast_timer(void)
+{
+	NOT_YET();
+}
+
+void update_vcpu_system_time(struct vcpu *v)
+{
+	NOT_YET();
+
+	return;
+}
+
+void do_settime(unsigned long secs, unsigned long nsecs, u64 system_time_base)
+{
+	NOT_YET();
+}
+
+struct tm wallclock_time(void)
+{
+	return gmtime(0);
+}
+
+
+s_time_t get_s_time(void)
+{
+	NOT_YET();
+
+	return 0;
+}
+
+void domain_set_time_offset(struct domain *d, int32_t time_offset_seconds)
+{
+	NOT_YET();
+}
+
+void timekeeping_init(void)
+{
+	NOT_YET();
+}
diff -r e701461b1251 xen/arch/arm/xen/tlb.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/xen/tlb.c	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,26 @@
+/*
+ * tlb.c
+ *
+ * Copyright (C) 2008-2011 Samsung Electronics
+ *          Sang-bum Suh <sbuk.suh@samsung.com>
+ *          JaeMin Ryu   <jm77.ryu@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#include <xen/config.h>
+#include <xen/init.h>
+#include <xen/sched.h>
+#include <xen/softirq.h>
+
+u32 tlbflush_clock = 1U;
diff -r e701461b1251 xen/arch/arm/xen/xen.lds.S
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/arm/xen/xen.lds.S	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,159 @@
+/*
+ * xen.lds.S
+ *
+ * Copyright (C) 2008 Samsung Electronics
+ *          Sang-bum Suh <sbuk.suh@samsung.com>
+ *          ChanJu Park  <bestworld@samsung.com>
+ *          JaeMin Ryu   <jm77.ryu@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <xen/config.h>
+#include <asm/page.h>
+
+OUTPUT_ARCH(arm)
+ENTRY(start)
+
+SECTIONS
+{
+	. = 0xFF008000;
+	_start = .;
+	.text : {
+		_stext = .;
+		*(.head)
+		*(.text)
+		*(.fixup)
+		*(.gnu.warning)
+		_etext = .;
+	}
+
+	.rodata : {
+		*(.rodata)
+		*(.rodata.*)
+	}
+
+	. = ALIGN(32);
+	.data.read_mostly : {
+		/* Exception table */
+		_sextable = .;
+		__start___ex_table = .;
+		*(.ex_table)
+		__stop___ex_table = .;
+
+		/* Pre-exception table */
+		__start___pre_ex_table = .;
+		*(.ex_table.pre)
+		__stop___pre_ex_table = .;
+		_eextable = .;
+		*(.data.read_mostly)
+		*(.data.rel.ro)
+		*(.data.rel.ro.*)
+	} 
+
+	. = ALIGN(PAGE_SIZE);
+	.data : {
+		_sdata = .;
+		*(.data)
+		*(.data.rel)
+		*(.data.rel.*)
+		_edata = .;
+	}
+
+	. = ALIGN(PAGE_SIZE);             /* Init code and data */
+	__init_begin = .;
+
+	.init.text : {
+		_sinittext = .;
+		*(.init.text) 
+		_einittext = .;
+	}
+
+	.init.data : {
+		_sinitdata = .;
+		*(.init.rodata)
+		*(.init.rodata.str*)
+		*(.init.data)
+		*(.init.data.rel)
+		*(.init.data.rel.*)
+		_einitdata = .;
+	}
+
+	. = ALIGN(32);
+	.init.memtable : {
+		_smemtable = .;
+		*(.init.memtable)
+		*(.init.memtable.*)
+		_ememtable = .;
+	}
+
+	. = ALIGN(32);
+	.init.setup : {
+		_sinitsetup = .;
+		__setup_start = .;
+		*(.init.setup) 
+		__setup_end = .;
+		_einitsetup = .;
+	}
+
+	.initcall.init : {
+		_sinitcall = .;
+		__initcall_start = .;
+		*(.initcallpresmp.init)
+		__presmp_initcall_end = .;
+		*(.initcall1.init) 
+		__initcall_end = .;
+		_einitcall = .;
+	}
+
+	.xsm_initcall.init : {
+		_sxsm_initcall = .;
+		__xsm_initcall_start = .;
+		*(.xsm_initcall.init)
+		__xsm_initcall_end = .;
+		_exsm_initcall = .;
+	}
+	__init_end = .;
+
+	. = ALIGN(PAGE_SIZE);
+
+	.bss : {
+		_sbss = .;		/* BSS */
+		__bss_start = .;
+		*(.bss.page_aligned)
+		*(.bss.stack_aligned)
+		*(.bss.percpu)
+		*(.bss)
+		__bss_end = .;
+		_ebss = .;
+	}
+	_end = . ;
+	/* Sections to be discarded */
+
+  	/DISCARD/ : {
+  		*(.text.exit)
+		*(.data.exit)
+		*(.exitcall.exit)
+	}
+	/* Stabs debugging sections.  */
+	.stab 0 : { *(.stab) }
+	.stabstr 0 : { *(.stabstr) }
+	.stab.excl 0 : { *(.stab.excl) }
+	.stab.exclstr 0 : { *(.stab.exclstr) }
+	.stab.index 0 : { *(.stab.index) }
+	.stab.indexstr 0 : { *(.stab.indexstr) }
+	.comment 0 : { *(.comment) }
+	
+}
+
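The .initcall.init output section above is what makes the __initcall()
registrations earlier in this patch (e.g. vcpu_kick_softirq_init) work: each
registration drops a function pointer between __initcall_start and
__initcall_end, which common startup code later walks. A sketch of that walk
(illustrative only; the real loop lives in Xen's common code, not this patch):

    typedef int (*initcall_t)(void);
    extern initcall_t __initcall_start[], __initcall_end[];

    static void run_initcalls(void)
    {
        initcall_t *call;

        /* The linker script gathers every .initcall*.init entry
         * between the two boundary symbols defined above. */
        for ( call = __initcall_start; call < __initcall_end; call++ )
            (*call)();
    }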
diff -r e701461b1251 xen/include/asm-arm/acpi.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/acpi.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,8 @@
+#ifndef __ARM_ACPI_H__
+#define __ARM_ACPI_H__
+
+#define COMPILER_DEPENDENT_INT64   long long
+#define COMPILER_DEPENDENT_UINT64  unsigned long long
+
+#endif /*!__ARM_ACPI_H__ */
+
diff -r e701461b1251 xen/include/asm-arm/asm-macros.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/asm-macros.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,106 @@
+#ifndef __ARM_ASM_MACROS_H__
+#define __ARM_ASM_MACROS_H__
+
+#include <asm/system.h>
+
+#ifdef __ASSEMBLY__
+/*
+ * Endian independent macros for shifting bytes within registers.
+ */
+#ifndef __ARMEB__
+#define pull            lsr
+#define push            lsl
+#define get_byte_0      lsl #0
+#define get_byte_1      lsr #8
+#define get_byte_2      lsr #16
+#define get_byte_3      lsr #24
+#define put_byte_0      lsl #0
+#define put_byte_1      lsl #8
+#define put_byte_2      lsl #16
+#define put_byte_3      lsl #24
+#else
+#define pull            lsl
+#define push            lsr
+#define get_byte_0      lsr #24
+#define get_byte_1      lsr #16
+#define get_byte_2      lsr #8
+#define get_byte_3      lsl #0
+#define put_byte_0      lsl #24
+#define put_byte_1      lsl #16
+#define put_byte_2      lsl #8
+#define put_byte_3      lsl #0
+#endif
+
+#define PLD(code...)	code
+
+#define CTXT_R0		0
+#define CTXT_R1		4
+#define CTXT_R2		8
+#define CTXT_R3		12
+#define CTXT_R4		16
+#define CTXT_R5		20
+#define CTXT_R6		24
+#define CTXT_R7		28
+#define CTXT_R8		32
+#define CTXT_R9		36
+#define CTXT_R10	40
+#define CTXT_R11	44
+#define CTXT_R12	48
+#define CTXT_USP	52
+#define CTXT_ULR	56
+#define CTXT_SSP	60
+#define CTXT_SLR	64
+#define CTXT_PC		68
+#define CTXT_SPSR	72
+#define CTXT_EXTRA	76
+#define CTXT_FRAME_SIZE	80
+
+#define SPFIX(code...)	code
+
+.macro  disable_irq, temp
+	msr	cpsr_c, #PSR_I_BIT | PSR_MODE_SVC
+.endm
+
+.macro	cci	rd
+	mov	\rd, #STACK_SIZE
+	sub	\rd, \rd, #1
+	bic	\rd, r13, \rd
+.endm
+
+/*
+ * Save the current IRQ state and disable IRQs.  Note that this macro
+ * assumes FIQs are enabled, and that the processor is in SVC mode.
+ */
+.macro	save_and_disable_irqs, oldcpsr, temp
+	mrs	\oldcpsr, cpsr
+	mov	\temp, #PSR_I_BIT | PSR_MODE_SVC
+	msr	cpsr_c, \temp
+.endm
+
+/*
+ * Restore interrupt state previously stored in a register.  We don't
+ * guarantee that this will preserve the flags.
+ */
+.macro	restore_irqs, oldcpsr
+	msr	cpsr_c, \oldcpsr
+.endm
+
+#define USER(x...)				\
+9999:	x;					\
+	.section .ex_table,"a";		\
+	.align	3;				\
+	.long	9999b,9001f;			\
+	.previous
+
+#define __ALIGN         .align 0
+#define __ALIGN_STR     ".align 0, 0x90"
+
+#define ALIGN           __ALIGN
+#define ALIGN_STR       __ALIGN_STR
+
+#define ENTRY(name) \
+  .global name; \
+  ALIGN; \
+  name:
+#endif
+#endif /* __ARM_ASM_MACROS_H__ */
diff -r e701461b1251 xen/include/asm-arm/atomic.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/atomic.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,179 @@
+#ifndef __ARM_ATOMIC_H__
+#define __ARM_ATOMIC_H__
+
+#ifndef __ASSEMBLY__
+#define read_atomic(p) 							\
+({									\
+	typeof(*p) __x;							\
+	switch ( sizeof(*p) ) {						\
+	case 1: __x = (typeof(*p))atomic_read8((uint8_t *)p); break;	\
+	case 2: __x = (typeof(*p))atomic_read16((uint16_t *)p); break;	\
+	case 4: __x = (typeof(*p))atomic_read32((uint32_t *)p); break;	\
+	case 8: __x = (typeof(*p))atomic_read64((uint64_t *)p); break;	\
+	default: __x = 0; __bad_atomic_size(); break;			\
+	}								\
+	__x;								\
+})
+
+#define write_atomic(p, x) 						\
+({									\
+	typeof(*p) __x = (x);						\
+	switch ( sizeof(*p) ) {						\
+	case 1: atomic_write8((uint8_t *)p, (uint8_t)__x); break;	\
+	case 2: atomic_write16((uint16_t *)p, (uint16_t)__x); break;	\
+	case 4: atomic_write32((uint32_t *)p, (uint32_t)__x); break;	\
+	case 8: atomic_write64((uint64_t *)p, (uint64_t)__x); break;	\
+	default: __bad_atomic_size(); break;				\
+	}								\
+	__x;								\
+})
+
+
+static inline uint8_t atomic_read8(const volatile uint8_t *addr)
+{
+	return (*addr);
+}
+
+
+static inline uint16_t atomic_read16(const volatile uint16_t *addr)
+{
+	return (*addr);
+}
+
+static inline uint32_t atomic_read32(const volatile uint32_t *addr)
+{
+	return (*addr);
+}
+
+static inline void atomic_write8(volatile uint8_t *addr, uint8_t val)
+{
+	(*addr) = val;
+}
+
+static inline void atomic_write16(volatile uint16_t *addr, uint16_t val)
+{
+	(*addr) = val;
+}
+
+static inline void atomic_write32(volatile uint32_t *addr, uint32_t val)
+{
+	(*addr) = val;
+}
+
+
+typedef struct {
+	volatile int counter;
+} atomic_t;
+
+
+#define ATOMIC_INIT(i)		{ (i) }
+
+#define atomic_read(v)		((v)->counter)
+
+static inline void atomic_set(atomic_t *v, int i)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__("@ atomic_set\n"
+"1:     ldrex   %0, [%1]\n"
+"       strex   %0, %2, [%1]\n"
+"       teq     %0, #0\n"
+"       bne     1b"
+	: "=&r" (tmp)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	unsigned long tmp;
+	int result;
+
+	__asm__ __volatile__("@ atomic_add_return\n"
+"1:     ldrex   %0, [%2]\n"
+"       add     %0, %0, %3\n"
+"       strex   %1, %0, [%2]\n"
+"       teq     %1, #0\n"
+"       bne     1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "Ir" (i)
+	: "cc");
+
+	return result;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	unsigned long tmp;
+	int result;
+
+	__asm__ __volatile__("@ atomic_sub_return\n"
+"1:     ldrex   %0, [%2]\n"
+"       sub     %0, %0, %3\n"
+"       strex   %1, %0, [%2]\n"
+"       teq     %1, #0\n"
+"       bne     1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "Ir" (i)
+	: "cc");
+
+	return result;
+}
+
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+	unsigned long tmp, tmp2;
+
+	__asm__ __volatile__("@ atomic_clear_mask\n"
+"1:     ldrex   %0, [%2]\n"
+"       bic     %0, %0, %3\n"
+"       strex   %1, %0, [%2]\n"
+"       teq     %1, #0\n"
+"       bne     1b"
+	: "=&r" (tmp), "=&r" (tmp2)
+	: "r" (addr), "Ir" (mask)
+	: "cc");
+}
+
+static inline atomic_t atomic_cmpxchg(atomic_t *ptr, atomic_t old, atomic_t new)
+{
+	atomic_t oldval, res;
+
+	do {
+		__asm__ __volatile__("@ atomic_cmpxchg\n"
+		"ldrex  %1, [%2]\n"
+		"mov    %0, #0\n"
+		"teq    %1, %3\n"
+		"strexeq %0, %4, [%2]\n"
+		: "=&r" (res.counter), "=&r" (oldval.counter)
+		: "r" (&ptr->counter), "Ir" (old.counter), "r" (new.counter)
+		: "cc");
+	} while (res.counter);
+
+	return oldval;
+}
+
+#define _atomic_read(v)		atomic_read(&v)
+#define _atomic_set(v,i)	atomic_set(&v,i)
+
+#define atomic_add(i, v)	(void) atomic_add_return(i, v)
+#define atomic_inc(v)		(void) atomic_add_return(1, v)
+#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
+#define atomic_dec(v)		(void) atomic_sub_return(1, v)
+
+#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
+#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
+#define atomic_inc_return(v)    (atomic_add_return(1, v))
+#define atomic_dec_return(v)    (atomic_sub_return(1, v))
+
+#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
+
+static inline atomic_t atomic_compareandswap(atomic_t old, atomic_t new, atomic_t *v)
+{
+        atomic_t rc;
+        rc = atomic_cmpxchg( (atomic_t *)v, old, new);
+        return rc;
+}
+#endif /*!__ASSEMBLY__ */
+#endif /*!__ARM_ATOMIC_H__ */
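
A brief usage sketch of the atomic interface above (illustrative only; note the
8-byte cases of read_atomic()/write_atomic() assume atomic_read64()/
atomic_write64() accessors that this header does not yet define):

    static atomic_t refcnt = ATOMIC_INIT(1);

    static void get_ref(void)
    {
        atomic_inc(&refcnt);
    }

    /* Returns 1 when the last reference has been dropped. */
    static int put_ref(void)
    {
        return atomic_dec_and_test(&refcnt);
    }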
diff -r e701461b1251 xen/include/asm-arm/bitops.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/bitops.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,193 @@
+#ifndef __ARM_BITOPS_H__
+#define __ARM_BITOPS_H__
+
+#include <xen/config.h>
+#include <asm/system.h>
+
+#ifndef __ASSEMBLY__
+static inline void atomic_set_bit(unsigned int bit, volatile unsigned long *p)
+{
+	unsigned long flags;
+	unsigned long mask = 1UL << (bit & 31);
+
+	p += bit >> 5;
+
+	local_irq_save(flags);
+	*p |= mask;
+	local_irq_restore(flags);
+}
+
+static inline void atomic_clear_bit(unsigned int bit, volatile unsigned long *p)
+{
+	unsigned long flags;
+	unsigned long mask = 1UL << (bit & 31);
+
+	p += bit >> 5;
+
+	local_irq_save(flags);
+	*p &= ~mask;
+	local_irq_restore(flags);
+}
+
+static inline void atomic_change_bit(unsigned int bit, volatile unsigned long *p)
+{
+	unsigned long flags;
+	unsigned long mask = 1UL << (bit & 31);
+
+	p += bit >> 5;
+
+	local_irq_save(flags);
+	*p ^= mask;
+	local_irq_restore(flags);
+}
+
+static inline int atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p)
+{
+	unsigned long flags;
+	unsigned int res;
+	unsigned long mask = 1UL << (bit & 31);
+
+	p += bit >> 5;
+
+	local_irq_save(flags);
+	res = *p;
+	*p = res | mask;
+	local_irq_restore(flags);
+
+	return res & mask;
+}
+
+static inline int atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
+{
+	unsigned long flags;
+	unsigned int res;
+	unsigned long mask = 1UL << (bit & 31);
+
+	p += bit >> 5;
+
+	local_irq_save(flags);
+	res = *p;
+	*p = res & ~mask;
+	local_irq_restore(flags);
+
+	return res & mask;
+}
+
+static inline int atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
+{
+	unsigned long flags;
+	unsigned int res;
+	unsigned long mask = 1UL << (bit & 31);
+
+	p += bit >> 5;
+
+	local_irq_save(flags);
+	res = *p;
+	*p = res ^ mask;
+	local_irq_restore(flags);
+
+	return res & mask;
+}
+
+/*
+ * Now the non-atomic variants.  We let the compiler handle all
+ * optimisations for these.  These are all _native_ endian.
+ */
+static inline void set_bit(int nr, volatile void *p)
+{
+	volatile unsigned long *m = (unsigned long *)p;
+
+	m[nr >> 5] |= (1UL << (nr & 31));
+}
+
+static inline void clear_bit(int nr, volatile void *p)
+{
+	volatile unsigned long *m = (unsigned long *)p;
+
+	m[nr >> 5] &= ~(1UL << (nr & 31));
+}
+
+static inline void change_bit(int nr, volatile void *p)
+{
+	volatile unsigned long *m = (unsigned long *)p;
+
+	m[nr >> 5] ^= (1UL << (nr & 31));
+}
+
+static inline int test_and_set_bit(int nr, volatile void *p)
+{
+	volatile unsigned long *m = (unsigned long *)p;
+	unsigned long oldval, mask = 1UL << (nr & 31);
+
+	m += nr >> 5;
+
+	oldval = *m;
+	*m = oldval | mask;
+	return oldval & mask;
+}
+
+static inline int test_and_clear_bit(int nr, volatile void *p)
+{
+	volatile unsigned long *m = (unsigned long *)p;
+	unsigned long oldval, mask = 1UL << (nr & 31);
+
+	m += nr >> 5;
+
+	oldval = *m;
+	*m = oldval & ~mask;
+	return oldval & mask;
+}
+
+static inline int test_and_change_bit(int nr, volatile void *p)
+{
+	volatile unsigned long *m = (unsigned long *)p;
+	unsigned long oldval, mask = 1UL << (nr & 31);
+
+	m += nr >> 5;
+
+	oldval = *m;
+	*m = oldval ^ mask;
+	return oldval & mask;
+}
+
+/*
+ * This routine doesn't need to be atomic.
+ */
+static inline int test_bit(int nr, const volatile void *p)
+{
+	volatile unsigned long *m = (unsigned long *)p;
+
+	return (m[nr >> 5] >> (nr & 31)) & 1UL;
+}
+
+extern int _find_first_zero_bit(const void *p, int sz);
+extern int _find_next_zero_bit(const void *p, int sz, int offset);
+extern int _find_first_bit(const void *p, int sz);
+extern int _find_next_bit(const void *p, int sz, int offset);
+
+#define find_first_zero_bit(p,sz)	_find_first_zero_bit(p,sz)
+#define find_next_zero_bit(p,sz,off)	_find_next_zero_bit(p,sz,off)
+#define find_first_bit(p,sz)		_find_first_bit(p,sz)
+#define find_next_bit(p,sz,off)		_find_next_bit(p,sz,off)
+#define find_first_set_bit(word)	(ffs(word)-1)
+#define WORD_BITOFF_TO_LE(x)		((x))
+
+#define __test_and_set_bit(nr, addr)	test_and_set_bit(nr, addr)
+
+static __inline__ int generic_fls(int x);
+#define fls(x) \
+	( __builtin_constant_p(x) ? generic_fls(x) : \
+	  ({ int __r; asm("clz\t%0, %1" : "=r"(__r) : "r"(x) : "cc"); 32-__r; }) )
+#define ffs(x)		({ unsigned long __t = (x); fls(__t & -__t); })
+#define __ffs(x)	(ffs(x) - 1)
+#define ffz(x)		__ffs( ~(x) )
+/*
+ * hweightN: returns the hamming weight (i.e. the number
+ * of bits set) of an N-bit word
+ */
+
+#define hweight32(x) generic_hweight32(x)
+#define hweight16(x) generic_hweight16(x)
+#define hweight8(x) generic_hweight8(x)
+#endif /*!__ASSEMBLY__ */
+#endif /*!__ARM_BITOPS_H__ */
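
As a usage sketch, the find_first_zero_bit()/atomic_test_and_set_bit() pairing
above supports a simple slot allocator (illustrative only; alloc_slot is a
hypothetical name and BITS_TO_LONGS is assumed from Xen's common headers):

    static unsigned long inuse[BITS_TO_LONGS(64)];

    static int alloc_slot(void)
    {
        int bit;

        do {
            bit = find_first_zero_bit(inuse, 64);
            if ( bit >= 64 )
                return -1;      /* bitmap full */
            /* Retry if another CPU claimed the bit meanwhile. */
        } while ( atomic_test_and_set_bit(bit, inuse) );

        return bit;
    }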
diff -r e701461b1251 xen/include/asm-arm/bug.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/bug.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,32 @@
+#ifndef __ARM_BUG_H__
+#define __ARM_BUG_H__
+
+#ifndef __ASSEMBLY__
+#define BUG()							\
+	do {							\
+		printk("BUG at %s:%d\n", __FILE__, __LINE__);	\
+		while(1);					\
+	} while ( 0 )
+
+#define PANIC(msg)						\
+	do {							\
+		printk("Panic at %s:%d\n", __FILE__, __LINE__); \
+		while(1);					\
+	} while ( 0 )
+
+#define WARN()							\
+	do {							\
+		printk("WARNING at %s:%d\n", __FILE__, __LINE__);	\
+		dump_execution_state();				\
+	} while ( 0 )
+
+
+#define NOT_YET()						\
+	do {							\
+		printk("NOT YET %s:%d\n", __FILE__, __LINE__);	\
+	} while (0)
+
+void dump_execution_state(void);
+#endif /*!__ASSEMBLY__*/
+#endif /*!__ARM_BUG_H__*/
+
diff -r e701461b1251 xen/include/asm-arm/byteorder.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/byteorder.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,9 @@
+#ifndef __ARM_BYTEORDER_H__
+#define __ARM_BYTEORDER_H__
+
+#define __BYTEORDER_HAS_U64__
+
+#include <xen/byteorder/little_endian.h>
+
+
+#endif /* __ARM_BYTEORDER_H__ */
diff -r e701461b1251 xen/include/asm-arm/cache.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/cache.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,11 @@
+#ifndef __ARM_CACHE_H__
+#define __ARM_CACHE_H__
+
+#ifndef L1_CACHE_BYTES
+#define L1_CACHE_BYTES          32
+#endif
+
+#ifndef __ASSEMBLY__
+#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#endif /*!__ASSEMBLY__ */
+#endif /*!__ARM_CACHE_H__ */
diff -r e701461b1251 xen/include/asm-arm/config.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/config.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,61 @@
+#ifndef __ARM_CONFIG_H__
+#define __ARM_CONFIG_H__
+
+#include <asm/arch/config.h>
+
+#ifndef MAX_HVM_VCPUS
+#define MAX_HVM_VCPUS		1
+#endif
+
+#define MAX_VIRT_CPUS		XEN_LEGACY_MAX_VCPUS
+#define COMPAT_LEGACY_MAX_VCPUS XEN_LEGACY_MAX_VCPUS
+
+#ifndef MAX_PHYS_CPUS
+#define MAX_PHYS_CPUS		1
+#endif
+
+#define NR_CPUS			MAX_PHYS_CPUS
+
+#define ELFSIZE			32
+
+#ifndef XEN_PHYS_SIZE
+#define XEN_PHYS_SIZE		(0xF00000)
+#endif
+
+
+#if (MAX_PHYS_CPUS > 1)
+#define CONFIG_SMP		1
+#define SMP			1
+#endif
+
+#define STACK_ORDER		0
+#define STACK_SIZE		(PAGE_SIZE << STACK_ORDER)
+
+#ifndef NDEBUG
+# define MEMORY_GUARD
+#endif
+
+
+#define supervisor_mode_kernel	(0)
+
+#define HYPERVISOR_VIRT_START	(0xFC000000)
+#define XEN_VIRT_START		(0xFF000000)
+
+#ifndef __ASSEMBLY__
+
+#define OPT_CONSOLE_STR		"com1"
+
+#ifdef __cplusplus
+#define CPP_ASMLINKAGE extern "C"
+#else
+#define CPP_ASMLINKAGE
+#endif
+
+#ifndef asmlinkage
+#define asmlinkage CPP_ASMLINKAGE
+#endif
+#endif /* !__ASSEMBLY__ */
+#endif /* !__ARM_CONFIG_H__*/
+
+
+
diff -r e701461b1251 xen/include/asm-arm/cpu-domain.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/cpu-domain.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,39 @@
+#ifndef __ARM_CPU_DOMAIN_H__
+#define __ARM_CPU_DOMAIN_H__
+
+/*
+ * Domain ID
+ */
+#define DOMAIN_SVC		0
+#define DOMAIN_IO		2
+#define DOMAIN_USR		1
+#define DOMAIN_HYP		15
+
+/*
+ * Domain types
+ */
+#define DOMAIN_NOACCESS		0
+#define DOMAIN_CLIENT		1
+#define DOMAIN_MANAGER		3
+
+#define DOMAIN_VALUE(dom,type)	((type) << (2 * (dom)))
+
+#define DACR_STAT_HYP					\
+	(DOMAIN_VALUE(DOMAIN_HYP, DOMAIN_CLIENT) |	\
+	 DOMAIN_VALUE(DOMAIN_SVC, DOMAIN_CLIENT) |	\
+	 DOMAIN_VALUE(DOMAIN_IO,  DOMAIN_CLIENT) |	\
+	 DOMAIN_VALUE(DOMAIN_USR, DOMAIN_CLIENT))
+
+#define DACR_STAT_SVC					\
+	(DOMAIN_VALUE(DOMAIN_HYP, DOMAIN_CLIENT) |	\
+	 DOMAIN_VALUE(DOMAIN_SVC, DOMAIN_MANAGER) |	\
+	 DOMAIN_VALUE(DOMAIN_IO,  DOMAIN_MANAGER) |	\
+	 DOMAIN_VALUE(DOMAIN_USR, DOMAIN_CLIENT))
+
+#define DACR_STAT_USR					\
+	(DOMAIN_VALUE(DOMAIN_HYP, DOMAIN_CLIENT) |	\
+	 DOMAIN_VALUE(DOMAIN_SVC, DOMAIN_CLIENT) |	\
+	 DOMAIN_VALUE(DOMAIN_IO,  DOMAIN_CLIENT) |	\
+	 DOMAIN_VALUE(DOMAIN_USR, DOMAIN_CLIENT))
+
+#endif /* __ARM_CPU_DOMAIN_H__ */
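
Since DOMAIN_VALUE() packs a 2-bit access type per domain ID into the DACR, the
constants above expand to concrete register values; working through
DACR_STAT_HYP as an example:

    /* DACR_STAT_HYP = (1 << 30)    DOMAIN_HYP = 15, client
     *               | (1 <<  0)    DOMAIN_SVC = 0,  client
     *               | (1 <<  4)    DOMAIN_IO  = 2,  client
     *               | (1 <<  2)    DOMAIN_USR = 1,  client
     *               = 0x40000015
     */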
diff -r e701461b1251 xen/include/asm-arm/current.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/current.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,73 @@
+/*
+ *  current.h
+ *
+ * Copyright (C) 2008 Samsung Electronics
+ *	ChanJu Park <beastworld@samsung.com>
+ *	JaeMin Ryu  <jm77.ryu@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#ifndef __ARM_CURRENT_H__
+#define __ARM_CURRENT_H__
+
+#include <public/xen.h>
+#include <asm/page.h>
+
+#ifndef __ASSEMBLY__
+struct vcpu;
+
+struct cpu_info {
+	struct vcpu	*vcpu;
+	unsigned long	vspsr;
+	unsigned long	vsp;
+	unsigned long	vlr;
+	unsigned long	vdacr;
+	struct cpu_user_regs guest_cpu_user_regs;
+};
+
+static inline struct cpu_info * get_cpu_info(void)
+{
+	register unsigned long sp asm("r13");
+	return (struct cpu_info *)(sp & ~(STACK_SIZE - 1));
+}
+
+static inline struct vcpu *get_current(void)
+{
+        return get_cpu_info()->vcpu;
+}
+
+#define current get_current()
+
+static inline void set_current(struct vcpu *v)
+{   
+	get_cpu_info()->vcpu = v;
+}
+
+static inline void set_current_vcpu(struct vcpu *v)
+{
+        struct cpu_info *ci;
+
+        ci = get_cpu_info();
+        ci->vcpu = v;
+}
+
+static inline void cpu_info_init(struct cpu_info *cpu_info)
+{
+        cpu_info->vcpu = NULL;
+}
+
+#define guest_cpu_user_regs()	(&(get_cpu_info()->guest_cpu_user_regs))
+#endif
+
+#endif /* __ARM_CURRENT_H__ */
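
Since each physical CPU's stack is assumed to be STACK_SIZE-aligned with struct
cpu_info at its base, "current" costs only a mask of the stack pointer. A
minimal usage sketch (illustrative only):

    struct vcpu *v = current;                 /* get_cpu_info()->vcpu */
    struct cpu_user_regs *regs = guest_cpu_user_regs();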
diff -r e701461b1251 xen/include/asm-arm/debugger.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/debugger.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,24 @@
+#ifndef __ARM_DEBUGGER_H__
+#define __ARM_DEBUGGER_H__
+
+#include <xen/errno.h>
+
+#ifndef __ASSEMBLY__
+#define debugger_trap_immediate()	{;}
+
+static inline int debugger_trap_fatal(unsigned int vector, struct cpu_user_regs *regs)
+{
+	printk("Not implemented yet\n");
+
+	return -EINVAL;
+}
+
+
+void show_stack(struct cpu_user_regs *regs);
+void show_stack_overflow(unsigned int cpu, unsigned long esp);
+void show_registers(struct cpu_user_regs *regs);
+void show_execution_state(struct cpu_user_regs *regs);
+#endif /*!__ASSEMBLY__*/
+
+#endif /*!__ARM_DEBUGGER_H__ */
+
diff -r e701461b1251 xen/include/asm-arm/delay.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/delay.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,6 @@
+#ifndef __ARM_DELAY_H__
+#define __ARM_DELAY_H__
+
+#define udelay(n) 	_udelay(n)
+#endif
+
diff -r e701461b1251 xen/include/asm-arm/div64.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/div64.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,43 @@
+#ifndef __ARM_DIV64__
+#define __ARM_DIV64__
+
+#include <asm/system.h>
+
+#ifndef __ASSEMBLY__
+/*
+ * The semantics of do_div() are:
+ *
+ * uint32_t do_div(uint64_t *n, uint32_t base)
+ * {
+ * 	uint32_t remainder = *n % base;
+ * 	*n = *n / base;
+ * 	return remainder;
+ * }
+ *
+ * In other words, a 64-bit dividend with a 32-bit divisor producing
+ * a 64-bit result and a 32-bit remainder.  To accomplish this optimally
+ * we call a special __do_div64 helper with completely non standard
+ * calling convention for arguments and results (beware).
+ */
+#define __xl "r0"
+#define __xh "r1"
+
+#define do_div(n,base)						\
+({								\
+	register unsigned int __base      asm("r4") = base;	\
+	register unsigned long long __n   asm("r0") = n;	\
+	register unsigned long long __res asm("r2");		\
+	register unsigned int __rem       asm(__xh);		\
+	asm(	__asmeq("%0", __xh)				\
+		__asmeq("%1", "r2")				\
+		__asmeq("%2", "r0")				\
+		__asmeq("%3", "r4")				\
+		"bl	__do_div64"				\
+		: "=r" (__rem), "=r" (__res)			\
+		: "r" (__n), "r" (__base)			\
+		: "ip", "lr", "cc");				\
+	n = __res;						\
+	__rem;							\
+})
+#endif /*!__ASSEMBLY__*/
+#endif /*!__ARM_DIV64_H__ */
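
A minimal usage sketch of do_div() (illustrative only; per the semantics
documented above, the macro updates its first argument in place and evaluates
to the 32-bit remainder):

    uint64_t ns = 3000000123ULL;
    uint32_t rem = do_div(ns, 1000000000U);   /* now ns == 3, rem == 123 */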
diff -r e701461b1251 xen/include/asm-arm/domain.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/domain.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,79 @@
+#ifndef __ARM_DOMAIN_H__
+#define __ARM_DOMAIN_H__
+#include <xen/init.h>
+#include <xen/mm.h>
+#include <xen/spinlock.h>
+#include <xen/tasklet.h>
+#include <asm/numa.h>
+#include <asm/iommu.h>
+#include <public/arch-arm.h>
+
+#if 0
+#define MAPHASH_ENTRIES			8
+#define MAPHASH_HASHFN(pfn)		((pfn) & (MAPHASH_ENTRIES-1))
+#define MAPHASHENT_NOTINUSE		((u16)~0U)
+
+struct vcpu_maphash {
+    struct vcpu_maphash_entry {
+        unsigned long pfn;
+        uint16_t      idx;
+        uint16_t      refcnt;
+    } hash[MAPHASH_ENTRIES];
+}__cacheline_aligned;
+
+
+#define MAPCACHE_ORDER   8
+#define MAPCACHE_ENTRIES (1 << MAPCACHE_ORDER)
+
+struct mapcache {
+    /* The PTEs that provide the mappings, and a cursor into the array. */
+    l2e_t	*table;
+    unsigned int cursor;
+
+    /* Protects map_domain_page(). */
+    spinlock_t lock;
+
+    /* Which mappings are in use, and which are garbage to reap next epoch? */
+    unsigned long inuse[BITS_TO_LONGS(MAPCACHE_ENTRIES)];
+    unsigned long garbage[BITS_TO_LONGS(MAPCACHE_ENTRIES)];
+
+    /* Lock-free per-VCPU hash of recently-used mappings. */
+    struct vcpu_maphash vcpu_maphash[MAX_VIRT_CPUS];
+}__cacheline_aligned;
+#endif
+struct arch_domain
+{
+#if 0
+    /* I/O-port admin-specified access capabilities. */
+    struct rangeset	*ioport_caps;
+
+    int *irq_pirq;
+    int *pirq_irq;
+
+    unsigned long *pirq_eoi_map;
+    unsigned long pirq_eoi_map_mfn;
+#endif
+    struct page_list_head relmem_list;
+};
+
+struct arch_vcpu
+{
+	struct vcpu_guest_context ctx;
+} __cacheline_aligned;
+
+//#define VCPU_REG(v, reg)	v->arch.ctx.reg
+
+#define return_reg(v)		((v)->arch.ctx.r0)
+
+void vcpu_show_execution_state(struct vcpu *v);
+void startup_cpu_idle_loop(void);
+
+extern struct vcpu *idle_vcpu[];
+
+static inline struct vcpu *get_idle_vcpu(unsigned int cpu)
+{
+        return idle_vcpu[cpu];
+}
+
+#endif /* __ARM_DOMAIN_H__ */
+
diff -r e701461b1251 xen/include/asm-arm/elf.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/elf.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,53 @@
+/*
+ * elf.h
+ *
+ * Copyright (C) 2008 Samsung Electronics
+ *          Jaemin Ryu <jm77.ryu@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef __ARM_ELF_H__
+#define __ARM_ELF_H__
+
+typedef struct {
+	unsigned long	cr;
+} crash_xen_core_t;
+
+typedef struct {
+	unsigned long	r0;
+	unsigned long	r1;
+	unsigned long	r2;
+	unsigned long	r3;
+	unsigned long	r4;
+	unsigned long	r5;
+	unsigned long	r6;
+	unsigned long	r7;
+	unsigned long	r8;
+	unsigned long	r9;
+	unsigned long	r10;
+	unsigned long	r11;
+	unsigned long	r12;
+	unsigned long	r13;
+	unsigned long	r14;
+	unsigned long	r15;
+} ELF_Gregset;
+
+static inline void elf_core_save_regs(ELF_Gregset *core_regs,
+				      crash_xen_core_t *xen_core_regs)
+{
+}
+
+#endif
+
diff -r e701461b1251 xen/include/asm-arm/event.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/event.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,39 @@
+#ifndef __ARM_EVENT_H__
+#define __ARM_EVENT_H__
+
+#include <xen/shared.h>
+
+#ifndef __ASSEMBLY__
+void vcpu_kick(struct vcpu *v);
+void vcpu_mark_events_pending(struct vcpu *v);
+
+int hvm_local_events_need_delivery(struct vcpu *v);
+static inline int local_events_need_delivery(void)
+{
+	struct vcpu *v = current;
+
+	return (vcpu_info(v, evtchn_upcall_pending) &&
+		!vcpu_info(v, evtchn_upcall_mask));
+}
+
+static inline int local_event_delivery_is_enabled(void)
+{
+	return !vcpu_info(current, evtchn_upcall_mask);
+}
+
+static inline void local_event_delivery_disable(void)
+{
+	vcpu_info(current, evtchn_upcall_mask) = 1;
+}
+
+static inline void local_event_delivery_enable(void)
+{
+	vcpu_info(current, evtchn_upcall_mask) = 0;
+}
+
+/* No arch specific virq definition now. Default to global. */
+static inline int arch_virq_is_global(int virq)
+{
+	return 1;
+}
+#endif /*!__ASSEMBLY__*/
+#endif /*!__ARM_EVENT_H__ */
diff -r e701461b1251 xen/include/asm-arm/flushtlb.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/flushtlb.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,25 @@
+#ifndef __ARM_FLUSHTLB_H__
+#define __ARM_FLUSHTLB_H__
+
+#include <xen/config.h>
+#include <xen/percpu.h>
+#include <xen/smp.h>
+
+#ifndef __ASSEMBLY__
+#define local_flush_tlb()	((void)0)
+#define flush_tlb_mask(mask)	local_flush_tlb()
+
+#define tlbflush_filter(mask,page_timestamp)	\
+do {						\
+	printk("Not implemented yet.\n");	\
+} while(0)
+
+#define tlbflush_current_time()	tlbflush_clock
+
+DECLARE_PER_CPU(u32, tlb_caps);
+DECLARE_PER_CPU(u32, tlbflush_time);
+
+extern u32 tlbflush_clock;
+
+#endif
+#endif /* __ARM_FLUSHTLB_H__ */
diff -r e701461b1251 xen/include/asm-arm/grant_table.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/grant_table.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,62 @@
+#ifndef __ASM_GRANT_TABLE_H__
+#define __ASM_GRANT_TABLE_H__
+
+#define INITIAL_NR_GRANT_FRAMES 4
+
+/*
+ * Caller must own caller's BIGLOCK, is responsible for flushing the TLB, and
+ * must hold a reference to the page.
+ */
+int create_grant_host_mapping(uint64_t addr, unsigned long frame,
+			      unsigned int flags, unsigned int cache_flags);
+int replace_grant_host_mapping(
+    uint64_t addr, unsigned long frame, uint64_t new_addr, unsigned int flags);
+
+#define gnttab_create_shared_page(d, t, i)                               \
+    do {                                                                 \
+        share_xen_page_with_guest(                                       \
+            virt_to_page((char *)(t)->shared_raw[i]),                    \
+            (d), XENSHARE_writable);                                     \
+    } while ( 0 )
+
+#define gnttab_create_status_page(d, t, i)                               \
+    do {                                                                 \
+        share_xen_page_with_guest(                                       \
+           virt_to_page((char *)(t)->status[i]),                         \
+            (d), XENSHARE_writable);                                     \
+    } while ( 0 )
+
+
+#define gnttab_shared_mfn(d, t, i)                      \
+    ((virt_to_maddr((t)->shared_raw[i]) >> PAGE_SHIFT))
+
+#define gnttab_shared_gmfn(d, t, i)                     \
+    (mfn_to_gmfn(d, gnttab_shared_mfn(d, t, i)))
+
+
+#define gnttab_status_mfn(t, i)                         \
+    ((virt_to_maddr((t)->status[i]) >> PAGE_SHIFT))
+
+#define gnttab_status_gmfn(d, t, i)                     \
+    (mfn_to_gmfn(d, gnttab_status_mfn(t, i)))
+
+#define gnttab_mark_dirty(d, f) ((void)f)
+
+static inline void gnttab_clear_flag(unsigned long nr, uint16_t *addr)
+{
+    clear_bit(nr, (unsigned long *)addr);
+}
+
+/* Foreign mappings of HVM-guest pages do not modify the type count. */
+#define gnttab_host_mapping_get_page_type(op, ld, rd)   \
+    (!((op)->flags & GNTMAP_readonly) &&                \
+     (((ld) == (rd)) || !paging_mode_external(rd)))
+
+/* Done implicitly when page tables are destroyed. */
+#define gnttab_release_host_mappings(domain) ( paging_mode_external(domain) )
+
+static inline int replace_grant_supported(void)
+{
+    return 1;
+}
+#endif /* __ASM_GRANT_TABLE_H__ */
diff -r e701461b1251 xen/include/asm-arm/guest_access.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/guest_access.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,136 @@
+/*
+ */
+
+#ifndef __ARM_GUEST_ACCESS_H__
+#define __ARM_GUEST_ACCESS_H__
+
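+/*
+ * __range_ok() evaluates to 0 when [addr, addr+size) fits below
+ * HYPERVISOR_VIRT_START.  The adds/sbcccs pair folds the overflow
+ * check into the comparison: if addr + size wraps, the conditional
+ * instructions are skipped and the non-zero boundary value is
+ * returned, so the access is rejected.
+ */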
+#define __range_ok(addr, size)                                          \
+({                                                                      \
+	unsigned long flags, sum;                                       \
+	__asm__("adds   %1, %2, %3\n\t"                                 \
+		"sbcccs %1, %1, %0\n\t"                                 \
+		"movcc  %0, #0"                                         \
+		: "=&r"(flags), "=&r"(sum)                              \
+		: "r"(addr), "Ir"(size), "0"(HYPERVISOR_VIRT_START)     \
+		: "cc");                                                \
+	flags;                                                          \
+})
+
+#define access_ok(addr,size)    (__range_ok(addr,size) == 0)
+
+#define array_access_ok(addr,count,size)                                \
+	(likely((count) < (~0UL/(size))) && access_ok(addr, (count)*(size)))
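+
+/*
+ * The count < ~0UL/size test guards the count*size multiplication
+ * against overflow, e.g. count = 0x40000000 with size = 8 would
+ * otherwise wrap on a 32-bit unsigned long and pass the range check.
+ */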
+
+/* Raw access functions: no type checking. */
+#define raw_copy_to_guest(dst, src, len)        \
+     __copy_to_user((dst), (src), (len))
+#define raw_copy_from_guest(dst, src, len)      \
+     __copy_from_user((dst), (src), (len))
+#define raw_clear_guest(dst,  len)              \
+     __clear_user((dst), (len))
+#define __raw_copy_to_guest(dst, src, len)      \
+     __copy_to_user((dst), (src), (len))
+#define __raw_copy_from_guest(dst, src, len)    \
+     __copy_from_user((dst), (src), (len))
+#define __raw_clear_guest(dst,  len)            \
+     __clear_user((dst), (len))
+
+
+
+/* Is the guest handle a NULL reference? */
+#define guest_handle_is_null(hnd)		\
+	((hnd).p == NULL)
+
+/* Offset the given guest handle into the array it refers to. */
+#define guest_handle_add_offset(hnd, nr)	\
+	((hnd).p += (nr))
+
+/* Cast a guest handle to the specified type of handle. */
+#define guest_handle_cast(hnd, type)		\
+({						\
+    type *_x = (hnd).p;				\
+    (XEN_GUEST_HANDLE(type)) { _x };		\
+})
+
+
+/*
+ * Pre-validate a guest handle.
+ * Allows use of faster __copy_* functions.
+ */
+#define guest_handle_okay(hnd, nr)                      \
+    array_access_ok((hnd).p, (nr), sizeof(*(hnd).p))
+    
+#define guest_handle_subrange_okay(hnd, first, last)	\
+   (array_access_ok((hnd).p + (first),			\
+		   (last) - (first) + 1,		\
+		   sizeof(*(hnd).p)))
+/*
+ * Copy an array of objects to guest context via a guest handle.
+ * Optionally specify an offset into the guest array.
+ */
+#define copy_to_guest_offset(hnd, idx, ptr, nr) \
+    __copy_to_guest_offset(hnd, idx, ptr, nr)
+
+  
+/*
+ * Copy an array of objects from guest context via a guest handle.
+ * Optionally specify an offset into the guest array.
+ */
+#define copy_from_guest_offset(ptr, hnd, idx, nr) \
+    __copy_from_guest_offset(ptr, hnd, idx, nr)
+    
+    
+/* Copy sub-field of a structure to guest context via a guest handle. */
+#define copy_field_to_guest(hnd, ptr, field) \
+    __copy_field_to_guest(hnd, ptr, field)
+
+/* Copy sub-field of a structure from guest context via a guest handle. */
+#define copy_field_from_guest(ptr, hnd, field) \
+    __copy_field_from_guest(ptr, hnd, field)
+    
+#define __copy_to_guest_offset(hnd, off, ptr, nr) ({    \
+    const typeof(*(ptr)) *_s = (ptr);                   \
+    char (*_d)[sizeof(*_s)] = (void *)(hnd).p;          \
+    ((void)((hnd).p == (ptr)));                         \
+    __copy_to_user(_d+(off), _s, sizeof(*_s)*(nr));     \
+})
+
+#define __copy_from_guest_offset(ptr, hnd, off, nr) ({  \
+    const typeof(*(ptr)) *_s = (hnd).p;                 \
+    typeof(*(ptr)) *_d = (ptr);                         \
+    __copy_from_user(_d, _s+(off), sizeof(*_d)*(nr));   \
+})
+
+#define __copy_field_to_guest(hnd, ptr, field) ({       \
+    const typeof(&(ptr)->field) _x = &(hnd).p->field;   \
+    const typeof(&(ptr)->field) _y = &(ptr)->field;     \
+    __copy_to_user(_x, _y, sizeof(*_x));                \
+})
+
+#define __copy_field_from_guest(ptr, hnd, field) ({     \
+    const typeof(&(ptr)->field) _x = &(hnd).p->field;   \
+    const typeof(&(ptr)->field) _y = &(ptr)->field;     \
+    __copy_from_user(_y, _x, sizeof(*_x));              \
+})
+
+
+extern unsigned long __arch_copy_from_user(void *to, const void *from, unsigned long n);
+extern unsigned long __arch_copy_to_user(void *to, const void *from, unsigned long n);
+extern unsigned long __arch_clear_user(void *to, unsigned long n);
+
+static inline unsigned long __copy_from_user(void *to, const void *from, unsigned long n)
+{
+        return __arch_copy_from_user(to, from, n);
+}
+
+
+static inline unsigned long __copy_to_user(void *to, const void *from, unsigned long n)
+{
+        return __arch_copy_to_user(to, from, n);
+}
+
+static inline unsigned long __clear_user(void *to, unsigned long n)
+{
+	return __arch_clear_user(to, n);
+}
+#endif
diff -r e701461b1251 xen/include/asm-arm/hardirq.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/hardirq.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,21 @@
+#ifndef __ARM_HARDIRQ_H__
+#define __ARM_HARDIRQ_H__
+
+#include <xen/config.h>
+#include <xen/cache.h>
+
+#ifndef __ASSEMBLY__
+typedef struct irq_cpustat {
+	unsigned long __softirq_pending;
+	unsigned long __local_irq_count;
+	unsigned long __nmi_count;
+} __cacheline_aligned irq_cpustat_t;
+
+#include <xen/irq_cpustat.h>    /* Standard mappings for irq_cpustat_t above */
+
+#define in_irq() 	(local_irq_count(smp_processor_id()) != 0)
+
+#define irq_enter()     (local_irq_count(smp_processor_id())++)
+#define irq_exit()      (local_irq_count(smp_processor_id())--)
+#endif /*!__ASSEMBLY__*/
+#endif /*!__ARM_HARDIRQ_H__*/
diff -r e701461b1251 xen/include/asm-arm/hypercall.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/hypercall.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,68 @@
+/*
+ * hypercall.h
+ *
+ * Copyright (C) 2008 Samsung Electronics
+ *          JooYoung Hwang <jooyoung.hwang@samsung.com>
+ *          Jaemin Ryu <jm77.ryu@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef __ARM_HYPERCALL_H__
+#define __ARM_HYPERCALL_H__
+#include <public/physdev.h>
+
+#ifndef __ASSEMBLY__
+extern long do_set_trap_table(XEN_GUEST_HANDLE(trap_info_t) traps);
+
+extern int do_mmu_update(XEN_GUEST_HANDLE(mmu_update_t) ureqs,
+			 unsigned int count,
+			 XEN_GUEST_HANDLE(uint) pdone,
+			 unsigned int foreigndom);
+
+extern long do_set_gdt(XEN_GUEST_HANDLE(ulong) frame_list,
+		       unsigned int entries);
+
+extern long do_stack_switch(unsigned long ss, unsigned long esp);
+
+extern long do_fpu_taskswitch(int set);
+
+extern long do_set_debugreg(int reg, unsigned long value);
+
+extern unsigned long do_get_debugreg(int reg);
+
+extern long do_update_descriptor(u64 pa, u64 desc);
+
+extern int do_update_va_mapping(u32 va, u32 flags, u64 val64);
+
+extern long do_physdev_op(XEN_GUEST_HANDLE(physdev_op_t) uop);
+
+extern int do_update_va_mapping_otherdomain(unsigned long va,
+					    u64 val64,
+					    unsigned long flags,
+					    domid_t domid);
+
+extern int do_mmuext_op(XEN_GUEST_HANDLE(mmuext_op_t) uops,
+			unsigned int count,
+			XEN_GUEST_HANDLE(uint) pdone,
+			unsigned int foreigndom);
+
+extern unsigned long do_iret(void);
+
+struct vcpu;
+extern long arch_do_vcpu_op(int cmd, struct vcpu *v, XEN_GUEST_HANDLE(void) arg);
+
+extern long do_set_callbacks(unsigned long event, unsigned long failsafe);
+#endif /*!__ASSEMBLY__*/
+#endif /*!__ARM_HYPERCALL_H__*/
diff -r e701461b1251 xen/include/asm-arm/init.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/init.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,4 @@
+#ifndef __ARM_INIT_H__
+#define __ARM_INIT_H__
+
+#endif /* __ARM_INIT_H__ */
diff -r e701461b1251 xen/include/asm-arm/io.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/io.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,32 @@
+#ifndef __ARM_IO_H__
+#define __ARM_IO_H__
+#include <xen/types.h>
+
+#define mmio_writeb(v,a)	(*(volatile unsigned char *)(a) = (v))
+#define mmio_writew(v,a)	(*(volatile unsigned short *)(a) = (v))
+#define mmio_writel(v,a)	(*(volatile unsigned int *)(a) = (v))
+
+#define mmio_readb(a)		(*(volatile unsigned char *)(a))
+#define mmio_readw(a)		(*(volatile unsigned short *)(a))
+#define mmio_readl(a)		(*(volatile unsigned int *)(a))
+
+#define writeb(v,a)		mmio_writeb(v,a)
+#define writew(v,a)		mmio_writew(v,a)
+
+#define writel(v,a)		mmio_writel(v,a)
+#define readb(a)		mmio_readb(a)
+#define readw(a)		mmio_readw(a)
+#define readl(a)		mmio_readl(a)
+
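+/* No real remapping yet: device memory is assumed covered by the direct map. */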
+#define ioremap(x,l)		(__va(x))
+#define iounmap(p)		((void)0)
+
+#define inb(a)			mmio_readb(a)
+#define inw(a)			mmio_readw(a)
+#define inl(a)			mmio_readl(a)
+
+#define outb(v,a)		mmio_writeb(v,a)
+#define outw(v,a)		mmio_writew(v,a)
+#define outl(v,a)		mmio_writel(v,a)
+
+#endif	/* __ARM_IO_H__ */
diff -r e701461b1251 xen/include/asm-arm/iocap.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/iocap.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,15 @@
+#ifndef __ARM_IOCAP_H__
+#define __ARM_IOCAP_H__
+
+#define ioports_permit_access(d, s, e)                  \
+    rangeset_add_range((d)->arch.ioport_caps, s, e)
+
+#define ioports_deny_access(d, s, e)                    \
+    rangeset_remove_range((d)->arch.ioport_caps, s, e)
+
+#define ioports_access_permitted(d, s, e)               \
+    rangeset_contains_range((d)->arch.ioport_caps, s, e)
+
+#define multipage_allocation_permitted(d, order)	(0)
+
+#endif
diff -r e701461b1251 xen/include/asm-arm/iommu.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/iommu.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,14 @@
+#ifndef __ARM_IOMMU_H__
+#define __ARM_IOMMU_H__
+
+#ifndef __ASSEMBLY__
+static inline int is_iomem_page(unsigned long mfn)
+{
+	return 0;
+}
+
+int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn, unsigned int flags);
+int iommu_unmap_page(struct domain *d, unsigned long gfn);
+#endif /*!__ASSEMBLY__*/
+#endif /*!__ARM_IOMMU_H__*/
+
diff -r e701461b1251 xen/include/asm-arm/irq.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/irq.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,50 @@
+#ifndef __ARM_IRQ_H__
+#define __ARM_IRQ_H__
+
+#include <xen/config.h>
+#include <xen/cpumask.h>
+
+#ifndef NR_IRQS
+#define NR_IRQS	256
+#endif
+
+#define domain_pirq_to_irq(d, pirq)	(pirq)
+#define domain_irq_to_pirq(d, irq)	(irq)                       
+#define domain_pirq_to_emuirq(d, pirq)	(pirq)
+#define domain_emuirq_to_pirq(d, irq)	(irq)
+
+#define irq_cfg(irq)		(&irq_cfg[irq])
+#define irq_to_desc(irq)	(&irq_desc[irq])	
+
+#define IRQ_MAX_GUESTS		7
+typedef struct {
+	unsigned int ack_type;
+	unsigned char nr_guests;
+	unsigned char in_flight;
+	unsigned char shareable;
+	struct domain *guest[IRQ_MAX_GUESTS];
+} irq_guest_action_t;
+
+struct irq_cfg {
+	int irq;
+};
+
+struct arch_irq_desc {
+};
+
+struct arch_pirq {
+	int irq;
+};
+
+typedef struct {
+    DECLARE_BITMAP(_bits,NR_IRQS);
+} vmask_t;
+
+extern struct irq_desc *irq_desc;
+
+static inline int irq_desc_initialized(struct irq_desc *desc)
+{
+	return 0;
+}
+
+#endif /* __ARM_IRQ_H__ */
diff -r e701461b1251 xen/include/asm-arm/mm.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/mm.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,237 @@
+#ifndef __ARM_MM_H__
+#define __ARM_MM_H__
+
+#include <xen/config.h>
+#include <xen/list.h>
+#include <asm/p2m.h>
+#include <asm/iommu.h>
+#include <asm/mmu.h>
+#include <asm/io.h>
+#include <asm/flushtlb.h>
+
+#define INVALID_GFN		(~0UL)
+#define INVALID_MFN             (~0UL)
+#define INVALID_M2P_ENTRY	(~0UL)
+
+#define VALID_M2P(_e)            (!((_e) & (1UL<<(BITS_PER_LONG-1))))
+#define SHARED_M2P_ENTRY         (~0UL - 1UL)
+#define SHARED_M2P(_e)           ((_e) == SHARED_M2P_ENTRY)
+
+#define PFN_ORDER(_pfn)		((_pfn)->v.free.order)
+
+#define PAGE_TYPE(page)		(((page)->u.inuse.type_info) & PGT_type_mask )
+
+#define pickle_domptr(_d)	((u32)(unsigned long)(_d))
+#define unpickle_domptr(_d)	((struct domain *)(unsigned long)(_d))
+
+#define PRtype_info		"08lx"
+
+#define page_get_owner(_p)	(unpickle_domptr((_p)->v.inuse._domain))
+#define page_set_owner(_p,_d)	((_p)->v.inuse._domain = pickle_domptr(_d))
+
+#define XENSHARE_writable 	0
+#define XENSHARE_readonly 	1
+
+
+#define PG_shift(idx)		(BITS_PER_LONG - (idx))
+#define PG_mask(x, idx)		(x ## UL << PG_shift(idx))
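+/* E.g. on this 32-bit build, PG_shift(4) == 28 and PG_mask(15, 4) == 0xf0000000. */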
+
+#define PGT_none		PG_mask(0, 4)  /* no special uses of this page   */
+#define PGT_l1_page_table	PG_mask(1, 4)  /* using as an L1 page table?     */
+#define PGT_l2_page_table	PG_mask(2, 4)  /* using as an L2 page table?     */
+#define PGT_l3_page_table	PG_mask(3, 4)  /* using as an L3 page table?     */
+#define PGT_writable_page	PG_mask(7, 4)  /* has writable mappings?         */
+#define PGT_shared_page		PG_mask(8, 4)  /* CoW sharable page              */
+#define PGT_type_mask		PG_mask(15, 4) /* Bits 28-31 or 60-63.           */
+
+ /* Owning guest has pinned this page to its current type? */
+#define _PGT_pinned		PG_shift(5)
+#define PGT_pinned		PG_mask(1, 5)
+
+ /* Has this page been validated for use as its current type? */
+#define _PGT_validated		PG_shift(6)
+#define PGT_validated		PG_mask(1, 6)
+
+/* Has this page been *partially* validated for use as its current type? */
+#define _PGT_partial		PG_shift(8)
+#define PGT_partial		PG_mask(1, 8)
+
+ /* Page is locked? */
+#define _PGT_locked		PG_shift(9)
+#define PGT_locked		PG_mask(1, 9)
+
+ /* Count of uses of this frame as its current type. */
+#define PGT_count_width		PG_shift(9)
+#define PGT_count_mask		((1UL<<PGT_count_width)-1)
+
+ /* Cleared when the owning guest 'frees' this page. */
+#define _PGC_allocated		PG_shift(1)
+#define PGC_allocated		PG_mask(1, 1)
+
+ /* Page is Xen heap? */
+#define _PGC_xen_heap		PG_shift(2)
+#define PGC_xen_heap		PG_mask(1, 2)
+
+ /* Set when the page is in use as a page table. */
+#define _PGC_page_table		PG_shift(3)
+#define PGC_page_table		PG_mask(1, 3)
+
+ /* Page is broken? */
+#define _PGC_broken		PG_shift(7)
+#define PGC_broken		PG_mask(1, 7)
+
+ /* Mutually-exclusive page states: { inuse, offlining, offlined, free }. */
+#define PGC_state		PG_mask(3, 9)
+#define PGC_state_inuse		PG_mask(0, 9)
+#define PGC_state_offlining	PG_mask(1, 9)
+#define PGC_state_offlined	PG_mask(2, 9)
+#define PGC_state_free		PG_mask(3, 9)
+
+#define page_state_is(pg, st)	\
+	(((pg)->count_info&PGC_state) == PGC_state_##st)
+
+ /* Count of references to this frame. */
+#define PGC_count_width		PG_shift(9)
+#define PGC_count_mask		((1UL<<PGC_count_width)-1)
+
+#define set_gpfn_from_mfn(mfn, pfn) \
+	do { } while(0)
+
+#define get_gpfn_from_mfn(mfn)	((mfn))
+
+#define mfn_to_gmfn(_d, mfn)	(mfn)
+
+#define gmfn_to_mfn(_d, gpfn)	(gpfn)
+
+#define domain_set_alloc_bitsize(d)	((void)0)
+#define domain_clamp_alloc_bitsize(d,b)	(b)
+
+#define write_ptbase(v)	cpu_switch_ttb((v)->arch.ctx.ttbr0)
+
+struct page_info
+{
+	struct page_list_entry list;
+
+	/* Reference count and various PGC_xxx flags and fields. */
+	unsigned long count_info;
+
+	/* Context-dependent fields follow... */
+	union {
+		/* Page is in use: ((count_info & PGC_count_mask) != 0). */
+		struct {
+			/* Type reference count and various PGT_xxx flags and fields. */
+			unsigned long type_info;
+		} inuse;
+
+		/* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
+		struct {
+			/* Do TLBs need flushing for safety before next page use? */
+			bool_t need_tlbflush;
+		} free;
+	} u;
+
+	union {
+		/* Page is in use, but not as a shadow. */
+		struct {
+			/* Owner of this page (zero if page is anonymous). */
+			unsigned long _domain;
+		} inuse;
+
+		/* Page is on a free list. */
+		struct {
+			/* Order-size of the free chunk this page is the head of. */
+			unsigned int order;
+		} free;
+	} v;
+
+	/*
+	 * Timestamp from 'TLB clock', used to avoid extra safety flushes.
+	 * Only valid for: a) free pages, and b) pages with zero type count
+	 * (except page table pages when the guest is in shadow mode).
+	 */
+	u32 tlbflush_timestamp;
+};
+
+#ifndef NDEBUG
+#define TYPE_SAFETY 1
+#endif
+
+#ifdef TYPE_SAFETY
+#define TYPE_SAFE(_type,_name)						\
+typedef struct { _type _name; } _name##_t;				\
+static inline _name##_t _##_name(_type n) { return (_name##_t) { n }; } \
+static inline _type _name##_x(_name##_t n) { return n._name; }
+#else
+#define TYPE_SAFE(_type,_name)                                          \
+typedef _type _name##_t;                                                \
+static inline _name##_t _##_name(_type n) { return n; }                 \
+static inline _type _name##_x(_name##_t n) { return n; }
+#endif
+
+TYPE_SAFE(unsigned long,mfn);
+
+#ifdef MEMORY_GUARD
+void memguard_init(void);
+void memguard_guard_range(void *p, unsigned long l);
+void memguard_unguard_range(void *p, unsigned long l);
+#else
+#define memguard_init()                ((void)0)
+#define memguard_guard_range(_p,_l)    ((void)0)
+#define memguard_unguard_range(_p,_l)  ((void)0)
+#endif /* MEMORY_GUARD */
+
+extern unsigned long xenheap_phys_start, xenheap_phys_end;
+extern unsigned long xen_phys_start, xen_phys_end;
+extern unsigned long min_page, max_page;
+
+extern struct domain *dom_xen, *dom_io, *dom_cow;
+extern struct page_info *frame_table;
+
+void memguard_guard_stack(void *p);
+
+void share_xen_page_with_guest(struct page_info *page, struct domain *d, int readonly);
+void share_xen_page_with_privileged_guests(struct page_info *page, int readonly);
+
+int alloc_page_type(struct page_info *page, unsigned long type);
+void free_page_type(struct page_info *page, unsigned long type);
+
+void put_page(struct page_info *page);
+int  get_page(struct page_info *page, struct domain *domain);
+
+void put_page_type(struct page_info *page);
+int  get_page_type(struct page_info *page, unsigned long type);
+
+struct domain *page_get_owner_and_reference(struct page_info *page);
+
+int is_iomem_page(unsigned long mfn);
+
+int steal_page(struct domain *d, struct page_info *page, unsigned int memflags);
+int donate_page(struct domain *d, struct page_info *page, unsigned int memflags);
+
+unsigned long domain_get_maximum_gpfn(struct domain *d);
+
+long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg);
+
+int map_pages_to_xen(unsigned long virt, unsigned long mfn, int nr, unsigned long flags);
+
+static inline void put_page_and_type(struct page_info *page)
+{
+	put_page_type(page);
+	put_page(page);
+}
+
+static inline int get_page_and_type(struct page_info *page,
+                                    struct domain *domain,
+                                    unsigned long type)
+{
+	int rc = get_page(page, domain);
+
+	if ( likely(rc) && unlikely(!get_page_type(page, type)) ) {
+		put_page(page);
+		rc = 0;
+	}
+
+	return rc;
+}
+
+#endif /* __ARM_MM_H__ */
diff -r e701461b1251 xen/include/asm-arm/mmu.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/mmu.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,11 @@
+#ifndef __ARM_MMU_H__
+#define __ARM_MMU_H__
+
+#define PADDR_BITS              32
+#define PADDR_MASK              ((1UL << PADDR_BITS) - 1)
+
+#define VADDR_BITS              32
+#define VADDR_MASK              ((1UL << VADDR_BITS) - 1)
+
+#endif
+
diff -r e701461b1251 xen/include/asm-arm/multicall.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/multicall.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,9 @@
+
+#ifndef __ARM_MULTICALL_H__
+#define __ARM_MULTICALL_H__
+
+#include <xen/errno.h>
+
+#define do_multicall_call(_call)
+
+#endif /* __ARM_MULTICALL_H__ */
diff -r e701461b1251 xen/include/asm-arm/numa.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/numa.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,21 @@
+#ifndef __ARM_NUMA_H__ 
+#define __ARM_NUMA_H__
+
+#include <xen/cpumask.h>
+
+#define NODES_SHIFT 	0
+#define MAX_NUMNODES	(1 << NODES_SHIFT)
+
+
+#define NUMA_NO_NODE	0xFF
+
+extern unsigned char cpu_to_node[];
+extern cpumask_t     node_to_cpumask[];
+
+#define cpu_to_node(cpu)	(cpu_to_node[cpu])
+#define parent_node(node)	(node)
+#define node_to_first_cpu(node)	(__ffs(node_to_cpumask[node]))
+#define node_to_cpumask(node)	(node_to_cpumask[node])
+
+#define phys_to_nid(addr)	(0)
+#endif
diff -r e701461b1251 xen/include/asm-arm/p2m.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/p2m.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,10 @@
+#ifndef __ARM_P2M_H__
+#define __ARM_P2M_H__
+
+#define gfn_to_mfn(d, g, t)		(g)
+#define gfn_to_mfn_query(d, g, t)	(g)
+#define gfn_to_mfn_guest(d, g, t)	(g)
+#define gfn_to_mfn_unshare(d, g, t)	(g)
+
+#define put_gfn(d, gfn)
+#endif
diff -r e701461b1251 xen/include/asm-arm/page.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/page.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,95 @@
+#ifndef __ARM_PAGE_H__
+#define __ARM_PAGE_H__
+
+#include <asm/config.h>
+#include <asm/types.h>
+
+#define PAGE_SHIFT		12
+#define PAGE_SIZE		(1 << PAGE_SHIFT)
+#define PAGE_MASK		(~(PAGE_SIZE - 1))
+
+#define PAGE_ALIGN(x)		(((x) + PAGE_SIZE - 1) & PAGE_MASK)
+
+#ifndef __ASSEMBLY__
+#include <xen/lib.h>
+
+#define clear_page(_p)		memset((void *)(_p), 0, PAGE_SIZE)
+#define copy_page(_t, _f)	memcpy((void *)(_t), (void *)(_f), PAGE_SIZE);
+
+static inline int get_order_from_bytes(unsigned long size)
+{
+	int order;
+
+	size = (size - 1) >> PAGE_SHIFT;
+	for ( order = 0; size; order++ )
+		size >>= 1;
+
+	return order;
+}
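+/* E.g. get_order_from_bytes(PAGE_SIZE) == 0, get_order_from_bytes(PAGE_SIZE + 1) == 1. */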
+
+static inline int get_order_from_pages(unsigned long nr_pages)
+{
+	int order;
+
+	nr_pages--;
+	for ( order = 0; nr_pages; order++ )
+		nr_pages >>= 1;
+
+	return order;
+}
+
+/* Convert between Xen-heap virtual addresses and machine addresses. */
+
+#define virt_to_maddr(addr)	__virt_to_maddr((void *)(addr))
+#define maddr_to_virt(addr)	__maddr_to_virt((paddr_t)(addr))
+
+#define virt_to_mfn(addr)	(virt_to_maddr(addr) >> PAGE_SHIFT)
+
+#define virt_to_page(addr)	(mfn_to_page(virt_to_maddr(addr) >> PAGE_SHIFT))
+#define page_to_virt(_page)	maddr_to_virt(page_to_mfn(_page) << PAGE_SHIFT)
+
+#define __pa(addr)		(virt_to_maddr(addr))
+#define __va(addr)		(maddr_to_virt(addr))
+
+
+#define mfn_valid(_pfn)		(((_pfn) >= min_page) && ((_pfn) <= max_page))
+
+#define mfn_to_page(_pfn)	((struct page_info *)(frame_table + ((_pfn) - min_page)))
+#define page_to_mfn(_page)	((unsigned long)(((_page) - frame_table) + min_page))
+#define page_to_maddr(_page)	(page_to_mfn(_page) << PAGE_SHIFT)
+#define maddr_to_page(addr)	mfn_to_page((addr >> PAGE_SHIFT))
+
+#define mfn_to_virt(_mfn)	(maddr_to_virt(((_mfn) << PAGE_SHIFT)))
+
+#define paddr_to_pfn(addr)	((unsigned long)((addr) >> PAGE_SHIFT))
+
+#define is_xen_heap_mfn(_pfn)			\
+({						\
+	unsigned long phys;			\
+	phys = (_pfn) << PAGE_SHIFT;		\
+	((phys >= xenheap_phys_start) &&	\
+	 (phys < xenheap_phys_end));		\
+})
+
+#define is_xen_heap_page(page)                  \
+	is_xen_heap_mfn(page_to_mfn(page))
+
+#define is_xen_fixed_mfn(_mfn)			\
+	is_xen_heap_mfn(_mfn)
+
+extern unsigned long xen_phys_start;
+static inline paddr_t __virt_to_maddr(void *addr)
+{
+	return (paddr_t)(addr) - XEN_VIRT_START + xen_phys_start;
+}
+
+static inline void *__maddr_to_virt(unsigned long addr)
+{
+	return (void *)((addr) + XEN_VIRT_START - xen_phys_start);
+}
+
+#define __page_aligned__ \
+    __attribute_used__ __attribute__ ((__section__ (".bss.page_aligned")))
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __ARM_PAGE_H__ */
diff -r e701461b1251 xen/include/asm-arm/pci.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/pci.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,9 @@
+#ifndef __ASM_PCI_H__
+#define __ASM_PCI_H__
+
+struct arch_pci_dev {
+};
+
+
+#endif
+
diff -r e701461b1251 xen/include/asm-arm/percpu.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/percpu.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,16 @@
+#ifndef __ARM_PERCPU_H__
+#define __ARM_PERCPU_H__
+
+#ifndef __ASSEMBLY__
+#define __DEFINE_PER_CPU(type, name, suffix) \
+	__typeof__(type) per_cpu_##name[NR_CPUS] = {0,}
+
+#define DECLARE_PER_CPU(type, name) \
+	extern __typeof__(type) per_cpu__##name[NR_CPUS]
+
+#define per_cpu(var, cpu)	(per_cpu__##var[cpu])
+
+#define __get_cpu_var(var)	per_cpu(var, smp_processor_id())
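+
+/*
+ * Note: the single underscore in per_cpu_ vs. the double underscore in
+ * per_cpu__ is deliberate: the generic DEFINE_PER_CPU() wrapper (in
+ * xen/percpu.h) is expected to paste an extra leading underscore onto
+ * the name, giving per_cpu__name to match DECLARE_PER_CPU() and
+ * per_cpu() above.  A usage sketch, assuming that wrapper:
+ *
+ *	DEFINE_PER_CPU(u32, tlbflush_time);
+ *	per_cpu(tlbflush_time, cpu) = now;
+ */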
+
+#endif /* !__ASSEMBLY */
+#endif /* !__ARM_PERCPU_H__ */
diff -r e701461b1251 xen/include/asm-arm/processor.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/processor.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,219 @@
+/*
+ *  processor.h
+ *
+ * Copyright (C) 2008 Samsung Electronics
+ *          JaeMin Ryu  <jm77.ryu@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#ifndef __ARM_PROCESSOR_H__
+#define __ARM_PROCESSOR_H__
+
+/*
+ * PSR bits
+ */
+#define PSR_MODE_USR            0x00000010
+#define PSR_MODE_FIQ            0x00000011
+#define PSR_MODE_IRQ            0x00000012
+#define PSR_MODE_SVC            0x00000013
+#define PSR_MODE_ABT            0x00000017
+#define PSR_MODE_UND            0x0000001b
+#define PSR_MODE_SYS            0x0000001f
+#define PSR_MODE_MASK           0x0000001f
+#define PSR_T_BIT               0x00000020
+#define PSR_F_BIT               0x00000040
+#define PSR_I_BIT               0x00000080
+#define PSR_J_BIT               0x01000000
+#define PSR_Q_BIT               0x08000000
+#define PSR_V_BIT               0x10000000
+#define PSR_C_BIT               0x20000000
+#define PSR_Z_BIT               0x40000000
+#define PSR_N_BIT               0x80000000
+
+/*
+ * Groups of PSR bits
+ */
+#define PSR_MASK_FLAGS          0xff000000      /* Flags                */
+#define PSR_MASK_STATUS         0x00ff0000      /* Status               */
+#define PSR_MASK_EXTENSION      0x0000ff00      /* Extension            */
+#define PSR_MASK_CONTROL        0x000000ff      /* Control              */
+
+
+#define MIDR(r)		p15, 0, r,  c0, c0, 0
+#define CTR(r)		p15, 0, r,  c0, c0, 1
+#define TCMTR(r)	p15, 0, r,  c0, c0, 2
+#define TLBTR(r)	p15, 0, r,  c0, c0, 3
+#define MPIDR(r)	p15, 0, r,  c0, c0, 5
+#define SCTLR(r)	p15, 0, r,  c1, c0, 0
+#define ACTLR(r)	p15, 0, r,  c1, c0, 1
+#define SCR(r)		p15, 0, r,  c1, c1, 0
+#define SDER(r)		p15, 0, r,  c1, c1, 1
+#define NSACR(r)	p15, 0, r,  c1, c1, 2
+#define TTBR0(r)	p15, 0, r,  c2, c0, 0
+#define TTBR1(r)	p15, 0, r,  c2, c0, 1
+#define TTBCR(r)	p15, 0, r,  c2, c0, 2
+#define DACR(r)		p15, 0, r,  c3, c0, 0
+#define DFSR(r)		p15, 0, r,  c5, c0, 0
+#define IFSR(r)		p15, 0, r,  c5, c0, 1
+#define DFAR(r)		p15, 0, r,  c6, c0, 0
+#define IFAR(r)		p15, 0, r,  c6, c0, 2
+#define VBAR(r)		p15, 0, r, c12, c0, 0
+#define MVBAR(r)	p15, 0, r, c12, c0, 1
+/*
+ * System Control Register
+ */
+#define SCTLR_M         (1 << 0)  /* MMU enable                           */
+#define SCTLR_A         (1 << 1)  /* Alignment abort enable               */
+#define SCTLR_C         (1 << 2)  /* Dcache enable                        */
+#define SCTLR_W         (1 << 3)  /* Write buffer enable                  */
+#define SCTLR_P         (1 << 4)  /* 32-bit exception handler             */
+#define SCTLR_D         (1 << 5)  /* 32-bit data address range            */
+#define SCTLR_L         (1 << 6)  /* Implementation defined               */
+#define SCTLR_B         (1 << 7)  /* Big endian                           */
+#define SCTLR_S         (1 << 8)  /* System MMU protection                */
+#define SCTLR_R         (1 << 9)  /* ROM MMU protection                   */
+#define SCTLR_SW        (1 << 10) /* Implementation defined               */
+#define SCTLR_Z         (1 << 11) /* Implementation defined               */
+#define SCTLR_I         (1 << 12) /* Icache enable                        */
+#define SCTLR_V         (1 << 13) /* Vectors relocated to 0xffff0000      */
+#define SCTLR_RR        (1 << 14) /* Round Robin cache replacement        */
+#define SCTLR_L4        (1 << 15) /* LDR pc can set T bit                 */
+#define SCTLR_DT        (1 << 16)
+#define SCTLR_IT        (1 << 18)
+#define SCTLR_ST        (1 << 19)
+#define SCTLR_FI        (1 << 21) /* Fast interrupt (lower latency mode)  */
+#define SCTLR_U         (1 << 22) /* Unaligned access operation           */
+#define SCTLR_XP        (1 << 23) /* Extended page tables                 */
+#define SCTLR_VE        (1 << 24) /* Vectored interrupts                  */
+#define SCTLR_EE        (1 << 25) /* Exception endianness                 */
+#define SCTLR_NMFI      (1 << 27) /* Nonmaskable fast interrupt enable    */
+#define SCTLR_TRE       (1 << 28) /* TEX remap                            */
+#define SCTLR_AFE       (1 << 29) /* Access flag enable                   */
+#define SCTLR_TE        (1 << 30) /* Thumb exception enable               */
+
+/*
+ * Co-Processor Access Register
+ */
+#define CPAR_BIT_CP0    (1 << 0)
+#define CPAR_BIT_CP1    (1 << 1)
+#define CPAR_BIT_CP2    (1 << 2)
+#define CPAR_BIT_CP3    (1 << 3)
+#define CPAR_BIT_CP4    (1 << 4)
+#define CPAR_BIT_CP5    (1 << 5)
+#define CPAR_BIT_CP6    (1 << 6)
+#define CPAR_BIT_CP7    (1 << 7)
+#define CPAR_BIT_CP8    (1 << 8)
+#define CPAR_BIT_CP9    (1 << 9)
+#define CPAR_BIT_CP10   (1 << 10)
+#define CPAR_BIT_CP11   (1 << 11)
+#define CPAR_BIT_CP12   (1 << 12)
+#define CPAR_BIT_CP13   (1 << 13)
+
+/*
+ * Auxiliary Control Register
+ */
+#define ACTLR_FW        (1 << 0)  /* Cache and TLB maintenance broadcast  */
+#define ACTLR_DP2       (1 << 1)  /* L2 Dside prefetch                    */
+#define ACTLR_DP1       (1 << 2)  /* L1 Dside prefetch                    */
+#define ACTLR_FOZ       (1 << 3)  /* Write full line of zeros             */
+#define ACTLR_SMP       (1 << 6)  /* SMP/nAMP                             */
+#define ACTLR_EXCL      (1 << 7)  /* Exclusive cache enable               */
+#define ACTLR_PARON     (1 << 9)  /* Parity on                            */
+
+/*
+ * Secure Configuration Register
+ */
+#define SCR_NS          (1 << 0)  /* Non-secure mode                      */
+#define SCR_IRQ         (1 << 1)  /* IRQ exception handling mode          */
+#define SCR_FIQ         (1 << 2)  /* FIQ exception handling mode          */
+#define SCR_EA          (1 << 3)  /* External exception handling mode     */
+#define SCR_FW          (1 << 4)  /* F Bit access allow bit               */
+#define SCR_AW          (1 << 5)  /* A bit access allow bit               */
+
+#define NSACR_NSSMP     (1 << 18)
+#define NSACR_TL        (1 << 17)
+#define NSACR_NSACEDIS  (1 << 15)
+#define NSACR_NSD32DIS  (1 << 14)
+#define NSACR_CP11      (1 << 11)
+#define NSACR_CP10      (1 << 10)
+
+
+#ifndef __ASSEMBLY__
+
+#define cpu_to_core(cpu)        (0)
+#define cpu_to_socket(cpu)      (0)
+
+#define p14     14
+#define p15     15
+#define c0      0
+#define c1      1
+#define c2      2
+#define c3      3
+#define c4      4
+#define c5      5
+#define c6      6
+#define c7      7
+#define c8      8
+#define c9      9
+#define c10     10
+#define c11     11
+#define c12     12
+#define c13     13
+#define c14     14
+#define c15     15
+
+#define MCR(cp,op1,Rd,CRn,CRm,op2)  \
+	__asm__ __volatile__(" mcr " #cp",%1,%2,"#CRn","#CRm ",%5" \
+	: : "i" (cp), "i" (op1), "r" (Rd), "i" (CRn), "i" (CRm), "i" (op2))
+
+#define MRC(cp,op1,Rd,CRn,CRm,op2)  \
+	__asm__ __volatile__( " mrc " #cp",%2,%0," #CRn","#CRm",%5" \
+	: "=r" (Rd) : "i" (cp), "i" (op1), "i" (CRn), "i" (CRm), "i" (op2))
+
+static inline void cpu_wait_for_event(void)
+{
+        __asm__ __volatile__("wfe" : : : "memory");
+}
+
+static inline void cpu_wait_for_interrupt(void)
+{
+        __asm__ __volatile__("wfi" : : : "memory");
+}
+
+static inline void cpu_send_event(void)
+{
+        __asm__ __volatile__("sev" : : : "memory");
+}
+
+#define CPU_MODE_SMP	1
+#define CPU_MODE_AMP	0
+
+static inline void cpu_set_coherency_mode(unsigned int mode)
+{
+	unsigned long aux;
+
+	MRC(p15, 0, aux, c1, c0, 1);
+
+	if (mode == CPU_MODE_SMP) {
+		aux |= (ACTLR_SMP | ACTLR_FW);
+	} else {
+		aux &= ~(ACTLR_SMP | ACTLR_FW);
+	}
+
+	MCR(p15, 0, aux, c1, c0, 1);
+}
+
+#endif
+#endif
diff -r e701461b1251 xen/include/asm-arm/regs.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/regs.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,17 @@
+#ifndef __ASM_ARM_REGS_H__
+#define __ASM_ARM_REGS_H__
+
+#include <xen/types.h>
+#include <asm/current.h>
+
+#ifndef __ASSEMBLY__
+static inline int guest_mode(struct cpu_user_regs *regs)
+{
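+	/* Not implemented yet; spin rather than return a bogus mode. */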
+	while(1);
+
+	return 0;
+}
+#endif
+
+#endif
+
diff -r e701461b1251 xen/include/asm-arm/smp.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/smp.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,28 @@
+#ifndef __ARM_SMP_H__
+#define __ARM_SMP_H__
+
+#include <xen/config.h>
+#include <xen/spinlock.h>
+#include <xen/cpumask.h>
+#include <xen/percpu.h>
+#include <asm/current.h>
+
+#ifndef __ASSEMBLY__
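+/* CPU number from MPIDR (CP15 c0, c0, 5) affinity level 0, low four bits. */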
+#define raw_smp_processor_id()			\
+({						\
+	unsigned int id;			\
+	__asm__("mrc p15, 0, %0, c0, c0, 5"	\
+		: "=r" (id));			\
+	id &= 0x0F;				\
+})
+
+#define cpu_is_offline(cpu)	unlikely(!cpu_online(cpu))
+
+DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_mask);
+DECLARE_PER_CPU(cpumask_var_t, cpu_core_mask);
+
+DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
+DECLARE_PER_CPU(cpumask_t, cpu_core_map);
+
+#endif /* !__ASSEMBLY__ */
+#endif /* !__ARM_SMP_H__ */
diff -r e701461b1251 xen/include/asm-arm/softirq.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/softirq.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,11 @@
+#ifndef __ASM_SOFTIRQ_H__
+#define __ASM_SOFTIRQ_H__
+
+#define RESERVED_SOFTIRQ0	(NR_COMMON_SOFTIRQS + 0)
+#define RESERVED_SOFTIRQ1	(NR_COMMON_SOFTIRQS + 1)
+#define VCPU_KICK_SOFTIRQ	(NR_COMMON_SOFTIRQS + 2)
+
+#define NR_ARCH_SOFTIRQS	3
+
+#endif /* __ASM_SOFTIRQ_H__ */
+
diff -r e701461b1251 xen/include/asm-arm/spinlock.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/spinlock.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,200 @@
+#ifndef __ARM_SPINLOCK_H__
+#define __ARM_SPINLOCK_H__
+
+#include <xen/config.h>
+#include <xen/lib.h>
+#include <asm/atomic.h>
+
+/*
+ * Unlocked value : 0
+ * Locked value   : 1
+ */
+#define _RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define _RAW_RW_LOCK_UNLOCKED	{ 0 }
+
+typedef struct {
+	volatile unsigned int lock;
+} raw_spinlock_t;
+
+typedef struct rwlock {
+	volatile unsigned int lock;
+} raw_rwlock_t;
+
+#define _raw_spin_is_locked(x)	((x)->lock != 0)
+
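+/*
+ * ldrex/strex retry loop: ldrex marks the lock word for exclusive
+ * access, wfene sleeps while the lock is held, strexeq publishes the
+ * locked value only if no other CPU touched the word in between, and
+ * a failed store (non-zero status) restarts the loop.
+ */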
+static inline void _raw_spin_lock(raw_spinlock_t *lock)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__(
+"1:	ldrex	%0, [%1]\n"
+"	teq	%0, #0\n"
+"	wfene\n"
+"	strexeq	%0, %2, [%1]\n"
+"	teqeq	%0, #0\n"
+"	bne	1b"
+	: "=&r" (tmp)
+	: "r" (&lock->lock), "r" (1)
+	: "cc");
+
+	mb();
+}
+
+static inline int _raw_spin_trylock(raw_spinlock_t *lock)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__(
+"	ldrex	%0, [%1]\n"
+"	teq	%0, #0\n"
+"	strexeq	%0, %2, [%1]"
+	: "=&r" (tmp)
+	: "r" (&lock->lock), "r" (1)
+	: "cc");
+
+	if (tmp == 0) {
+		mb();
+
+		return 1;
+	} else {
+		return 0;
+	}
+}
+
+static inline void _raw_spin_unlock(raw_spinlock_t *lock)
+{
+	mb();
+
+	__asm__ __volatile__(
+"	str	%1, [%0]\n"
+"	mcr	p15, 0, %1, c7, c10, 4\n" /* DSB */
+"	sev"
+	:
+	: "r" (&lock->lock), "r" (0)
+	: "cc");
+}
+
+/*
+ * RWLOCKS
+ *
+ *
+ * Write locks are easy - we just set bit 31.  When unlocking, we can
+ * just write zero since the lock is exclusively held.
+ */
+
+static inline void _raw_write_lock(raw_rwlock_t *rw)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__(
+"1:	ldrex	%0, [%1]\n"
+"	teq	%0, #0\n"
+"	wfene\n"
+"	strexeq	%0, %2, [%1]\n"
+"	teq	%0, #0\n"
+"	bne	1b"
+	: "=&r" (tmp)
+	: "r" (&rw->lock), "r" (0x80000000)
+	: "cc");
+
+	mb();
+}
+
+static inline int _raw_write_trylock(raw_rwlock_t *rw)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__(
+"1:	ldrex	%0, [%1]\n"
+"	teq	%0, #0\n"
+"	strexeq	%0, %2, [%1]"
+	: "=&r" (tmp)
+	: "r" (&rw->lock), "r" (0x80000000)
+	: "cc");
+
+	if (tmp == 0) {
+		mb();
+		return 1;
+	} else {
+		return 0;
+	}
+}
+
+static inline void _raw_write_unlock(raw_rwlock_t *rw)
+{
+	mb();
+
+	__asm__ __volatile__(
+	"str	%1, [%0]\n"
+"	mcr	p15, 0, %1, c7, c10, 4\n" /* DSB */
+"	sev\n"
+	:
+	: "r" (&rw->lock), "r" (0)
+	: "cc");
+}
+
+#define _raw_rw_is_locked(x)		((x)->lock != 0)
+#define _raw_rw_is_write_locked(x)	(((x)->lock & 0x80000000) != 0)
+#define _raw_write_can_lock(x)		((x)->lock == 0)
+
+static inline void _raw_read_lock(raw_rwlock_t *rw)
+{
+	unsigned long tmp, tmp2;
+
+	__asm__ __volatile__(
+"1:	ldrex	%0, [%2]\n"
+"	adds	%0, %0, #1\n"
+"	strexpl	%1, %0, [%2]\n"
+"	wfemi\n"
+"	rsbpls	%0, %1, #0\n"
+"	bmi	1b"
+	: "=&r" (tmp), "=&r" (tmp2)
+	: "r" (&rw->lock)
+	: "cc");
+
+	mb();
+}
+
+static inline void _raw_read_unlock(raw_rwlock_t *rw)
+{
+	unsigned long tmp, tmp2;
+
+	mb();
+
+	__asm__ __volatile__(
+"1:	ldrex	%0, [%2]\n"
+"	sub	%0, %0, #1\n"
+"	strex	%1, %0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b\n"
+"	cmp	%0, #0\n"
+"	mcreq   p15, 0, %0, c7, c10, 4\n"
+"	seveq"
+	: "=&r" (tmp), "=&r" (tmp2)
+	: "r" (&rw->lock)
+	: "cc");
+}
+
+static inline int _raw_read_trylock(raw_rwlock_t *rw)
+{
+	unsigned long tmp, tmp2 = 1;
+
+	__asm__ __volatile__(
+"1:	ldrex	%0, [%2]\n"
+"	adds	%0, %0, #1\n"
+"	strexpl	%1, %0, [%2]\n"
+	: "=&r" (tmp), "+r" (tmp2)
+	: "r" (&rw->lock)
+	: "cc");
+
+	mb();
+	return tmp2 == 0;
+}
+
+#define _raw_read_can_lock(x)	((x)->lock < 0x80000000)
+
+#define _raw_spin_relax(lock)	cpu_relax()
+#define _raw_read_relax(lock)	cpu_relax()
+#define _raw_write_relax(lock)	cpu_relax()
+
+#endif /* __ARM_SPINLOCK_H__ */
diff -r e701461b1251 xen/include/asm-arm/string.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/string.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,49 @@
+#ifndef __ASM_STRING_H__
+#define __ASM_STRING_H__
+
+/*
+ * We don't do inline string functions, since the
+ * optimised inline asm versions are not small.
+ */
+
+#define __HAVE_ARCH_STRRCHR
+extern char * strrchr(const char * s, int c);
+
+#define __HAVE_ARCH_STRCHR
+extern char * strchr(const char * s, int c);
+
+#define __HAVE_ARCH_MEMCPY
+extern void * memcpy(void *, const void *, __kernel_size_t);
+
+#define __HAVE_ARCH_MEMMOVE
+extern void * memmove(void *, const void *, __kernel_size_t);
+
+#define __HAVE_ARCH_MEMCHR
+extern void * memchr(const void *, int, __kernel_size_t);
+
+#define __HAVE_ARCH_MEMZERO
+#define __HAVE_ARCH_MEMSET
+extern void * memset(void *, int, __kernel_size_t);
+
+#define __HAVE_ARCH_BCOPY
+
+extern void __memzero(void *ptr, __kernel_size_t n);
+
+#define memset(p,v,n)						\
+({								\
+	if ((n) != 0) {						\
+		if (__builtin_constant_p((v)) && (v) == 0)	\
+			__memzero((p),(n));			\
+		else						\
+			memset((p),(v),(n));			\
+	}							\
+	(p);							\
+})
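+
+/*
+ * The inner memset() call above is not re-expanded (the preprocessor
+ * never expands a macro inside its own definition), so constant-zero
+ * fills go to __memzero while everything else reaches the real memset.
+ * Note that p, v and n are evaluated more than once.
+ */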
+
+#define memzero(p,n) 				\
+({ 						\
+	if ((n) != 0) 				\
+		__memzero((p),(n)); (p); 	\
+})
+
+#endif
diff -r e701461b1251 xen/include/asm-arm/system.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/system.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,148 @@
+#ifndef __ASM_SYSTEM_H
+#define __ASM_SYSTEM_H
+
+#include <xen/config.h>
+
+#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
+
+#ifndef __ASSEMBLY__
+
+/*
+ * dmb : Data Memory Barrier
+ * dsb : Data Synchronization Barrier
+ *	-> Drains the write buffer on earlier versions of the architecture.
+ * isb : Instruction Synchronization Barrier
+ *	-> Flushes the pipeline and the branch target buffers.
+ */
+
+#define isb() __asm__ __volatile__ ("isb" : : : "memory")
+#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
+#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
+
+#define mb()		dmb()
+#define rmb() 		dmb()
+#define wmb() 		dmb()
+
+#define cpu_relax()	dmb()
+
+#define smp_rmb()	rmb()
+#define smp_wmb()	wmb()
+#define smp_mb()	dmb()
+
+#define local_irq_save(x)		\
+({					\
+	__asm__ __volatile__(		\
+		"mrs    %0, cpsr \n"	\
+		"cpsid  i"		\
+		: "=r" (x)		\
+		:			\
+		: "memory", "cc");	\
+})
+
+#define local_irq_enable()  __asm__("cpsie i    @ __sti" : : : "memory", "cc")
+#define local_irq_disable() __asm__("cpsid i    @ __cli" : : : "memory", "cc")
+#define local_fiq_enable()  __asm__("cpsie f    @ __stf" : : : "memory", "cc")
+#define local_fiq_disable() __asm__("cpsid f    @ __clf" : : : "memory", "cc")
+
+/*
+ * Save the current interrupt enable state.
+ */
+#define local_save_flags(x)		\
+({					\
+	__asm__ __volatile__(		\
+	"mrs	%0, cpsr\n"		\
+	: "=r" (x) : : "memory", "cc");	\
+})
+
+/*
+ * restore saved IRQ & FIQ state
+ */
+#define local_irq_restore(x)		\
+({					\
+	__asm__ __volatile__(		\
+	"msr	cpsr_c, %0\n"		\
+	:				\
+	: "r" (x)			\
+	: "memory", "cc");		\
+})
+
+#define irqs_disabled()				\
+({					\
+	unsigned long flags;		\
+	local_save_flags(flags);	\
+	flags & PSR_I_BIT;		\
+})
+
+#define local_irq_is_enabled()	(!irqs_disabled())
+
+static inline void nop(void)
+{
+	asm volatile("nop");
+}
+
+static inline unsigned int get_cr(void)
+{
+	unsigned int val;
+	asm("mrc p15, 0, %0, c1, c0, 0" : "=r"(val) : : "cc");
+
+	return val;
+}
+
+static inline void set_cr(unsigned int val)
+{
+	asm volatile("mcr p15, 0, %0, c1, c0, 0" : : "r"(val) : "cc");
+
+	isb();
+}
+
+static inline unsigned long _xchg(unsigned long x, volatile void * ptr, int size)
+{
+	unsigned long ret;
+	unsigned int tmp;
+
+	switch (size) {
+        case 1:
+		__asm__ __volatile__(
+		"1:     ldrexb  %0, [%3]\n"
+		"       strexb  %1, %2, [%3]\n"
+		"       teq     %1, #0\n"
+		"       bne     1b"
+		: "=&r" (ret), "=&r" (tmp)
+		: "r" (x), "r" (ptr)
+		: "memory", "cc");
+		break;
+	case 4:
+		__asm__ __volatile__("@ __xchg4\n"
+		"1:     ldrex   %0, [%3]\n"
+		"       strex   %1, %2, [%3]\n"
+		"       teq     %1, #0\n"
+		"       bne     1b"
+		: "=&r" (ret), "=&r" (tmp)
+		: "r" (x), "r" (ptr)
+		: "memory", "cc");
+		break;
+	default:
+		ret = 0;
+		break;
+	}
+
+	return ret;
+}
+
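+/*
+ * Note: this cmpxchg is atomic only with respect to local interrupts;
+ * unlike the ldrex/strex based _xchg() above, it does not serialise
+ * against other CPUs.
+ */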
+#define cmpxchg(ptr, old, new)						\
+({ 									\
+	__typeof__(*(ptr)) prev; 					\
+	unsigned long flags;						\
+	local_irq_save(flags);						\
+	prev = *((__typeof__(*(ptr)) *)ptr); 				\
+	if(prev == old) 						\
+		*((__typeof__(*(ptr)) *)ptr) = (__typeof__(*(ptr)))new;	\
+	local_irq_restore(flags);					\
+	prev; 								\
+})
+
+#define xchg(ptr,v)	\
+	((__typeof__(*(ptr)))_xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_SYSTEM_H */
diff -r e701461b1251 xen/include/asm-arm/tegra/config.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/tegra/config.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,11 @@
+#ifndef __TEGRA_CONFIG_H__
+#define __TEGRA_CONFIG_H__
+
+#define HZ	100
+#define CLOCK_TICK_RATE		1000000
+
+#define MAX_PHYS_CPUS		2
+
+#define BUILTIN_COMMAND_LINE_SIZE 256
+#define BUILTIN_COMMAND_LINE	""
+#endif
diff -r e701461b1251 xen/include/asm-arm/time.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/time.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,24 @@
+#ifndef __ASM_TIME_H__
+#define __ASM_TIME_H__
+
+#include <xen/config.h>
+#include <xen/types.h>
+#include <xen/softirq.h>
+
+#ifndef __ASSEMBLY__
+#define watchdog_disable() ((void)0)
+#define watchdog_enable()  ((void)0)
+
+struct tm;
+struct tm wallclock_time(void);
+
+typedef u64 cycle_t;
+
+static inline cycle_t get_cycles(void)
+{
+	return 0;
+}
+
+void timekeeping_init(void);
+#endif
+#endif
diff -r e701461b1251 xen/include/asm-arm/trace.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/trace.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,6 @@
+#ifndef __ARM_TRACE_H__
+#define __ARM_TRACE_H__
+
+
+#endif /*!__ARM_TRACE_H__*/
+
diff -r e701461b1251 xen/include/asm-arm/types.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/types.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,58 @@
+#ifndef __ARM_TYPES_H__
+#define __ARM_TYPES_H__
+
+#define BITS_PER_LONG	32
+#define BYTES_PER_LONG	4
+#define LONG_BYTEORDER	2
+
+#ifndef __ASSEMBLY__
+/*
+ * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
+ * header files exported to user space
+ */
+
+typedef __signed__ char __s8;
+typedef unsigned char __u8;
+
+typedef __signed__ short __s16;
+typedef unsigned short __u16;
+
+typedef __signed__ int __s32;
+typedef unsigned int __u32;
+
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+typedef __signed__ long long __s64;
+typedef unsigned long long __u64;
+#endif
+
+typedef unsigned long physaddr_t;
+
+typedef signed char s8;
+typedef unsigned char u8;
+
+typedef signed short s16;
+typedef unsigned short u16;
+
+typedef signed int s32;
+typedef unsigned int u32;
+
+typedef signed long long s64;
+typedef unsigned long long u64;
+
+typedef unsigned long paddr_t;
+typedef unsigned long vaddr_t;
+
+typedef unsigned long size_t;
+
+typedef char bool_t;
+
+#define test_and_set_bool(b)	xchg(&(b), 1)
+#define test_and_clear_bool(b)	xchg(&(b), 0)
+
+#define round_up(_p, _s)        (((unsigned long)(_p) + ((_s) - 1)) & ~((_s) - 1))
+#define round_down(_p, _s)      ((unsigned long)(_p) & ~((_s) - 1))
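+/* E.g. round_up(0x1234, 0x1000) == 0x2000, round_down(0x1234, 0x1000) == 0x1000. */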
+
+#define round_up_and_div(_p, _s) (round_up(_p, _s) / (_s))
+#endif /* __ASSEMBLY__ */
+
+#endif
diff -r e701461b1251 xen/include/asm-arm/xenoprof.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-arm/xenoprof.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,43 @@
+#ifndef __ASM_XENOPROF_H__
+#define __ASM_XENOPROF_H__
+
+#define xenoprof_arch_reserve_counters()	(0)
+#define xenoprof_arch_setup_events()		(0)
+#define xenoprof_arch_enable_virq()		(0)
+#define xenoprof_arch_start() 			(0)
+#define xenoprof_arch_stop()
+#define xenoprof_arch_disable_virq() 
+#define xenoprof_arch_release_counters()
+
+
+#define xenoprof_shared_gmfn(d, gmaddr, maddr)	\
+do {						\
+	(void)(maddr);				\
+} while (0)
+
+
+static inline void ibs_init(void) {}
+#define ibs_caps 0
+
+static inline int xenoprof_backtrace_supported(void)
+{
+	return 0;
+}
+
+struct vcpu;
+struct cpu_user_regs;
+
+int xenoprof_arch_counter(XEN_GUEST_HANDLE(void) arg);
+int compat_oprof_arch_counter(XEN_GUEST_HANDLE(void) arg);
+int xenoprof_arch_ibs_counter(XEN_GUEST_HANDLE(void) arg);
+
+static inline void xenoprof_backtrace(
+    struct domain *d, struct vcpu *vcpu,
+    struct cpu_user_regs *const regs, unsigned long depth, int mode) {}
+
+static inline int xenoprof_arch_init(int *num_events, char *cpu_type)
+{
+	return 0;
+}
+
+#endif
diff -r e701461b1251 xen/include/public/arch-arm.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/public/arch-arm.h	Fri Feb 03 16:07:03 2012 +0900
@@ -0,0 +1,180 @@
+#ifndef __XEN_PUBLIC_ARCH_ARM_32_H__
+#define __XEN_PUBLIC_ARCH_ARM_32_H__
+
+#define VPSR_MODE_SVC26         0x00000003
+#define VPSR_MODE_USR           0x00000010
+#define VPSR_MODE_FIQ           0x00000011
+#define VPSR_MODE_IRQ           0x00000012
+#define VPSR_MODE_SVC           0x00000013
+#define VPSR_MODE_ABT           0x00000017
+#define VPSR_MODE_UND           0x0000001b
+#define VPSR_MODE_SYS           0x0000001f
+#define VPSR_MODE_MASK          0x0000001f
+
+#define VPSR_T_BIT              0x00000020
+#define VPSR_F_BIT              0x00000040
+#define VPSR_I_BIT              0x00000100
+#define VPSR_J_BIT              0x01000000
+#define VPSR_Q_BIT              0x08000000
+#define VPSR_V_BIT              0x10000000
+#define VPSR_C_BIT              0x20000000
+#define VPSR_Z_BIT              0x40000000
+#define VPSR_N_BIT              0x80000000
+
+/*
+ * Groups of PSR bits
+ */
+#define VPSR_MASK_INTR          (VPSR_I_BIT | VPSR_F_BIT)
+#define VPSR_MASK_MODE          0x0000001f
+#define VPSR_MASK_FLAGS         0xff000000      /* Flags                */
+#define VPSR_MASK_STATUS        0x00ff0000      /* Status               */
+#define VPSR_MASK_EXTENSION     0x0000ff00      /* Extension            */
+#define VPSR_MASK_CONTROL       0x000000ff      /* Control              */
+
+/*
+ * HYPERCALLS for ARM architecture
+ */
+#define __HYPERVISOR_restore_trap_frame            23
+
+#define __HYPERVISOR_set_cpu_domain                48
+#define __HYPERVISOR_do_set_foreground_domain      49
+#define __HYPERVISOR_do_gcov_op                    50
+#define __HYPERVISOR_do_vfp_op                     51
+#define __HYPERVISOR_do_set_tls                    52
+
+#define TLBF_ITLB               1
+#define TLBF_DTLB               2
+#define TLBF_ASID               4
+
+
+#define CMD_FMRX                0
+#define CMD_FMXR                1
+
+#define FPEXC_XEN               0
+#define FPINST_XEN              1
+#define FPINST2_XEN             2
+#define MVFR0_XEN               3
+
+/* FPEXC bits */
+#define FPEXC_EXCEPTION         (1u<<31)
+#define FPEXC_ENABLE            (1<<30)
+
+
+#ifndef __ASSEMBLY__
+#ifdef __XEN__
+#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
+    typedef struct { type *p; } __guest_handle_ ## name
+#else
+#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
+    typedef type * __guest_handle_ ## name
+#endif
+
+#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
+    ___DEFINE_XEN_GUEST_HANDLE(name, type);   \
+    ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)
+
+#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
+#define XEN_GUEST_HANDLE(name)        __guest_handle_ ## name
+
+
+/*
+ * Virtual addresses beyond this are not modifiable by guest OSes. The 
+ * machine->physical mapping table starts at this address, read-only.
+ */
+#define __HYPERVISOR_VIRT_START 0xFC000000
+
+#ifndef HYPERVISOR_VIRT_START
+#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
+#endif
+
+#ifndef machine_to_phys_mapping
+#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
+#endif
+
+typedef unsigned long xen_pfn_t;
+typedef unsigned long xen_ulong_t;
+
+typedef struct trap_info {
+	unsigned long instruction;
+} trap_info_t;
+
+DEFINE_XEN_GUEST_HANDLE(trap_info_t);
+
+typedef struct vcpu_guest_context {
+	unsigned long	r0;
+	unsigned long	r1;
+	unsigned long	r2;
+	unsigned long	r3;
+	unsigned long	r4;
+	unsigned long	r5;
+	unsigned long	r6;
+	unsigned long	r7;
+	unsigned long	r8;
+	unsigned long	r9;
+	unsigned long	r10;
+	unsigned long	r11;
+	unsigned long	r12;
+	unsigned long	r13;
+	unsigned long	r14;
+	unsigned long	r15;
+	unsigned long   vbar;
+	unsigned long   dacr;
+	unsigned long   contextidr;
+	unsigned long   fcseidr;
+	unsigned long   ttbr0;
+	unsigned long   ttbr1;
+	unsigned long   ttbcr;
+	unsigned long	cpar;
+} vcpu_guest_context_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
+
+typedef struct cpu_user_regs {
+	unsigned long	r0;
+	unsigned long	r1;
+	unsigned long	r2;
+	unsigned long	r3;
+	unsigned long	r4;
+	unsigned long	r5;
+	unsigned long	r6;
+	unsigned long	r7;
+	unsigned long	r8;
+	unsigned long	r9;
+	unsigned long	r10;
+	unsigned long	r11;
+	unsigned long	r12;
+	unsigned long	r13;
+	unsigned long	r14;
+	unsigned long	r15;
+	unsigned long	psr;
+} cpu_user_regs_t;
+DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
+
+typedef struct arch_vcpu_info {
+	unsigned long	sp;
+	unsigned long	lr;
+	unsigned long	cpsr;
+	unsigned long	spsr;
+	unsigned long	cr;
+	unsigned long	cpar;
+	unsigned long	dacr;
+	unsigned long	pidr;
+	unsigned long	far;
+	unsigned long	fsr;
+	unsigned long	reserved10;
+	unsigned long	reserved11;
+	unsigned long	reserved12;
+	unsigned long	reserved13;
+	unsigned long	reserved14;
+} arch_vcpu_info_t;
+
+#define XEN_LEGACY_MAX_VCPUS	4
+
+typedef struct arch_shared_info {
+	unsigned long	platform;
+	unsigned long	max_pfn;
+	unsigned long	pfn_to_mfn_frame_list_list;
+} arch_shared_info_t;
+
+#define ELF_SIZE	32
+#endif /* __ASSEMBLY__ */
+#endif /* __XEN_PUBLIC_ARCH_ARM_32_H__ */
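
For reference (illustration only, not part of the patch), the
VPSR_MODE_* constants above decode the low five bits of a saved guest
PSR; a hypothetical helper:

    /* Illustration only: maps the mode field of a virtual PSR to a
     * name, using the VPSR_* constants from public/arch-arm.h above. */
    static const char *hypothetical_vpsr_mode_name(unsigned long vpsr)
    {
        switch ( vpsr & VPSR_MODE_MASK )
        {
        case VPSR_MODE_USR: return "usr";
        case VPSR_MODE_FIQ: return "fiq";
        case VPSR_MODE_IRQ: return "irq";
        case VPSR_MODE_SVC: return "svc";
        case VPSR_MODE_ABT: return "abt";
        case VPSR_MODE_UND: return "und";
        case VPSR_MODE_SYS: return "sys";
        default:            return "unknown";
        }
    }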

[-- Attachment #3: Type: text/plain, Size: 138 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel
