From: Sasha Levin <levinsasha928@gmail.com>
To: penberg@kernel.org
Cc: mingo@elte.hu, asias.hejun@gmail.com, prasadjoshi124@gmail.com,
avi@redhat.com, gorcunov@gmail.com, kvm@vger.kernel.org,
Sasha Levin <levinsasha928@gmail.com>
Subject: [PATCH 1/3] kvm tools: Add memory gap for larger RAM sizes
Date: Wed, 11 May 2011 07:09:09 +0300 [thread overview]
Message-ID: <1305086951-31698-1-git-send-email-levinsasha928@gmail.com> (raw)
Add a memory gap between 0xe0000000 and 0x100000000
when using more than 0xe0000000 bytes for guest RAM.
This space is used by several things, such as the PCI
configuration space.
This patch updates the e820 table, slot allocations
used for KVM_SET_USER_MEMORY_REGION, and the address
translation.
Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
---
tools/kvm/bios.c | 27 +++++++++++++++++++++------
tools/kvm/include/kvm/e820.h | 2 +-
tools/kvm/include/kvm/kvm.h | 9 ++++++++-
tools/kvm/kvm.c | 22 ++++++++++++++++------
4 files changed, 46 insertions(+), 14 deletions(-)
diff --git a/tools/kvm/bios.c b/tools/kvm/bios.c
index 2199c0c..cd417fa 100644
--- a/tools/kvm/bios.c
+++ b/tools/kvm/bios.c
@@ -61,7 +61,7 @@ static void e820_setup(struct kvm *kvm)
size = guest_flat_to_host(kvm, E820_MAP_SIZE);
mem_map = guest_flat_to_host(kvm, E820_MAP_START);
- *size = E820_MEM_AREAS;
+
mem_map[i++] = (struct e820_entry) {
.addr = REAL_MODE_IVT_BEGIN,
@@ -78,13 +78,28 @@ static void e820_setup(struct kvm *kvm)
.size = MB_BIOS_END - MB_BIOS_BEGIN,
.type = E820_MEM_RESERVED,
};
- mem_map[i++] = (struct e820_entry) {
- .addr = BZ_KERNEL_START,
- .size = kvm->ram_size - BZ_KERNEL_START,
- .type = E820_MEM_USABLE,
- };
+ if (kvm->ram_size < 0xe0000000) {
+ mem_map[i++] = (struct e820_entry) {
+ .addr = BZ_KERNEL_START,
+ .size = kvm->ram_size - BZ_KERNEL_START,
+ .type = E820_MEM_USABLE,
+ };
+ } else {
+ mem_map[i++] = (struct e820_entry) {
+ .addr = BZ_KERNEL_START,
+ .size = 0xe0000000 - BZ_KERNEL_START,
+ .type = E820_MEM_USABLE,
+ };
+ mem_map[i++] = (struct e820_entry) {
+ .addr = 0x100000000ULL,
+ .size = kvm->ram_size - 0xe0000000 - BZ_KERNEL_START,
+ .type = E820_MEM_USABLE,
+ };
+ }
BUILD_BUG_ON(i > E820_MEM_AREAS);
+
+ *size = i;
}
/**
diff --git a/tools/kvm/include/kvm/e820.h b/tools/kvm/include/kvm/e820.h
index 252ae1f..e0f5f2a 100644
--- a/tools/kvm/include/kvm/e820.h
+++ b/tools/kvm/include/kvm/e820.h
@@ -8,7 +8,7 @@
#define E820_MEM_USABLE 1
#define E820_MEM_RESERVED 2
-#define E820_MEM_AREAS 4
+#define E820_MEM_AREAS 5
struct e820_entry {
u64 addr; /* start of memory segment */
diff --git a/tools/kvm/include/kvm/kvm.h b/tools/kvm/include/kvm/kvm.h
index 3dab78d..e9c16ea 100644
--- a/tools/kvm/include/kvm/kvm.h
+++ b/tools/kvm/include/kvm/kvm.h
@@ -60,7 +60,14 @@ static inline u32 segment_to_flat(u16 selector, u16 offset)
static inline void *guest_flat_to_host(struct kvm *self, unsigned long offset)
{
- return self->ram_start + offset;
+ /*
+ * We have a gap between 0xe0000000 and 0x100000000.
+ * Consider it when translating an address above 0x100000000.
+ */
+ if (offset < 0xe0000000)
+ return self->ram_start + offset;
+ else
+ return self->ram_start + 0xe0000000 + (offset - 0x100000000);
}
static inline void *guest_real_to_host(struct kvm *self, u16 selector, u16 offset)
diff --git a/tools/kvm/kvm.c b/tools/kvm/kvm.c
index 65793f2..976b099 100644
--- a/tools/kvm/kvm.c
+++ b/tools/kvm/kvm.c
@@ -153,23 +153,33 @@ static bool kvm__cpu_supports_vm(void)
return regs.ecx & (1 << feature);
}
-void kvm__init_ram(struct kvm *self)
+static void kvm_register_mem_slot(struct kvm *kvm, u32 slot, u64 guest_phys, u64 size, u64 userspace_addr)
{
struct kvm_userspace_memory_region mem;
int ret;
mem = (struct kvm_userspace_memory_region) {
- .slot = 0,
- .guest_phys_addr = 0x0UL,
- .memory_size = self->ram_size,
- .userspace_addr = (unsigned long) self->ram_start,
+ .slot = slot,
+ .guest_phys_addr = guest_phys,
+ .memory_size = size,
+ .userspace_addr = userspace_addr,
};
- ret = ioctl(self->vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
+ ret = ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
if (ret < 0)
die_perror("KVM_SET_USER_MEMORY_REGION ioctl");
}
+void kvm__init_ram(struct kvm *self)
+{
+ if (self->ram_size < 0xe0000000) {
+ kvm_register_mem_slot(self, 0, 0, self->ram_size, (u64)self->ram_start);
+ } else {
+ kvm_register_mem_slot(self, 0, 0, 0xe0000000, (u64)self->ram_start);
+ kvm_register_mem_slot(self, 1, 0x100000000ULL, self->ram_size - 0xe0000000, (u64)self->ram_start + 0xe0000000);
+ }
+}
+
int kvm__max_cpus(struct kvm *self)
{
int ret;
--
1.7.5.rc3
next reply other threads:[~2011-05-11 17:29 UTC|newest]
Thread overview: 6+ messages / expand[flat|nested] mbox.gz Atom feed top
2011-05-11 4:09 Sasha Levin [this message]
2011-05-11 4:09 ` [PATCH 2/3 V2] kvm tools: Prevent PFN wraparound Sasha Levin
2011-05-11 11:15 ` Ingo Molnar
2011-05-11 4:09 ` [PATCH 3/3] kvm tools: Use definitions from kernel headers Sasha Levin
2011-05-11 10:59 ` Ingo Molnar
2011-05-11 7:15 ` [PATCH 1/3] kvm tools: Add memory gap for larger RAM sizes Ingo Molnar
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1305086951-31698-1-git-send-email-levinsasha928@gmail.com \
--to=levinsasha928@gmail.com \
--cc=asias.hejun@gmail.com \
--cc=avi@redhat.com \
--cc=gorcunov@gmail.com \
--cc=kvm@vger.kernel.org \
--cc=mingo@elte.hu \
--cc=penberg@kernel.org \
--cc=prasadjoshi124@gmail.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox