From: Mukesh Rathor <mukesh.rathor@oracle.com>
To: Xen-devel@lists.xensource.com
Subject: [PATCH 17/17] PVH xen: PVH dom0 creation....
Date: Tue, 23 Apr 2013 14:26:06 -0700 [thread overview]
Message-ID: <1366752366-16594-18-git-send-email-mukesh.rathor@oracle.com> (raw)
In-Reply-To: <1366752366-16594-1-git-send-email-mukesh.rathor@oracle.com>
Finally, the hardest. Mostly modify construct_dom0() to boot PV dom0 in
PVH mode. Introduce opt_dom0pvh, which, when specified on the command
line, causes dom0 to boot in PVH mode.
Note, the call to elf_load_binary() is moved down after required PVH setup
so that we can use the code path for both PV and PVH.
Change in V2:
- Map the entire IO region upfront in the P2M for PVH dom0.
Change in V3:
- Fix up pvh_map_all_iomem() to make sure it maps up to 4GB of I/O space.
- remove use of dbg_* functions.
Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
---
xen/arch/x86/domain_build.c | 268 ++++++++++++++++++++++++++++++++++---------
xen/arch/x86/mm/hap/hap.c | 14 +++
xen/arch/x86/setup.c | 10 ++-
xen/include/asm-x86/hap.h | 1 +
4 files changed, 236 insertions(+), 57 deletions(-)
diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
index f8cae52..2558017 100644
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -35,6 +35,7 @@
#include <asm/setup.h>
#include <asm/bzimage.h> /* for bzimage_parse */
#include <asm/io_apic.h>
+#include <asm/hap.h>
#include <public/version.h>
@@ -307,6 +308,69 @@ static void __init process_dom0_ioports_disable(void)
}
}
+/*
+ * Set the 1:1 map for all non-RAM regions for dom 0. Thus, dom0 will have
+ * the entire io region mapped in the EPT/NPT.
+ */
+static __init void pvh_map_all_iomem(struct domain *d)
+{
+ unsigned long start_pfn, end_pfn, end, start = 0;
+ const struct e820entry *entry;
+ unsigned int i, nump;
+ int rc;
+
+ for ( i = 0, entry = e820.map; i < e820.nr_map; i++, entry++ )
+ {
+ end = entry->addr + entry->size;
+
+ if ( entry->type == E820_RAM || entry->type == E820_UNUSABLE ||
+ i == e820.nr_map - 1 )
+ {
+ start_pfn = PFN_DOWN(start);
+ end_pfn = PFN_UP(end);
+
+ if ( entry->type == E820_RAM || entry->type == E820_UNUSABLE )
+ end_pfn = PFN_UP(entry->addr);
+
+ if ( start_pfn < end_pfn )
+ {
+ nump = end_pfn - start_pfn;
+ /* Add pages to the mapping */
+ rc = domctl_memory_mapping(d, start_pfn, start_pfn, nump, 1);
+ BUG_ON(rc);
+ }
+ start = end;
+ }
+ }
+
+ /* If the e820 ended under 4GB, we must map the remaining space upto 4GB */
+ if ( end < GB(4) )
+ {
+ start_pfn = PFN_UP(end);
+ end_pfn = (GB(4)) >> PAGE_SHIFT;
+ nump = end_pfn - start_pfn;
+ rc = domctl_memory_mapping(d, start_pfn, start_pfn, nump, 1);
+ BUG_ON(rc);
+ }
+}
+
+static __init void dom0_update_physmap(struct domain *d, unsigned long pfn,
+ unsigned long mfn, unsigned long vphysmap_s)
+{
+ if ( is_pvh_domain(d) )
+ {
+ int rc = guest_physmap_add_page(d, pfn, mfn, 0);
+ BUG_ON(rc);
+ return;
+ }
+ if ( !is_pv_32on64_domain(d) )
+ ((unsigned long *)vphysmap_s)[pfn] = mfn;
+ else
+ ((unsigned int *)vphysmap_s)[pfn] = mfn;
+
+ set_gpfn_from_mfn(mfn, pfn);
+}
+
/*
* Copy or zero function for dom0 only during boot. This because
* raw_copy_to_guest -> copy_to_user_hvm -> __hvm_copy needs curr to
@@ -351,6 +415,7 @@ int __init construct_dom0(
void *(*bootstrap_map)(const module_t *),
char *cmdline)
{
+ char *si_buf=NULL;
int i, cpu, rc, compatible, compat32, order, machine;
struct cpu_user_regs *regs;
unsigned long pfn, mfn;
@@ -359,7 +424,7 @@ int __init construct_dom0(
unsigned long alloc_spfn;
unsigned long alloc_epfn;
unsigned long initrd_pfn = -1, initrd_mfn = 0;
- unsigned long count;
+ unsigned long count, shared_info_paddr = 0;
struct page_info *page = NULL;
start_info_t *si;
struct vcpu *v = d->vcpu[0];
@@ -448,11 +513,19 @@ int __init construct_dom0(
return -EINVAL;
}
- if ( parms.elf_notes[XEN_ELFNOTE_SUPPORTED_FEATURES].type != XEN_ENT_NONE &&
- !test_bit(XENFEAT_dom0, parms.f_supported) )
+ if ( parms.elf_notes[XEN_ELFNOTE_SUPPORTED_FEATURES].type != XEN_ENT_NONE )
{
- printk("Kernel does not support Dom0 operation\n");
- return -EINVAL;
+ if ( !test_bit(XENFEAT_dom0, parms.f_supported) )
+ {
+ printk("Kernel does not support Dom0 operation\n");
+ return -EINVAL;
+ }
+ if ( is_pvh_domain(d) &&
+ !test_bit(XENFEAT_hvm_callback_vector, parms.f_supported) )
+ {
+ printk("Kernel does not support PVH mode\n");
+ return -EINVAL;
+ }
}
if ( compat32 )
@@ -517,6 +590,14 @@ int __init construct_dom0(
vstartinfo_end = (vstartinfo_start +
sizeof(struct start_info) +
sizeof(struct dom0_vga_console_info));
+
+ if ( is_pvh_domain(d) )
+ {
+ /* note, following is paddr as opposed to maddr */
+ shared_info_paddr = round_pgup(vstartinfo_end) - v_start;
+ vstartinfo_end += PAGE_SIZE;
+ }
+
vpt_start = round_pgup(vstartinfo_end);
for ( nr_pt_pages = 2; ; nr_pt_pages++ )
{
@@ -658,16 +739,34 @@ int __init construct_dom0(
maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l3_page_table;
l3start = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
}
- clear_page(l4tab);
- init_guest_l4_table(l4tab, d);
- v->arch.guest_table = pagetable_from_paddr(__pa(l4start));
- if ( is_pv_32on64_domain(d) )
- v->arch.guest_table_user = v->arch.guest_table;
+ if ( is_pvh_domain(d) )
+ {
+ v->arch.cr3 = v->arch.hvm_vcpu.guest_cr[3] = (vpt_start - v_start);
+
+ /* HAP is required for PVH and pfns are serially mapped there */
+ pfn = 0;
+ }
+ else
+ {
+ clear_page(l4tab);
+ init_guest_l4_table(l4tab, d);
+ v->arch.guest_table = pagetable_from_paddr(__pa(l4start));
+ if ( is_pv_32on64_domain(d) )
+ v->arch.guest_table_user = v->arch.guest_table;
+ pfn = alloc_spfn;
+ }
l4tab += l4_table_offset(v_start);
- pfn = alloc_spfn;
for ( count = 0; count < ((v_end-v_start)>>PAGE_SHIFT); count++ )
{
+ /*
+ * initrd chunk's mfns are allocated from a separate mfn chunk. Hence
+ * we need to adjust for them.
+ */
+ signed long pvh_adj = is_pvh_domain(d) ?
+ (PFN_UP(initrd_len) - alloc_spfn) << PAGE_SHIFT
+ : 0;
+
if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
{
maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l1_page_table;
@@ -694,16 +793,17 @@ int __init construct_dom0(
clear_page(l3tab);
if ( count == 0 )
l3tab += l3_table_offset(v_start);
- *l4tab = l4e_from_paddr(__pa(l3start), L4_PROT);
+ *l4tab = l4e_from_paddr(__pa(l3start) + pvh_adj, L4_PROT);
l4tab++;
}
- *l3tab = l3e_from_paddr(__pa(l2start), L3_PROT);
+ *l3tab = l3e_from_paddr(__pa(l2start) + pvh_adj, L3_PROT);
l3tab++;
}
- *l2tab = l2e_from_paddr(__pa(l1start), L2_PROT);
+ *l2tab = l2e_from_paddr(__pa(l1start) + pvh_adj, L2_PROT);
l2tab++;
}
- if ( count < initrd_pfn || count >= initrd_pfn + PFN_UP(initrd_len) )
+ if ( is_pvh_domain(d) ||
+ count < initrd_pfn || count >= initrd_pfn + PFN_UP(initrd_len) )
mfn = pfn++;
else
mfn = initrd_mfn++;
@@ -711,6 +811,9 @@ int __init construct_dom0(
L1_PROT : COMPAT_L1_PROT));
l1tab++;
+ if ( is_pvh_domain(d) )
+ continue;
+
page = mfn_to_page(mfn);
if ( (page->u.inuse.type_info == 0) &&
!get_page_and_type(page, d, PGT_writable_page) )
@@ -739,6 +842,9 @@ int __init construct_dom0(
COMPAT_L2_PAGETABLE_XEN_SLOTS(d) * sizeof(*l2tab));
}
+ if ( is_pvh_domain(d) )
+ goto pvh_skip_pt_rdonly;
+
/* Pages that are part of page tables must be read only. */
l4tab = l4start + l4_table_offset(vpt_start);
l3start = l3tab = l4e_to_l3e(*l4tab);
@@ -778,6 +884,8 @@ int __init construct_dom0(
}
}
+pvh_skip_pt_rdonly:
+
/* Mask all upcalls... */
for ( i = 0; i < XEN_LEGACY_MAX_VCPUS; i++ )
shared_info(d, vcpu_info[i].evtchn_upcall_mask) = 1;
@@ -801,35 +909,20 @@ int __init construct_dom0(
write_ptbase(v);
mapcache_override_current(v);
- /* Copy the OS image and free temporary buffer. */
- elf.dest = (void*)vkern_start;
- rc = elf_load_binary(&elf, (is_pvh_domain(d) ? v_start : 0));
- if ( rc < 0 )
- {
- printk("Failed to load the kernel binary\n");
- return rc;
- }
- bootstrap_map(NULL);
-
- if ( UNSET_ADDR != parms.virt_hypercall )
+ /* Set up start info area. */
+ if ( is_pvh_domain(d) )
{
- if ( (parms.virt_hypercall < v_start) ||
- (parms.virt_hypercall >= v_end) )
+ /* avoid calling copy for every write to the vstartinfo_start */
+ if ( (si_buf = xmalloc_bytes(PAGE_SIZE)) == NULL )
{
- mapcache_override_current(NULL);
- write_ptbase(current);
- printk("Invalid HYPERCALL_PAGE field in ELF notes.\n");
- return -1;
+ printk("PVH: xmalloc failed to alloc %ld bytes.\n", PAGE_SIZE);
+ return -ENOMEM;
}
- hypercall_page_initialise(
- d, (void *)(unsigned long)parms.virt_hypercall);
+ si = (start_info_t *)si_buf;
}
+ else
+ si = (start_info_t *)vstartinfo_start;
- /* Free temporary buffers. */
- discard_initial_images();
-
- /* Set up start info area. */
- si = (start_info_t *)vstartinfo_start;
clear_page(si);
si->nr_pages = nr_pages;
@@ -846,12 +939,16 @@ int __init construct_dom0(
elf_64bit(&elf) ? 64 : 32, parms.pae ? "p" : "");
count = d->tot_pages;
+
+ if ( is_pvh_domain(d) )
+ goto pvh_skip_guest_p2m_table;
+
l4start = map_domain_page(pagetable_get_pfn(v->arch.guest_table));
l3tab = NULL;
l2tab = NULL;
l1tab = NULL;
/* Set up the phys->machine table if not part of the initial mapping. */
- if ( parms.p2m_base != UNSET_ADDR )
+ if ( parms.p2m_base != UNSET_ADDR && !is_pvh_domain(d) )
{
unsigned long va = vphysmap_start;
@@ -972,6 +1069,11 @@ int __init construct_dom0(
unmap_domain_page(l3tab);
unmap_domain_page(l4start);
+pvh_skip_guest_p2m_table:
+
+ if (is_pvh_domain(d) )
+ hap_set_pvh_alloc_for_dom0(d, nr_pages);
+
/* Write the phys->machine and machine->phys table entries. */
for ( pfn = 0; pfn < count; pfn++ )
{
@@ -988,11 +1090,8 @@ int __init construct_dom0(
if ( pfn > REVERSE_START && (vinitrd_start || pfn < initrd_pfn) )
mfn = alloc_epfn - (pfn - REVERSE_START);
#endif
- if ( !is_pv_32on64_domain(d) )
- ((unsigned long *)vphysmap_start)[pfn] = mfn;
- else
- ((unsigned int *)vphysmap_start)[pfn] = mfn;
- set_gpfn_from_mfn(mfn, pfn);
+ dom0_update_physmap(d, pfn, mfn, vphysmap_start);
+
if (!(pfn & 0xfffff))
process_pending_softirqs();
}
@@ -1008,8 +1107,8 @@ int __init construct_dom0(
if ( !page->u.inuse.type_info &&
!get_page_and_type(page, d, PGT_writable_page) )
BUG();
- ((unsigned long *)vphysmap_start)[pfn] = mfn;
- set_gpfn_from_mfn(mfn, pfn);
+
+ dom0_update_physmap(d, pfn, mfn, vphysmap_start);
++pfn;
if (!(pfn & 0xfffff))
process_pending_softirqs();
@@ -1029,11 +1128,7 @@ int __init construct_dom0(
#ifndef NDEBUG
#define pfn (nr_pages - 1 - (pfn - (alloc_epfn - alloc_spfn)))
#endif
- if ( !is_pv_32on64_domain(d) )
- ((unsigned long *)vphysmap_start)[pfn] = mfn;
- else
- ((unsigned int *)vphysmap_start)[pfn] = mfn;
- set_gpfn_from_mfn(mfn, pfn);
+ dom0_update_physmap(d, pfn, mfn, vphysmap_start);
#undef pfn
page++; pfn++;
if (!(pfn & 0xfffff))
@@ -1041,6 +1136,50 @@ int __init construct_dom0(
}
}
+ /* Copy the OS image and free temporary buffer. */
+ elf.dest = (void*)vkern_start;
+ rc = elf_load_binary(&elf, (is_pvh_domain(d) ? v_start : 0));
+ if ( rc < 0 )
+ {
+ printk("Failed to load the kernel binary\n");
+ return rc;
+ }
+ bootstrap_map(NULL);
+
+ if ( UNSET_ADDR != parms.virt_hypercall )
+ {
+ void *addr;
+ if ( is_pvh_domain(d) )
+ {
+ if ( (addr = xzalloc_bytes(PAGE_SIZE)) == NULL )
+ {
+ printk("pvh: xzalloc failed for %ld bytes.\n", PAGE_SIZE);
+ return -ENOMEM;
+ }
+ } else
+ addr = (void *)parms.virt_hypercall;
+
+ if ( (parms.virt_hypercall < v_start) ||
+ (parms.virt_hypercall >= v_end) )
+ {
+ mapcache_override_current(NULL);
+ write_ptbase(current);
+ printk("Invalid HYPERCALL_PAGE field in ELF notes.\n");
+ return -1;
+ }
+ hypercall_page_initialise(d, addr);
+
+ if ( is_pvh_domain(d) )
+ {
+ early_pvh_copy_or_zero(parms.virt_hypercall, addr, PAGE_SIZE,
+ v_start);
+ xfree(addr);
+ }
+ }
+
+ /* Free temporary buffers. */
+ discard_initial_images();
+
if ( initrd_len != 0 )
{
si->mod_start = vinitrd_start ?: initrd_pfn;
@@ -1056,6 +1195,16 @@ int __init construct_dom0(
si->console.dom0.info_off = sizeof(struct start_info);
si->console.dom0.info_size = sizeof(struct dom0_vga_console_info);
}
+ if ( is_pvh_domain(d) )
+ {
+ unsigned long mfn = virt_to_mfn(d->shared_info);
+ unsigned long pfn = shared_info_paddr >> PAGE_SHIFT;
+ si->shared_info = shared_info_paddr;
+ dom0_update_physmap(d, pfn, mfn, 0);
+
+ early_pvh_copy_or_zero(vstartinfo_start, si_buf, PAGE_SIZE, v_start);
+ xfree(si_buf);
+ }
if ( is_pv_32on64_domain(d) )
xlat_start_info(si, XLAT_start_info_console_dom0);
@@ -1087,12 +1236,18 @@ int __init construct_dom0(
regs->eip = parms.virt_entry;
regs->esp = vstack_end;
regs->esi = vstartinfo_start;
- regs->eflags = X86_EFLAGS_IF;
+ regs->eflags = X86_EFLAGS_IF | 0x2;
if ( opt_dom0_shadow )
- if ( paging_enable(d, PG_SH_enable) == 0 )
+ {
+ if ( is_pvh_domain(d) )
+ {
+ printk("Invalid option dom0_shadow for PVH\n");
+ return -EINVAL;
+ }
+ if ( paging_enable(d, PG_SH_enable) == 0 )
paging_update_paging_modes(v);
-
+ }
if ( supervisor_mode_kernel )
{
v->arch.pv_vcpu.kernel_ss &= ~3;
@@ -1169,6 +1324,9 @@ int __init construct_dom0(
BUG_ON(rc != 0);
+ if ( is_pvh_domain(d) )
+ pvh_map_all_iomem(d);
+
iommu_dom0_init(dom0);
return 0;
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index 5aa0852..674c324 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -580,6 +580,20 @@ int hap_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
}
}
+/* Resize hap table. Copied from: libxl_get_required_shadow_memory() */
+void hap_set_pvh_alloc_for_dom0(struct domain *d, unsigned long num_pages)
+{
+ int rc;
+ unsigned long memkb = num_pages * (PAGE_SIZE / 1024);
+
+ memkb = 4 * (256 * d->max_vcpus + 2 * (memkb / 1024));
+ num_pages = ((memkb+1023)/1024) << (20 - PAGE_SHIFT);
+ paging_lock(d);
+ rc = hap_set_allocation(d, num_pages, NULL);
+ paging_unlock(d);
+ BUG_ON(rc);
+}
+
static const struct paging_mode hap_paging_real_mode;
static const struct paging_mode hap_paging_protected_mode;
static const struct paging_mode hap_paging_pae_mode;
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 43301a5..f307f24 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -60,6 +60,10 @@ integer_param("maxcpus", max_cpus);
static bool_t __initdata disable_smep;
invbool_param("smep", disable_smep);
+/* Boot dom0 in PVH mode */
+static bool_t __initdata opt_dom0pvh;
+boolean_param("dom0pvh", opt_dom0pvh);
+
/* **** Linux config option: propagated to domain0. */
/* "acpi=off": Sisables both ACPI table parsing and interpreter. */
/* "acpi=force": Override the disable blacklist. */
@@ -545,7 +549,7 @@ void __init __start_xen(unsigned long mbi_p)
{
char *memmap_type = NULL;
char *cmdline, *kextra, *loader;
- unsigned int initrdidx;
+ unsigned int initrdidx, domcr_flags = 0;
multiboot_info_t *mbi = __va(mbi_p);
module_t *mod = (module_t *)__va(mbi->mods_addr);
unsigned long nr_pages, modules_headroom, *module_map;
@@ -1314,7 +1318,9 @@ void __init __start_xen(unsigned long mbi_p)
panic("Could not protect TXT memory regions\n");
/* Create initial domain 0. */
- dom0 = domain_create(0, DOMCRF_s3_integrity, 0);
+ domcr_flags = (opt_dom0pvh ? DOMCRF_pvh | DOMCRF_hap : 0);
+ domcr_flags |= DOMCRF_s3_integrity;
+ dom0 = domain_create(0, domcr_flags, 0);
if ( IS_ERR(dom0) || (alloc_dom0_vcpu0() == NULL) )
panic("Error creating domain 0\n");
diff --git a/xen/include/asm-x86/hap.h b/xen/include/asm-x86/hap.h
index e03f983..aab8558 100644
--- a/xen/include/asm-x86/hap.h
+++ b/xen/include/asm-x86/hap.h
@@ -63,6 +63,7 @@ int hap_track_dirty_vram(struct domain *d,
XEN_GUEST_HANDLE_64(uint8) dirty_bitmap);
extern const struct paging_mode *hap_paging_get_mode(struct vcpu *);
+void hap_set_pvh_alloc_for_dom0(struct domain *d, unsigned long num_pages);
#endif /* XEN_HAP_H */
--
1.7.2.3
next prev parent reply other threads:[~2013-04-23 21:26 UTC|newest]
Thread overview: 72+ messages / expand[flat|nested] mbox.gz Atom feed top
2013-04-23 21:25 [PATCH 00/17][V4]: PVH xen: version 4 patches Mukesh Rathor
2013-04-23 21:25 ` [PATCH 01/17] PVH xen: turn gdb_frames/gdt_ents into union Mukesh Rathor
2013-04-23 21:25 ` [PATCH 02/17] PVH xen: add XENMEM_add_to_physmap_range Mukesh Rathor
2013-04-23 21:25 ` [PATCH 03/17] PVH xen: create domctl_memory_mapping() function Mukesh Rathor
2013-04-24 7:01 ` Jan Beulich
2013-04-23 21:25 ` [PATCH 04/17] PVH xen: add params to read_segment_register Mukesh Rathor
2013-04-23 21:25 ` [PATCH 05/17] PVH xen: vmx realted preparatory changes for PVH Mukesh Rathor
2013-04-23 21:25 ` [PATCH 06/17] PVH xen: Introduce PVH guest type Mukesh Rathor
2013-04-24 7:07 ` Jan Beulich
2013-04-24 23:01 ` Mukesh Rathor
2013-04-25 8:28 ` Jan Beulich
2013-04-23 21:25 ` [PATCH 07/17] PVH xen: tools changes to create PVH domain Mukesh Rathor
2013-04-24 7:10 ` Jan Beulich
2013-04-24 23:02 ` Mukesh Rathor
2013-04-23 21:25 ` [PATCH 08/17] PVH xen: domain creation code changes Mukesh Rathor
2013-04-23 21:25 ` [PATCH 09/17] PVH xen: create PVH vmcs, and also initialization Mukesh Rathor
2013-04-24 7:42 ` Jan Beulich
2013-04-30 21:01 ` Mukesh Rathor
2013-04-30 21:04 ` Mukesh Rathor
2013-04-23 21:25 ` [PATCH 10/17] PVH xen: introduce vmx_pvh.c and pvh.c Mukesh Rathor
2013-04-24 8:47 ` Jan Beulich
2013-04-25 0:57 ` Mukesh Rathor
2013-04-25 8:36 ` Jan Beulich
2013-04-26 1:16 ` Mukesh Rathor
2013-04-26 1:58 ` Mukesh Rathor
2013-04-26 7:29 ` Jan Beulich
2013-04-26 7:20 ` Jan Beulich
2013-04-27 2:06 ` Mukesh Rathor
2013-05-01 0:51 ` Mukesh Rathor
2013-05-01 13:52 ` Jan Beulich
2013-05-02 1:10 ` Mukesh Rathor
2013-05-02 6:42 ` Jan Beulich
2013-05-03 1:03 ` Mukesh Rathor
2013-05-10 1:51 ` Mukesh Rathor
2013-05-10 7:07 ` Jan Beulich
2013-05-10 23:44 ` Mukesh Rathor
2013-05-02 1:17 ` Mukesh Rathor
2013-05-02 6:53 ` Jan Beulich
2013-05-03 0:40 ` Mukesh Rathor
2013-05-03 6:33 ` Jan Beulich
2013-05-04 1:40 ` Mukesh Rathor
2013-05-06 6:44 ` Jan Beulich
2013-05-07 1:25 ` Mukesh Rathor
2013-05-07 8:07 ` Jan Beulich
2013-05-11 0:30 ` Mukesh Rathor
2013-04-25 11:19 ` Tim Deegan
2013-04-23 21:26 ` [PATCH 11/17] PVH xen: some misc changes like mtrr, intr, msi Mukesh Rathor
2013-04-23 21:26 ` [PATCH 12/17] PVH xen: support invalid op, return PVH features etc Mukesh Rathor
2013-04-24 9:01 ` Jan Beulich
2013-04-25 1:01 ` Mukesh Rathor
2013-04-23 21:26 ` [PATCH 13/17] PVH xen: p2m related changes Mukesh Rathor
2013-04-25 11:28 ` Tim Deegan
2013-04-25 21:59 ` Mukesh Rathor
2013-04-26 8:53 ` Tim Deegan
2013-04-23 21:26 ` [PATCH 14/17] PVH xen: Add and remove foreign pages Mukesh Rathor
2013-04-25 11:38 ` Tim Deegan
2013-04-23 21:26 ` [PATCH 15/17] PVH xen: Miscellaneous changes Mukesh Rathor
2013-04-24 9:06 ` Jan Beulich
2013-05-10 1:54 ` Mukesh Rathor
2013-05-10 7:10 ` Jan Beulich
2013-04-23 21:26 ` [PATCH 16/17] PVH xen: elf and iommu related changes to prep for dom0 PVH Mukesh Rathor
2013-04-24 9:15 ` Jan Beulich
2013-05-14 1:16 ` Mukesh Rathor
2013-05-14 6:56 ` Jan Beulich
2013-05-14 19:14 ` Mukesh Rathor
2013-04-23 21:26 ` Mukesh Rathor [this message]
2013-04-24 9:28 ` [PATCH 17/17] PVH xen: PVH dom0 creation Jan Beulich
2013-04-26 1:18 ` Mukesh Rathor
2013-04-26 7:22 ` Jan Beulich
2013-05-10 1:53 ` Mukesh Rathor
2013-05-10 7:14 ` Jan Beulich
2013-05-15 1:18 ` Mukesh Rathor
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1366752366-16594-18-git-send-email-mukesh.rathor@oracle.com \
--to=mukesh.rathor@oracle.com \
--cc=Xen-devel@lists.xensource.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).