From: Mukesh Rathor <mukesh.rathor@oracle.com>
To: Xen-devel@lists.xensource.com
Subject: [PATCH 10/17] PVH xen: introduce vmx_pvh.c and pvh.c
Date: Tue, 23 Apr 2013 14:25:59 -0700 [thread overview]
Message-ID: <1366752366-16594-11-git-send-email-mukesh.rathor@oracle.com> (raw)
In-Reply-To: <1366752366-16594-1-git-send-email-mukesh.rathor@oracle.com>
The heart of this patch is the vmx exit handler for PVH guests. It is nicely
isolated in a separate module as preferred by most of us. A call to it
is added to vmx_vmexit_handler().
Changes in V2:
- Move non VMX generic code to arch/x86/hvm/pvh.c
- Remove get_gpr_ptr() and use existing decode_register() instead.
- Defer call to pvh vmx exit handler until interrupts are enabled. So the
caller vmx_pvh_vmexit_handler() handles the NMI/EXT-INT/TRIPLE_FAULT now.
- Fix the CPUID (wrongly) clearing bit 24. No need to do this now, set
the correct feature bits in CR4 during vmcs creation.
- Fix few hard tabs.
Changes in V3:
- Lot of cleanup and rework in PVH vm exit handler.
- add parameter to emulate_forced_invalid_op().
Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
---
xen/arch/x86/hvm/Makefile | 3 +-
xen/arch/x86/hvm/hvm.c | 4 -
xen/arch/x86/hvm/pvh.c | 203 +++++++++++++
xen/arch/x86/hvm/vmx/Makefile | 1 +
xen/arch/x86/hvm/vmx/vmcs.c | 3 +-
xen/arch/x86/hvm/vmx/vmx.c | 8 +
xen/arch/x86/hvm/vmx/vmx_pvh.c | 597 +++++++++++++++++++++++++++++++++++++
xen/arch/x86/traps.c | 23 +-
xen/include/asm-x86/hvm/hvm.h | 6 +
xen/include/asm-x86/hvm/vmx/vmx.h | 5 +
xen/include/asm-x86/processor.h | 1 +
xen/include/asm-x86/pvh.h | 6 +
12 files changed, 847 insertions(+), 13 deletions(-)
create mode 100644 xen/arch/x86/hvm/pvh.c
create mode 100644 xen/arch/x86/hvm/vmx/vmx_pvh.c
create mode 100644 xen/include/asm-x86/pvh.h
diff --git a/xen/arch/x86/hvm/Makefile b/xen/arch/x86/hvm/Makefile
index eea5555..65ff9f3 100644
--- a/xen/arch/x86/hvm/Makefile
+++ b/xen/arch/x86/hvm/Makefile
@@ -22,4 +22,5 @@ obj-y += vlapic.o
obj-y += vmsi.o
obj-y += vpic.o
obj-y += vpt.o
-obj-y += vpmu.o
\ No newline at end of file
+obj-y += vpmu.o
+obj-y += pvh.o
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 27dbe3d..0d84ec7 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3254,10 +3254,6 @@ static long hvm_vcpu_op(
return rc;
}
-typedef unsigned long hvm_hypercall_t(
- unsigned long, unsigned long, unsigned long, unsigned long, unsigned long,
- unsigned long);
-
#define HYPERCALL(x) \
[ __HYPERVISOR_ ## x ] = (hvm_hypercall_t *) do_ ## x
diff --git a/xen/arch/x86/hvm/pvh.c b/xen/arch/x86/hvm/pvh.c
new file mode 100644
index 0000000..fe8b89c
--- /dev/null
+++ b/xen/arch/x86/hvm/pvh.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2013, Mukesh Rathor, Oracle Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+#include <xen/hypercall.h>
+#include <xen/guest_access.h>
+#include <asm/p2m.h>
+#include <asm/traps.h>
+#include <asm/hvm/vmx/vmx.h>
+#include <public/sched.h>
+
+
+/*
+ * Grant table hypercall filter for PVH guests: forward only the
+ * whitelisted sub-ops to do_grant_table_op(); everything else -ENOSYS.
+ */
+static int pvh_grant_table_op(unsigned int cmd, XEN_GUEST_HANDLE(void) uop,
+ unsigned int count)
+{
+ switch ( cmd )
+ {
+ /*
+ * Only the following Grant Ops have been verified for PVH guest, hence
+ * we check for them here.
+ */
+ case GNTTABOP_map_grant_ref:
+ case GNTTABOP_unmap_grant_ref:
+ case GNTTABOP_setup_table:
+ case GNTTABOP_copy:
+ case GNTTABOP_query_size:
+ case GNTTABOP_set_version:
+ return do_grant_table_op(cmd, uop, count);
+ }
+ return -ENOSYS;
+}
+
+/*
+ * VCPU hypercall filter for PVH guests: forward only the verified
+ * commands to do_vcpu_op(); unlisted commands return -ENOSYS.
+ */
+static long pvh_vcpu_op(int cmd, int vcpuid, XEN_GUEST_HANDLE(void) arg)
+{
+ long rc = -ENOSYS;
+
+ switch ( cmd )
+ {
+ case VCPUOP_register_runstate_memory_area:
+ case VCPUOP_get_runstate_info:
+ case VCPUOP_set_periodic_timer:
+ case VCPUOP_stop_periodic_timer:
+ case VCPUOP_set_singleshot_timer:
+ case VCPUOP_stop_singleshot_timer:
+ case VCPUOP_is_up:
+ case VCPUOP_up:
+ case VCPUOP_initialise:
+ rc = do_vcpu_op(cmd, vcpuid, arg);
+
+ /* pvh boot vcpu setting context for bringing up smp vcpu */
+ /*
+ * NOTE(review): vmx_vmcs_enter(current) has no matching
+ * vmx_vmcs_exit() here -- confirm this is intentional.
+ */
+ if ( cmd == VCPUOP_initialise )
+ vmx_vmcs_enter(current);
+ }
+ return rc;
+}
+
+/*
+ * Physdev hypercall filter for PVH guests.  Unprivileged domains may only
+ * use the pirq-related sub-ops listed below; a privileged (control) domain
+ * is allowed to issue any physdev op.
+ */
+static long pvh_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg)
+{
+ switch ( cmd )
+ {
+ case PHYSDEVOP_map_pirq:
+ case PHYSDEVOP_unmap_pirq:
+ case PHYSDEVOP_eoi:
+ case PHYSDEVOP_irq_status_query:
+ case PHYSDEVOP_get_free_pirq:
+ return do_physdev_op(cmd, arg);
+
+ default:
+ if ( IS_PRIV(current->domain) )
+ return do_physdev_op(cmd, arg);
+ }
+ return -ENOSYS;
+}
+
+/*
+ * HVMOP handler for PVH callers.  If the target domain is an HVM guest
+ * (i.e. a PVH dom0 building an HVM domU), the op is forwarded unmodified
+ * to do_hvm_op().  For a PVH target, only HVMOP_set_param of
+ * HVM_PARAM_CALLBACK_IRQ with a vector-type callback is accepted.
+ *
+ * NOTE(review): the argument is always copied as a xen_hvm_param struct,
+ * even for ops that carry a different payload -- confirm all ops reaching
+ * this path really use xen_hvm_param.
+ */
+static long pvh_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
+{
+ long rc = -EINVAL;
+ struct xen_hvm_param harg;
+ struct domain *d;
+
+ if ( copy_from_guest(&harg, arg, 1) )
+ return -EFAULT;
+
+ rc = rcu_lock_target_domain_by_id(harg.domid, &d);
+ if ( rc != 0 )
+ return rc;
+
+ if ( is_hvm_domain(d) )
+ {
+ /* pvh dom0 is building an hvm guest */
+ rcu_unlock_domain(d);
+ return do_hvm_op(op, arg);
+ }
+
+ rc = -ENOSYS;
+ if ( op == HVMOP_set_param )
+ {
+ if ( harg.index == HVM_PARAM_CALLBACK_IRQ )
+ {
+ struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
+ uint64_t via = harg.value;
+ /* Top byte of 'via' encodes the delivery type (biased by 1). */
+ uint8_t via_type = (uint8_t)(via >> 56) + 1;
+
+ if ( via_type == HVMIRQ_callback_vector )
+ {
+ hvm_irq->callback_via_type = HVMIRQ_callback_vector;
+ hvm_irq->callback_via.vector = (uint8_t)via;
+ rc = 0;
+ }
+ }
+ }
+ rcu_unlock_domain(d);
+ if ( rc )
+ gdprintk(XENLOG_DEBUG, "op:%ld -ENOSYS\n", op);
+
+ return rc;
+}
+
+/*
+ * 64-bit hypercall dispatch table for PVH guests.  Entries left NULL are
+ * unimplemented and make pvh_do_hypercall() return -ENOSYS to the guest.
+ * Some entries point at the pvh_* filter wrappers above rather than the
+ * generic do_* handlers.
+ */
+static hvm_hypercall_t *pvh_hypercall64_table[NR_hypercalls] = {
+ [__HYPERVISOR_platform_op] = (hvm_hypercall_t *)do_platform_op,
+ [__HYPERVISOR_memory_op] = (hvm_hypercall_t *)do_memory_op,
+ [__HYPERVISOR_xen_version] = (hvm_hypercall_t *)do_xen_version,
+ [__HYPERVISOR_console_io] = (hvm_hypercall_t *)do_console_io,
+ [__HYPERVISOR_grant_table_op] = (hvm_hypercall_t *)pvh_grant_table_op,
+ [__HYPERVISOR_vcpu_op] = (hvm_hypercall_t *)pvh_vcpu_op,
+ [__HYPERVISOR_mmuext_op] = (hvm_hypercall_t *)do_mmuext_op,
+ [__HYPERVISOR_xsm_op] = (hvm_hypercall_t *)do_xsm_op,
+ [__HYPERVISOR_sched_op] = (hvm_hypercall_t *)do_sched_op,
+ [__HYPERVISOR_event_channel_op]= (hvm_hypercall_t *)do_event_channel_op,
+ [__HYPERVISOR_physdev_op] = (hvm_hypercall_t *)pvh_physdev_op,
+ [__HYPERVISOR_hvm_op] = (hvm_hypercall_t *)pvh_hvm_op,
+ [__HYPERVISOR_sysctl] = (hvm_hypercall_t *)do_sysctl,
+ [__HYPERVISOR_domctl] = (hvm_hypercall_t *)do_domctl
+};
+
+/*
+ * Check if hypercall is valid
+ * Returns: 0 if hcall is not valid with eax set to the errno to ret to guest
+ *
+ * On entry regs->eax holds the hypercall number; on rejection it is
+ * overwritten with the (negative) errno to hand back to the guest.
+ * Hypercalls from user mode (SS DPL 3) are refused outright.
+ */
+static bool_t hcall_valid(struct cpu_user_regs *regs)
+{
+ struct segment_register sreg;
+
+ hvm_get_segment_register(current, x86_seg_ss, &sreg);
+ if ( unlikely(sreg.attr.fields.dpl == 3) )
+ {
+ regs->eax = -EPERM;
+ return 0;
+ }
+
+ /* Following HCALLs have not been verified for PVH domUs */
+ if ( !IS_PRIV(current->domain) &&
+ (regs->eax == __HYPERVISOR_xsm_op ||
+ regs->eax == __HYPERVISOR_platform_op ||
+ regs->eax == __HYPERVISOR_domctl) ) /* for privcmd mmap */
+ {
+ regs->eax = -ENOSYS;
+ return 0;
+ }
+ return 1;
+}
+
+/*
+ * Top-level PVH hypercall dispatcher, invoked from the VMCALL vmexit
+ * path.  Validates the hypercall number and caller, then dispatches via
+ * pvh_hypercall64_table[] with the standard 64-bit ABI argument registers
+ * (rdi, rsi, rdx, r10, r8, r9).  Returns HVM_HCALL_completed or
+ * HVM_HCALL_preempted (continuation pending).
+ */
+int pvh_do_hypercall(struct cpu_user_regs *regs)
+{
+ uint32_t hnum = regs->eax;
+
+ if ( hnum >= NR_hypercalls || pvh_hypercall64_table[hnum] == NULL )
+ {
+ gdprintk(XENLOG_WARNING, "PVH: Unimplemented HCALL:%d. Returning "
+ "-ENOSYS. domid:%d IP:%lx SP:%lx\n",
+ hnum, current->domain->domain_id, regs->rip, regs->rsp);
+ regs->eax = -ENOSYS;
+ vmx_update_guest_eip();
+ return HVM_HCALL_completed;
+ }
+
+ /*
+ * NOTE(review): SCHEDOP_shutdown unconditionally crashes the domain --
+ * presumably a temporary measure until proper shutdown is wired up;
+ * confirm before release.
+ */
+ if ( regs->eax == __HYPERVISOR_sched_op && regs->rdi == SCHEDOP_shutdown )
+ {
+ domain_crash_synchronous();
+ return HVM_HCALL_completed;
+ }
+
+ if ( !hcall_valid(regs) )
+ return HVM_HCALL_completed;
+
+ current->arch.hvm_vcpu.hcall_preempted = 0;
+ regs->rax = pvh_hypercall64_table[hnum](regs->rdi, regs->rsi, regs->rdx,
+ regs->r10, regs->r8, regs->r9);
+
+ if ( current->arch.hvm_vcpu.hcall_preempted )
+ return HVM_HCALL_preempted;
+
+ return HVM_HCALL_completed;
+}
diff --git a/xen/arch/x86/hvm/vmx/Makefile b/xen/arch/x86/hvm/vmx/Makefile
index 373b3d9..8b71dae 100644
--- a/xen/arch/x86/hvm/vmx/Makefile
+++ b/xen/arch/x86/hvm/vmx/Makefile
@@ -5,3 +5,4 @@ obj-y += vmcs.o
obj-y += vmx.o
obj-y += vpmu_core2.o
obj-y += vvmx.o
+obj-y += vmx_pvh.o
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index e7b0c4b..45e2d84 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1503,7 +1503,8 @@ void vmx_do_resume(struct vcpu *v)
vmx_clear_vmcs(v);
vmx_load_vmcs(v);
- if ( !is_pvh_vcpu(v) ) {
+ if ( !is_pvh_vcpu(v) )
+ {
hvm_migrate_timers(v);
hvm_migrate_pirqs(v);
}
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 70d0286..ad9344c 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1535,6 +1535,8 @@ static struct hvm_function_table __read_mostly vmx_function_table = {
.virtual_intr_delivery_enabled = vmx_virtual_intr_delivery_enabled,
.process_isr = vmx_process_isr,
.nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
+ .pvh_set_vcpu_info = vmx_pvh_set_vcpu_info,
+ .pvh_read_descriptor = vmx_pvh_read_descriptor,
};
struct hvm_function_table * __init start_vmx(void)
@@ -2370,6 +2372,12 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
if ( unlikely(exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) )
return vmx_failed_vmentry(exit_reason, regs);
+ if ( is_pvh_vcpu(v) )
+ {
+ vmx_pvh_vmexit_handler(regs);
+ return;
+ }
+
if ( v->arch.hvm_vmx.vmx_realmode )
{
/* Put RFLAGS back the way the guest wants it */
diff --git a/xen/arch/x86/hvm/vmx/vmx_pvh.c b/xen/arch/x86/hvm/vmx/vmx_pvh.c
new file mode 100644
index 0000000..3ee5556
--- /dev/null
+++ b/xen/arch/x86/hvm/vmx/vmx_pvh.c
@@ -0,0 +1,597 @@
+/*
+ * Copyright (C) 2013, Mukesh Rathor, Oracle Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <xen/hypercall.h>
+#include <xen/guest_access.h>
+#include <asm/p2m.h>
+#include <asm/traps.h>
+#include <asm/hvm/vmx/vmx.h>
+#include <public/sched.h>
+#include <asm/pvh.h>
+
+#ifndef NDEBUG
+int pvhdbg = 0;
+#define dbgp1(...) do { (pvhdbg == 1) ? printk(__VA_ARGS__) : 0; } while ( 0 )
+#else
+#define dbgp1(...) ((void)0)
+#endif
+
+
+/* Refresh all guest segment selectors in regs{} from the current VMCS. */
+static void read_vmcs_selectors(struct cpu_user_regs *regs)
+{
+ regs->cs = __vmread(GUEST_CS_SELECTOR);
+ regs->ss = __vmread(GUEST_SS_SELECTOR);
+ regs->ds = __vmread(GUEST_DS_SELECTOR);
+ regs->es = __vmread(GUEST_ES_SELECTOR);
+ regs->gs = __vmread(GUEST_GS_SELECTOR);
+ regs->fs = __vmread(GUEST_FS_SELECTOR);
+}
+
+/* returns : 0 == msr read successfully */
+static int vmxit_msr_read(struct cpu_user_regs *regs)
+{
+ u64 msr_content = 0;
+
+ switch ( regs->ecx )
+ {
+ case MSR_IA32_MISC_ENABLE:
+ {
+ rdmsrl(MSR_IA32_MISC_ENABLE, msr_content);
+ /* Hide BTS/PEBS from the guest. */
+ msr_content |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL |
+ MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
+ break;
+ }
+ default:
+ {
+ /* pvh fixme: see hvm_msr_read_intercept() */
+ /*
+ * NOTE(review): this performs a raw rdmsrl of a guest-chosen MSR
+ * with no filtering -- confirm this is acceptable until the
+ * hvm_msr_read_intercept() path is used.
+ */
+ rdmsrl(regs->ecx, msr_content);
+ break;
+ }
+ }
+ /* Split the 64-bit value across eax (low) and edx (high) per MSR ABI. */
+ regs->eax = (uint32_t)msr_content;
+ regs->edx = (uint32_t)(msr_content >> 32);
+ vmx_update_guest_eip();
+
+ dbgp1("msr read c:%lx a:%lx d:%lx RIP:%lx RSP:%lx\n", regs->ecx, regs->eax,
+ regs->edx, regs->rip, regs->rsp);
+
+ return 0;
+}
+
+/* returns : 0 == msr written successfully */
+static int vmxit_msr_write(struct cpu_user_regs *regs)
+{
+ /* Reassemble the 64-bit value from edx:eax per the WRMSR ABI. */
+ uint64_t msr_content = (uint32_t)regs->eax | ((uint64_t)regs->edx << 32);
+
+ dbgp1("PVH: msr write:0x%lx. eax:0x%lx edx:0x%lx\n", regs->ecx,
+ regs->eax, regs->edx);
+
+ if ( hvm_msr_write_intercept(regs->ecx, msr_content) == X86EMUL_OKAY )
+ {
+ vmx_update_guest_eip();
+ return 0;
+ }
+ /* Non-zero return makes the caller crash the domain. */
+ return 1;
+}
+
+/*
+ * Handle a #DB vmexit: mirror the exit qualification into DR6, then
+ * either pause for an attached debugger (never for dom0) or reflect the
+ * exception back into the guest.
+ */
+static int vmxit_debug(struct cpu_user_regs *regs)
+{
+ struct vcpu *vp = current;
+ unsigned long exit_qualification = __vmread(EXIT_QUALIFICATION);
+
+ /* 0xffff0ff0 are the architecturally-fixed reserved bits of DR6. */
+ write_debugreg(6, exit_qualification | 0xffff0ff0);
+
+ /* gdbsx or another debugger */
+ if ( vp->domain->domain_id != 0 && /* never pause dom0 */
+ guest_kernel_mode(vp, regs) && vp->domain->debugger_attached )
+ {
+ domain_pause_for_debugger();
+ } else {
+ hvm_inject_hw_exception(TRAP_debug, HVM_DELIVER_NO_ERROR_CODE);
+ }
+ return 0;
+}
+
+/* Returns: rc == 0: handled the MTF vmexit */
+static int vmxit_mtf(struct cpu_user_regs *regs)
+{
+ struct vcpu *vp = current;
+ /* Remember whether single-step was on before we clear it below. */
+ int rc = -EINVAL, ss = vp->arch.hvm_vcpu.single_step;
+
+ /* Disarm the monitor trap flag; it is a one-shot debug mechanism. */
+ vp->arch.hvm_vmx.exec_control &= ~CPU_BASED_MONITOR_TRAP_FLAG;
+ __vmwrite(CPU_BASED_VM_EXEC_CONTROL, vp->arch.hvm_vmx.exec_control);
+ vp->arch.hvm_vcpu.single_step = 0;
+
+ /* Only expected when a debugger requested a single step. */
+ if ( vp->domain->debugger_attached && ss )
+ {
+ domain_pause_for_debugger();
+ rc = 0;
+ }
+ return rc;
+}
+
+/*
+ * Handle an int3 (#BP) vmexit: hand control to an attached debugger for
+ * non-dom0 kernel-mode breakpoints, otherwise re-inject the software
+ * exception into the guest.  Always returns 0 (handled).
+ */
+static int vmxit_int3(struct cpu_user_regs *regs)
+{
+ int ilen = vmx_get_instruction_length();
+ struct vcpu *vp = current;
+ struct hvm_trap trap_info = {
+ .vector = TRAP_int3,
+ .type = X86_EVENTTYPE_SW_EXCEPTION,
+ .error_code = HVM_DELIVER_NO_ERROR_CODE,
+ .insn_len = ilen
+ };
+
+ /* Advance past the int3 so the debugger sees the post-trap state. */
+ regs->eip += ilen;
+
+ /* gdbsx or another debugger. Never pause dom0 */
+ if ( vp->domain->domain_id != 0 && guest_kernel_mode(vp, regs) )
+ {
+ dbgp1("[%d]PVH: domain pause for debugger\n", smp_processor_id());
+ current->arch.gdbsx_vcpu_event = TRAP_int3;
+ domain_pause_for_debugger();
+ return 0;
+ }
+
+ /* No debugger: rewind eip and deliver the trap to the guest instead. */
+ regs->eip -= ilen;
+ hvm_inject_trap(&trap_info);
+
+ return 0;
+}
+
+/*
+ * Handle a #UD vmexit.  User-mode faults may be Xen's forced-emulation
+ * sequence (ud2; "xen"; cpuid); try to emulate it.  Kernel-mode faults,
+ * or a failed emulation, reflect #UD back to the guest.  If emulation
+ * faulted fetching the instruction, 'addr' holds the faulting address
+ * and a page fault is injected instead.
+ */
+static int vmxit_invalid_op(struct cpu_user_regs *regs)
+{
+ ulong addr = 0;
+
+ if ( guest_kernel_mode(current, regs) ||
+ emulate_forced_invalid_op(regs, &addr) == 0 )
+ {
+ hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
+ return 0;
+ }
+ if ( addr )
+ hvm_inject_page_fault(0, addr);
+
+ return 0;
+}
+
+/* Returns: rc == 0: handled the exception/NMI */
+static int vmxit_exception(struct cpu_user_regs *regs)
+{
+ unsigned int vector = (__vmread(VM_EXIT_INTR_INFO)) & INTR_INFO_VECTOR_MASK;
+ int rc = -ENOSYS;
+
+ dbgp1(" EXCPT: vec:%d cs:%lx r.IP:%lx\n", vector,
+ __vmread(GUEST_CS_SELECTOR), regs->eip);
+
+ /* Dispatch on the exception vector extracted from the exit info. */
+ switch ( vector )
+ {
+ case TRAP_debug:
+ rc = vmxit_debug(regs);
+ break;
+
+ case TRAP_int3:
+ rc = vmxit_int3(regs);
+ break;
+
+ case TRAP_invalid_op:
+ rc = vmxit_invalid_op(regs);
+ break;
+
+ case TRAP_no_device:
+ hvm_funcs.fpu_dirty_intercept(); /* vmx_fpu_dirty_intercept */
+ rc = 0;
+ break;
+
+ default:
+ /* rc stays -ENOSYS: the caller will crash the domain. */
+ gdprintk(XENLOG_WARNING,
+ "PVH: Unhandled trap:%d. IP:%lx\n", vector, regs->eip);
+ }
+ return rc;
+}
+
+/*
+ * Handle a VMCALL vmexit: dispatch the hypercall, and skip the VMCALL
+ * instruction unless the hypercall was preempted (continuation will
+ * re-execute it).
+ */
+static int vmxit_vmcall(struct cpu_user_regs *regs)
+{
+ if ( pvh_do_hypercall(regs) != HVM_HCALL_preempted )
+ vmx_update_guest_eip();
+ return 0;
+}
+
+/*
+ * Handle a MOV to/from CR0 for a PVH guest.
+ * Returns: rc == 0: success, -EPERM for disallowed values.
+ */
+static int access_cr0(struct cpu_user_regs *regs, uint acc_typ, uint64_t *regp)
+{
+ struct vcpu *vp = current;
+
+ if ( acc_typ == VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR )
+ {
+ unsigned long new_cr0 = *regp;
+ unsigned long old_cr0 = __vmread(GUEST_CR0);
+
+ dbgp1("PVH:writing to CR0. RIP:%lx val:0x%lx\n", regs->rip, *regp);
+ /* Reject any value with bits set above the low 32. */
+ if ( (u32)new_cr0 != new_cr0 )
+ {
+ gdprintk(XENLOG_ERR,
+ "Guest setting upper 32 bits in CR0: %lx", new_cr0);
+ return -EPERM;
+ }
+
+ new_cr0 &= ~HVM_CR0_GUEST_RESERVED_BITS;
+ /* ET is reserved and should always be 1. */
+ new_cr0 |= X86_CR0_ET;
+
+ /* pvh not expected to change to real mode */
+ if ( (new_cr0 & (X86_CR0_PE | X86_CR0_PG)) !=
+ (X86_CR0_PG | X86_CR0_PE) )
+ {
+ gdprintk(XENLOG_ERR,
+ "PVH attempting to turn off PE/PG. CR0:%lx\n", new_cr0);
+ return -EPERM;
+ }
+ /* TS going from 1 to 0 */
+ if ( (old_cr0 & X86_CR0_TS) && ((new_cr0 & X86_CR0_TS) == 0) )
+ vmx_fpu_enter(vp);
+
+ /* Keep the hardware CR0, cached guest CR0 and read shadow in sync. */
+ vp->arch.hvm_vcpu.hw_cr[0] = vp->arch.hvm_vcpu.guest_cr[0] = new_cr0;
+ __vmwrite(GUEST_CR0, new_cr0);
+ __vmwrite(CR0_READ_SHADOW, new_cr0);
+ }
+ else
+ {
+ *regp = __vmread(GUEST_CR0);
+ }
+ return 0;
+}
+
+/*
+ * Handle a MOV to/from CR4 for a PVH guest.
+ * Returns: rc == 0: success.
+ *
+ * NOTE(review): unlike access_cr0(), this does not update the cached
+ * guest_cr[4] / CR4 read shadow -- confirm that is intentional.
+ */
+static int access_cr4(struct cpu_user_regs *regs, uint acc_typ, uint64_t *regp)
+{
+ if ( acc_typ == VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR )
+ {
+ u64 old_cr4 = __vmread(GUEST_CR4);
+
+ /* Flush TLB mappings if any paging-related control bit changes. */
+ if ( (old_cr4 ^ (*regp)) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE) )
+ vpid_sync_all();
+
+ __vmwrite(GUEST_CR4, *regp);
+ }
+ else
+ *regp = __vmread(GUEST_CR4);
+
+ return 0;
+}
+
+/*
+ * Handle a control-register-access vmexit: MOV to/from CR0/CR4, or CLTS.
+ * CR3 exits are unexpected for PVH (CR3 loads are not intercepted) and
+ * crash the domain.  Returns: rc == 0: success, else -errno.
+ */
+static int vmxit_cr_access(struct cpu_user_regs *regs)
+{
+ unsigned long exit_qualification = __vmread(EXIT_QUALIFICATION);
+ uint acc_typ = VMX_CONTROL_REG_ACCESS_TYPE(exit_qualification);
+ int cr, rc = -EINVAL;
+
+ switch ( acc_typ )
+ {
+ case VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR:
+ case VMX_CONTROL_REG_ACCESS_TYPE_MOV_FROM_CR:
+ {
+ /* Decode which GPR the instruction moved to/from. */
+ uint gpr = VMX_CONTROL_REG_ACCESS_GPR(exit_qualification);
+ uint64_t *regp = decode_register(gpr, regs, 0);
+ cr = VMX_CONTROL_REG_ACCESS_NUM(exit_qualification);
+
+ if ( regp == NULL )
+ break;
+
+ switch ( cr )
+ {
+ case 0:
+ rc = access_cr0(regs, acc_typ, regp);
+ break;
+
+ case 3:
+ gdprintk(XENLOG_ERR,
+ "PVH: unexpected cr3 vmexit. rip:%lx\n",
+ regs->rip);
+ domain_crash_synchronous();
+ break;
+
+ case 4:
+ rc = access_cr4(regs, acc_typ, regp);
+ break;
+ }
+ if ( rc == 0 )
+ vmx_update_guest_eip();
+ break;
+ }
+
+ case VMX_CONTROL_REG_ACCESS_TYPE_CLTS:
+ {
+ /* CLTS: clear CR0.TS and let the guest use the FPU again. */
+ struct vcpu *vp = current;
+ unsigned long cr0 = vp->arch.hvm_vcpu.guest_cr[0] & ~X86_CR0_TS;
+ vp->arch.hvm_vcpu.hw_cr[0] = vp->arch.hvm_vcpu.guest_cr[0] = cr0;
+
+ vmx_fpu_enter(vp);
+ __vmwrite(GUEST_CR0, cr0);
+ __vmwrite(CR0_READ_SHADOW, cr0);
+ vmx_update_guest_eip();
+ rc = 0;
+ }
+ }
+ return rc;
+}
+
+/*
+ * NOTE: a PVH sets IOPL natively by setting bits in the eflags and not by
+ * hypercalls used by a PV.
+ *
+ * Handle an I/O-instruction vmexit: if the current privilege level is
+ * allowed by the guest's IOPL, emulate the privileged op; otherwise
+ * inject #GP.
+ *
+ * NOTE(review): regs->error_code is whatever was left in the frame from
+ * the exit path -- confirm it is valid to pass here as the #GP error code.
+ */
+static int vmxit_io_instr(struct cpu_user_regs *regs)
+{
+ int curr_lvl;
+ /* IOPL occupies eflags bits 12-13. */
+ int requested = (regs->rflags >> 12) & 3;
+
+ /* Selectors must be current for emulate_privileged_op()/CPL check. */
+ read_vmcs_selectors(regs);
+ curr_lvl = regs->cs & 3;
+
+ if ( requested >= curr_lvl && emulate_privileged_op(regs) )
+ return 0;
+
+ hvm_inject_hw_exception(TRAP_gp_fault, regs->error_code);
+ return 0;
+}
+
+/*
+ * EPT violations are not expected for a PVH guest (its memory is mapped
+ * up front): log full diagnostics, dump the EPT walk, and inject #GP.
+ * Always returns 0 (handled, guest gets the fault).
+ */
+static int pvh_ept_handle_violation(unsigned long qualification,
+ paddr_t gpa, struct cpu_user_regs *regs)
+{
+ unsigned long gla, gfn = gpa >> PAGE_SHIFT;
+ p2m_type_t p2mt;
+ mfn_t mfn = get_gfn_query_unlocked(current->domain, gfn, &p2mt);
+
+ gdprintk(XENLOG_ERR, "EPT violation %#lx (%c%c%c/%c%c%c), "
+ "gpa %#"PRIpaddr", mfn %#lx, type %i. IP:0x%lx RSP:0x%lx\n",
+ qualification,
+ (qualification & EPT_READ_VIOLATION) ? 'r' : '-',
+ (qualification & EPT_WRITE_VIOLATION) ? 'w' : '-',
+ (qualification & EPT_EXEC_VIOLATION) ? 'x' : '-',
+ (qualification & EPT_EFFECTIVE_READ) ? 'r' : '-',
+ (qualification & EPT_EFFECTIVE_WRITE) ? 'w' : '-',
+ (qualification & EPT_EFFECTIVE_EXEC) ? 'x' : '-',
+ gpa, mfn_x(mfn), p2mt, regs->rip, regs->rsp);
+
+ ept_walk_table(current->domain, gfn);
+
+ /* The guest linear address is only valid when the hardware says so. */
+ if ( qualification & EPT_GLA_VALID )
+ {
+ gla = __vmread(GUEST_LINEAR_ADDRESS);
+ gdprintk(XENLOG_ERR, " --- GLA %#lx\n", gla);
+ }
+
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
+ return 0;
+}
+
+/*
+ * The cpuid macro clears rcx, so execute cpuid here exactly as the user
+ * process would on a PV guest.
+ *
+ * Runs the hardware CPUID with the guest's eax/rcx and writes the raw
+ * results back into the guest GPRs (no Xen-side filtering).
+ */
+static void pvh_user_cpuid(struct cpu_user_regs *regs)
+{
+ unsigned int eax, ebx, ecx, edx;
+
+ asm volatile ( "cpuid"
+ : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+ : "0" (regs->eax), "2" (regs->rcx) );
+
+ regs->rax = eax; regs->rbx = ebx; regs->rcx = ecx; regs->rdx = edx;
+}
+
+/*
+ * Main vm exit handler for PVH. Called from vmx_vmexit_handler().
+ * Note: vmx_asm_vmexit_handler updates rip/rsp/eflags in regs{} struct.
+ *
+ * Any handler returning non-zero, and any unrecognized exit reason,
+ * results in the domain being crashed at the bottom of this function.
+ */
+void vmx_pvh_vmexit_handler(struct cpu_user_regs *regs)
+{
+ unsigned long exit_qualification;
+ unsigned int exit_reason = __vmread(VM_EXIT_REASON);
+ int rc=0, ccpu = smp_processor_id();
+ struct vcpu *vp = current;
+
+ dbgp1("PVH:[%d]left VMCS exitreas:%d RIP:%lx RSP:%lx EFLAGS:%lx CR0:%lx\n",
+ ccpu, exit_reason, regs->rip, regs->rsp, regs->rflags,
+ __vmread(GUEST_CR0));
+
+ /* for guest_kernel_mode() */
+ regs->cs = __vmread(GUEST_CS_SELECTOR);
+
+ switch ( (uint16_t)exit_reason )
+ {
+ case EXIT_REASON_EXCEPTION_NMI: /* 0 */
+ rc = vmxit_exception(regs);
+ break;
+
+ case EXIT_REASON_EXTERNAL_INTERRUPT: /* 1 */
+ break; /* handled in vmx_vmexit_handler() */
+
+ case EXIT_REASON_PENDING_VIRT_INTR: /* 7 */
+ {
+ struct vcpu *v = current;
+
+ /* Disable the interrupt window. */
+ v->arch.hvm_vmx.exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
+ __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+ break;
+ }
+
+ case EXIT_REASON_CPUID: /* 10 */
+ {
+ /* Kernel CPUID is PV-filtered; user CPUID is raw hardware. */
+ if ( guest_kernel_mode(vp, regs) )
+ pv_cpuid(regs);
+ else
+ pvh_user_cpuid(regs);
+
+ vmx_update_guest_eip();
+ break;
+ }
+
+ case EXIT_REASON_HLT: /* 12 */
+ {
+ vmx_update_guest_eip();
+ hvm_hlt(regs->eflags);
+ break;
+ }
+
+ case EXIT_REASON_VMCALL: /* 18 */
+ rc = vmxit_vmcall(regs);
+ break;
+
+ case EXIT_REASON_CR_ACCESS: /* 28 */
+ rc = vmxit_cr_access(regs);
+ break;
+
+ case EXIT_REASON_DR_ACCESS: /* 29 */
+ {
+ exit_qualification = __vmread(EXIT_QUALIFICATION);
+ vmx_dr_access(exit_qualification, regs);
+ break;
+ }
+
+ case EXIT_REASON_IO_INSTRUCTION: /* 30 */
+ vmxit_io_instr(regs);
+ break;
+
+ case EXIT_REASON_MSR_READ: /* 31 */
+ rc = vmxit_msr_read(regs);
+ break;
+
+ case EXIT_REASON_MSR_WRITE: /* 32 */
+ rc = vmxit_msr_write(regs);
+ break;
+
+ case EXIT_REASON_MONITOR_TRAP_FLAG: /* 37 */
+ rc = vmxit_mtf(regs);
+ break;
+
+ case EXIT_REASON_MCE_DURING_VMENTRY: /* 41 */
+ break; /* handled in vmx_vmexit_handler() */
+
+ case EXIT_REASON_EPT_VIOLATION: /* 48 */
+ {
+ paddr_t gpa = __vmread(GUEST_PHYSICAL_ADDRESS);
+ exit_qualification = __vmread(EXIT_QUALIFICATION);
+ rc = pvh_ept_handle_violation(exit_qualification, gpa, regs);
+ break;
+ }
+
+ default:
+ rc = 1;
+ gdprintk(XENLOG_ERR,
+ "PVH: Unexpected exit reason:0x%x\n", exit_reason);
+ }
+ /* Any unhandled condition is fatal for the guest. */
+ if ( rc )
+ {
+ exit_qualification = __vmread(EXIT_QUALIFICATION);
+ gdprintk(XENLOG_ERR,
+ "PVH: [%d] exit_reas:%d 0x%x qual:%ld 0x%lx cr0:0x%016lx\n",
+ ccpu, exit_reason, exit_reason, exit_qualification,
+ exit_qualification, __vmread(GUEST_CR0));
+ gdprintk(XENLOG_ERR, "PVH: RIP:%lx RSP:%lx EFLAGS:%lx CR3:%lx\n",
+ regs->rip, regs->rsp, regs->rflags, __vmread(GUEST_CR3));
+ domain_crash_synchronous();
+ }
+}
+
+/*
+ * Sets info for non boot SMP vcpu. VCPU 0 context is set by the library.
+ * In case of linux, the call comes from cpu_initialize_context().
+ *
+ * Loads the new vcpu's GDT, segment selectors and GS bases into its VMCS.
+ * Returns 0 on success, -EINVAL if the shadow-GS MSR cannot be added to
+ * the guest MSR load area.
+ */
+int vmx_pvh_set_vcpu_info(struct vcpu *v, struct vcpu_guest_context *ctxtp)
+{
+ if ( v->vcpu_id == 0 )
+ return 0;
+
+ vmx_vmcs_enter(v);
+ __vmwrite(GUEST_GDTR_BASE, ctxtp->gdt.pvh.addr);
+ __vmwrite(GUEST_GDTR_LIMIT, ctxtp->gdt.pvh.limit);
+ __vmwrite(GUEST_GS_BASE, ctxtp->gs_base_user);
+
+ __vmwrite(GUEST_CS_SELECTOR, ctxtp->user_regs.cs);
+ __vmwrite(GUEST_DS_SELECTOR, ctxtp->user_regs.ds);
+ __vmwrite(GUEST_ES_SELECTOR, ctxtp->user_regs.es);
+ __vmwrite(GUEST_SS_SELECTOR, ctxtp->user_regs.ss);
+ __vmwrite(GUEST_GS_SELECTOR, ctxtp->user_regs.gs);
+
+ if ( vmx_add_guest_msr(MSR_SHADOW_GS_BASE) )
+ {
+ /* Don't leave the VMCS held on the error path. */
+ vmx_vmcs_exit(v);
+ return -EINVAL;
+ }
+
+ vmx_write_guest_msr(MSR_SHADOW_GS_BASE, ctxtp->gs_base_kernel);
+
+ vmx_vmcs_exit(v);
+ return 0;
+}
+
+/*
+ * Read base/limit/attributes for the segment whose selector matches one
+ * of the guest's current selector registers, straight from the VMCS.
+ * Returns 1 on success, 0 if 'sel' matches none of the cached selectors.
+ */
+int vmx_pvh_read_descriptor(unsigned int sel, const struct vcpu *v,
+ const struct cpu_user_regs *regs,
+ unsigned long *base, unsigned long *limit,
+ unsigned int *ar)
+{
+ unsigned int tmp_ar = 0;
+ ASSERT(v == current);
+ ASSERT(is_pvh_vcpu(v));
+
+ if ( sel == (unsigned int)regs->cs )
+ {
+ *base = __vmread(GUEST_CS_BASE);
+ *limit = __vmread(GUEST_CS_LIMIT);
+ tmp_ar = __vmread(GUEST_CS_AR_BYTES);
+ }
+ else if ( sel == (unsigned int)regs->ds )
+ {
+ *base = __vmread(GUEST_DS_BASE);
+ *limit = __vmread(GUEST_DS_LIMIT);
+ tmp_ar = __vmread(GUEST_DS_AR_BYTES);
+ }
+ else if ( sel == (unsigned int)regs->ss )
+ {
+ *base = __vmread(GUEST_SS_BASE);
+ *limit = __vmread(GUEST_SS_LIMIT);
+ tmp_ar = __vmread(GUEST_SS_AR_BYTES);
+ }
+ else if ( sel == (unsigned int)regs->gs )
+ {
+ *base = __vmread(GUEST_GS_BASE);
+ *limit = __vmread(GUEST_GS_LIMIT);
+ tmp_ar = __vmread(GUEST_GS_AR_BYTES);
+ }
+ else if ( sel == (unsigned int)regs->fs )
+ {
+ *base = __vmread(GUEST_FS_BASE);
+ *limit = __vmread(GUEST_FS_LIMIT);
+ tmp_ar = __vmread(GUEST_FS_AR_BYTES);
+ }
+ else if ( sel == (unsigned int)regs->es )
+ {
+ *base = __vmread(GUEST_ES_BASE);
+ *limit = __vmread(GUEST_ES_LIMIT);
+ tmp_ar = __vmread(GUEST_ES_AR_BYTES);
+ }
+ else
+ {
+ gdprintk(XENLOG_WARNING, "Unmatched segment selector:%d\n", sel);
+ return 0;
+ }
+
+ /* In long mode the base/limit of CS are architecturally flat. */
+ if ( tmp_ar & X86_SEG_AR_CS_LM_ACTIVE )
+ {
+ *base = 0UL;
+ *limit = ~0UL;
+ }
+ /* Fixup ar so that it looks the same as in native mode */
+ *ar = (tmp_ar << 8);
+
+ return 1;
+}
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index dbea755..b95bed5 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -905,17 +905,22 @@ static int emulate_invalid_rdtscp(struct cpu_user_regs *regs)
return EXCRET_fault_fixed;
}
-static int emulate_forced_invalid_op(struct cpu_user_regs *regs)
+int emulate_forced_invalid_op(struct cpu_user_regs *regs,
+ unsigned long *addrp)
{
char sig[5], instr[2];
- unsigned long eip, rc;
+ unsigned long eip, rc, addr;
eip = regs->eip;
/* Check for forced emulation signature: ud2 ; .ascii "xen". */
- if ( (rc = copy_from_user(sig, (char *)eip, sizeof(sig))) != 0 )
+ if ( (rc = raw_copy_from_guest(sig, (char *)eip, sizeof(sig))) != 0 )
{
- propagate_page_fault(eip + sizeof(sig) - rc, 0);
+ addr = eip + sizeof(sig) - rc;
+ if ( addrp )
+ *addrp = addr;
+ else
+ propagate_page_fault(addr, 0);
return EXCRET_fault_fixed;
}
if ( memcmp(sig, "\xf\xbxen", sizeof(sig)) )
@@ -923,9 +928,13 @@ static int emulate_forced_invalid_op(struct cpu_user_regs *regs)
eip += sizeof(sig);
/* We only emulate CPUID. */
- if ( ( rc = copy_from_user(instr, (char *)eip, sizeof(instr))) != 0 )
+ if ( ( rc = raw_copy_from_guest(instr, (char *)eip, sizeof(instr))) != 0 )
{
- propagate_page_fault(eip + sizeof(instr) - rc, 0);
+ addr = eip + sizeof(instr) - rc;
+ if ( addrp )
+ *addrp = addr;
+ else
+ propagate_page_fault(addr, 0);
return EXCRET_fault_fixed;
}
if ( memcmp(instr, "\xf\xa2", sizeof(instr)) )
@@ -954,7 +963,7 @@ void do_invalid_op(struct cpu_user_regs *regs)
if ( likely(guest_mode(regs)) )
{
if ( !emulate_invalid_rdtscp(regs) &&
- !emulate_forced_invalid_op(regs) )
+ !emulate_forced_invalid_op(regs, NULL) )
do_guest_trap(TRAP_invalid_op, regs, 0);
return;
}
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index a790954..e2f99f3 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -514,4 +514,10 @@ bool_t nhvm_vmcx_hap_enabled(struct vcpu *v);
/* interrupt */
enum hvm_intblk nhvm_interrupt_blocked(struct vcpu *v);
+
+/* hypercall table typedef for HVM */
+typedef unsigned long hvm_hypercall_t(
+ unsigned long, unsigned long, unsigned long, unsigned long, unsigned long,
+ unsigned long);
+
#endif /* __ASM_X86_HVM_HVM_H__ */
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index 4c97d50..a9bca14 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -445,6 +445,11 @@ void setup_ept_dump(void);
void vmx_update_guest_eip(void);
void vmx_dr_access(unsigned long exit_qualification,struct cpu_user_regs *regs);
+void vmx_pvh_vmexit_handler(struct cpu_user_regs *regs);
+int vmx_pvh_set_vcpu_info(struct vcpu *v, struct vcpu_guest_context *ctxtp);
+int vmx_pvh_read_descriptor(unsigned int sel, const struct vcpu *v,
+ const struct cpu_user_regs *regs, unsigned long *base,
+ unsigned long *limit, unsigned int *ar);
int alloc_p2m_hap_data(struct p2m_domain *p2m);
void free_p2m_hap_data(struct p2m_domain *p2m);
diff --git a/xen/include/asm-x86/processor.h b/xen/include/asm-x86/processor.h
index 8c70324..6d0794c 100644
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -567,6 +567,7 @@ int microcode_update(XEN_GUEST_HANDLE_PARAM(const_void), unsigned long len);
int microcode_resume_cpu(int cpu);
void pv_cpuid(struct cpu_user_regs *regs);
+int emulate_forced_invalid_op(struct cpu_user_regs *regs, unsigned long *);
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_X86_PROCESSOR_H */
diff --git a/xen/include/asm-x86/pvh.h b/xen/include/asm-x86/pvh.h
new file mode 100644
index 0000000..73e59d3
--- /dev/null
+++ b/xen/include/asm-x86/pvh.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_X86_PVH_H__
+#define __ASM_X86_PVH_H__
+
+/* PVH hypercall dispatcher (arch/x86/hvm/pvh.c); see pvh_do_hypercall. */
+int pvh_do_hypercall(struct cpu_user_regs *regs);
+
+#endif /* __ASM_X86_PVH_H__ */
--
1.7.2.3
next prev parent reply other threads:[~2013-04-23 21:25 UTC|newest]
Thread overview: 72+ messages / expand[flat|nested] mbox.gz Atom feed top
2013-04-23 21:25 [PATCH 00/17][V4]: PVH xen: version 4 patches Mukesh Rathor
2013-04-23 21:25 ` [PATCH 01/17] PVH xen: turn gdb_frames/gdt_ents into union Mukesh Rathor
2013-04-23 21:25 ` [PATCH 02/17] PVH xen: add XENMEM_add_to_physmap_range Mukesh Rathor
2013-04-23 21:25 ` [PATCH 03/17] PVH xen: create domctl_memory_mapping() function Mukesh Rathor
2013-04-24 7:01 ` Jan Beulich
2013-04-23 21:25 ` [PATCH 04/17] PVH xen: add params to read_segment_register Mukesh Rathor
2013-04-23 21:25 ` [PATCH 05/17] PVH xen: vmx realted preparatory changes for PVH Mukesh Rathor
2013-04-23 21:25 ` [PATCH 06/17] PVH xen: Introduce PVH guest type Mukesh Rathor
2013-04-24 7:07 ` Jan Beulich
2013-04-24 23:01 ` Mukesh Rathor
2013-04-25 8:28 ` Jan Beulich
2013-04-23 21:25 ` [PATCH 07/17] PVH xen: tools changes to create PVH domain Mukesh Rathor
2013-04-24 7:10 ` Jan Beulich
2013-04-24 23:02 ` Mukesh Rathor
2013-04-23 21:25 ` [PATCH 08/17] PVH xen: domain creation code changes Mukesh Rathor
2013-04-23 21:25 ` [PATCH 09/17] PVH xen: create PVH vmcs, and also initialization Mukesh Rathor
2013-04-24 7:42 ` Jan Beulich
2013-04-30 21:01 ` Mukesh Rathor
2013-04-30 21:04 ` Mukesh Rathor
2013-04-23 21:25 ` Mukesh Rathor [this message]
2013-04-24 8:47 ` [PATCH 10/17] PVH xen: introduce vmx_pvh.c and pvh.c Jan Beulich
2013-04-25 0:57 ` Mukesh Rathor
2013-04-25 8:36 ` Jan Beulich
2013-04-26 1:16 ` Mukesh Rathor
2013-04-26 1:58 ` Mukesh Rathor
2013-04-26 7:29 ` Jan Beulich
2013-04-26 7:20 ` Jan Beulich
2013-04-27 2:06 ` Mukesh Rathor
2013-05-01 0:51 ` Mukesh Rathor
2013-05-01 13:52 ` Jan Beulich
2013-05-02 1:10 ` Mukesh Rathor
2013-05-02 6:42 ` Jan Beulich
2013-05-03 1:03 ` Mukesh Rathor
2013-05-10 1:51 ` Mukesh Rathor
2013-05-10 7:07 ` Jan Beulich
2013-05-10 23:44 ` Mukesh Rathor
2013-05-02 1:17 ` Mukesh Rathor
2013-05-02 6:53 ` Jan Beulich
2013-05-03 0:40 ` Mukesh Rathor
2013-05-03 6:33 ` Jan Beulich
2013-05-04 1:40 ` Mukesh Rathor
2013-05-06 6:44 ` Jan Beulich
2013-05-07 1:25 ` Mukesh Rathor
2013-05-07 8:07 ` Jan Beulich
2013-05-11 0:30 ` Mukesh Rathor
2013-04-25 11:19 ` Tim Deegan
2013-04-23 21:26 ` [PATCH 11/17] PVH xen: some misc changes like mtrr, intr, msi Mukesh Rathor
2013-04-23 21:26 ` [PATCH 12/17] PVH xen: support invalid op, return PVH features etc Mukesh Rathor
2013-04-24 9:01 ` Jan Beulich
2013-04-25 1:01 ` Mukesh Rathor
2013-04-23 21:26 ` [PATCH 13/17] PVH xen: p2m related changes Mukesh Rathor
2013-04-25 11:28 ` Tim Deegan
2013-04-25 21:59 ` Mukesh Rathor
2013-04-26 8:53 ` Tim Deegan
2013-04-23 21:26 ` [PATCH 14/17] PVH xen: Add and remove foreign pages Mukesh Rathor
2013-04-25 11:38 ` Tim Deegan
2013-04-23 21:26 ` [PATCH 15/17] PVH xen: Miscellaneous changes Mukesh Rathor
2013-04-24 9:06 ` Jan Beulich
2013-05-10 1:54 ` Mukesh Rathor
2013-05-10 7:10 ` Jan Beulich
2013-04-23 21:26 ` [PATCH 16/17] PVH xen: elf and iommu related changes to prep for dom0 PVH Mukesh Rathor
2013-04-24 9:15 ` Jan Beulich
2013-05-14 1:16 ` Mukesh Rathor
2013-05-14 6:56 ` Jan Beulich
2013-05-14 19:14 ` Mukesh Rathor
2013-04-23 21:26 ` [PATCH 17/17] PVH xen: PVH dom0 creation Mukesh Rathor
2013-04-24 9:28 ` Jan Beulich
2013-04-26 1:18 ` Mukesh Rathor
2013-04-26 7:22 ` Jan Beulich
2013-05-10 1:53 ` Mukesh Rathor
2013-05-10 7:14 ` Jan Beulich
2013-05-15 1:18 ` Mukesh Rathor
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1366752366-16594-11-git-send-email-mukesh.rathor@oracle.com \
--to=mukesh.rathor@oracle.com \
--cc=Xen-devel@lists.xensource.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).