* [PATCH] x86: tighten checks in XEN_DOMCTL_memory_mapping handler
@ 2012-09-19 10:50 Jan Beulich
From: Jan Beulich @ 2012-09-19 10:50 UTC (permalink / raw)
To: xen-devel
Properly checking the MFN requires knowing the physical address width
supported by the platform, so the code determining that width gets
moved out of the MTRR subdirectory into the common CPU identification
code, where it can be obtained consistently.
Incidentally, the model-specific workaround in that code is likely
unnecessary - I believe those CPU models don't support 64-bit mode -
but as I wasn't able to formally verify this, the workaround is
retained for now.
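For illustration, this is roughly how the width is determined once the
code is consolidated (a minimal sketch, not part of the patch;
probe_paddr_bits() is an invented name, while cpuid_eax() is Xen's
existing helper):

    unsigned int probe_paddr_bits(void)
    {
        unsigned int bits = 36;    /* default, as before this change */

        /* Extended leaf 0x80000008, EAX[7:0] = physical address bits. */
        if ( cpuid_eax(0x80000000) >= 0x80000008 )
            bits = cpuid_eax(0x80000008) & 0xff;

        return bits;
    }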
The domctl code here was also lacking other error checks, as (looking
at it again from that angle) was the XEN_DOMCTL_ioport_mapping one.
Besides adding the missing checks, a message is now printed when
revoking access permissions fails, as that may have implications for
the host operator - e.g. the need to avoid passing the affected
devices through to another guest until the one previously using them
has actually died.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
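For reference, the tightened validity check in the first domctl hunk
below amounts to the following (sketch only; mfn_range_valid() is an
invented name, while paddr_bits and PAGE_SHIFT are the Xen symbols
used in the patch):

    static int mfn_range_valid(unsigned long gfn, unsigned long mfn,
                               unsigned long nr_mfns)
    {
        unsigned long last_mfn = mfn + nr_mfns - 1;

        return !(last_mfn < mfn ||                     /* MFN wrap */
                 (mfn | last_mfn) >> (paddr_bits - PAGE_SHIFT) ||
                 gfn + nr_mfns - 1 < gfn);             /* GFN wrap */
    }

The shift test rejects any range touching MFNs at or beyond the
platform's physical address width.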
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -36,6 +36,8 @@ integer_param("cpuid_mask_ext_edx", opt_
struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
+unsigned int paddr_bits __read_mostly = 36;
+
/*
* Default host IA32_CR_PAT value to cover all memory types.
* BIOS usually sets it to 0x07040600070406.
@@ -265,6 +267,8 @@ static void __cpuinit generic_identify(s
}
if ( xlvl >= 0x80000004 )
get_model_name(c); /* Default name */
+ if ( xlvl >= 0x80000008 )
+ paddr_bits = cpuid_eax(0x80000008) & 0xff;
}
/* Intel-defined flags: level 0x00000007 */
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -144,6 +144,11 @@ void __devinit early_intel_workaround(st
c->cpuid_level);
}
}
+
+ /* CPUID workaround for Intel 0F33/0F34 CPU */
+ if (boot_cpu_data.x86 == 0xF && boot_cpu_data.x86_model == 3 &&
+ (boot_cpu_data.x86_mask == 3 || boot_cpu_data.x86_mask == 4))
+ paddr_bits = 36;
}
/*
--- a/xen/arch/x86/cpu/mtrr/main.c
+++ b/xen/arch/x86/cpu/mtrr/main.c
@@ -559,8 +559,6 @@ struct mtrr_value {
unsigned long lsize;
};
-unsigned int paddr_bits __read_mostly = 36;
-
/**
* mtrr_bp_init - initialize mtrrs on the boot CPU
*
@@ -572,31 +570,8 @@ void __init mtrr_bp_init(void)
{
if (cpu_has_mtrr) {
mtrr_if = &generic_mtrr_ops;
- size_or_mask = 0xff000000; /* 36 bits */
- size_and_mask = 0x00f00000;
-
- /* This is an AMD specific MSR, but we assume(hope?) that
- Intel will implement it to when they extend the address
- bus of the Xeon. */
- if (cpuid_eax(0x80000000) >= 0x80000008) {
- paddr_bits = cpuid_eax(0x80000008) & 0xff;
- /* CPUID workaround for Intel 0F33/0F34 CPU */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- boot_cpu_data.x86 == 0xF &&
- boot_cpu_data.x86_model == 0x3 &&
- (boot_cpu_data.x86_mask == 0x3 ||
- boot_cpu_data.x86_mask == 0x4))
- paddr_bits = 36;
-
- size_or_mask = ~((1ULL << (paddr_bits - PAGE_SHIFT)) - 1);
- size_and_mask = ~size_or_mask & 0xfffff00000ULL;
- } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
- boot_cpu_data.x86 == 6) {
- /* VIA C* family have Intel style MTRRs, but
- don't support PAE */
- size_or_mask = 0xfff00000; /* 32 bits */
- size_and_mask = 0;
- }
+ size_or_mask = ~((1ULL << (paddr_bits - PAGE_SHIFT)) - 1);
+ size_and_mask = ~size_or_mask & 0xfffff00000ULL;
}
if (mtrr_if) {
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -825,10 +825,12 @@ long arch_do_domctl(
unsigned long mfn = domctl->u.memory_mapping.first_mfn;
unsigned long nr_mfns = domctl->u.memory_mapping.nr_mfns;
int add = domctl->u.memory_mapping.add_mapping;
- int i;
+ unsigned long i;
ret = -EINVAL;
- if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
+ if ( (mfn + nr_mfns - 1) < mfn || /* wrap? */
+ ((mfn | (mfn + nr_mfns - 1)) >> (paddr_bits - PAGE_SHIFT)) ||
+ (gfn + nr_mfns - 1) < gfn ) /* wrap? */
break;
ret = -EPERM;
@@ -853,8 +855,25 @@ long arch_do_domctl(
d->domain_id, gfn, mfn, nr_mfns);
ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
- for ( i = 0; i < nr_mfns; i++ )
- set_mmio_p2m_entry(d, gfn+i, _mfn(mfn+i));
+ if ( !ret && paging_mode_translate(d) )
+ {
+ for ( i = 0; !ret && i < nr_mfns; i++ )
+ if ( !set_mmio_p2m_entry(d, gfn + i, _mfn(mfn + i)) )
+ ret = -EIO;
+ if ( ret )
+ {
+ printk(XENLOG_G_WARNING
+ "memory_map:fail: dom%d gfn=%lx mfn=%lx\n",
+ d->domain_id, gfn + i, mfn + i);
+ while ( i-- )
+ clear_mmio_p2m_entry(d, gfn + i);
+ if ( iomem_deny_access(d, mfn, mfn + nr_mfns - 1) &&
+ IS_PRIV(current->domain) )
+ printk(XENLOG_ERR
+ "memory_map: failed to deny dom%d access to [%lx,%lx]\n",
+ d->domain_id, mfn, mfn + nr_mfns - 1);
+ }
+ }
}
else
{
@@ -862,9 +881,17 @@ long arch_do_domctl(
"memory_map:remove: dom%d gfn=%lx mfn=%lx nr=%lx\n",
d->domain_id, gfn, mfn, nr_mfns);
- for ( i = 0; i < nr_mfns; i++ )
- clear_mmio_p2m_entry(d, gfn+i);
+ if ( paging_mode_translate(d) )
+ for ( i = 0; i < nr_mfns; i++ )
+ add |= !clear_mmio_p2m_entry(d, gfn + i);
ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
+ if ( !ret && add )
+ ret = -EIO;
+ if ( ret && IS_PRIV(current->domain) )
+ printk(XENLOG_ERR
+ "memory_map: error %ld %s dom%d access to [%lx,%lx]\n",
+ ret, add ? "removing" : "denying", d->domain_id,
+ mfn, mfn + nr_mfns - 1);
}
rcu_unlock_domain(d);
@@ -926,12 +953,23 @@ long arch_do_domctl(
if ( !found )
{
g2m_ioport = xmalloc(struct g2m_ioport);
+ if ( !g2m_ioport )
+ ret = -ENOMEM;
+ }
+ if ( !found && !ret )
+ {
g2m_ioport->gport = fgp;
g2m_ioport->mport = fmp;
g2m_ioport->np = np;
list_add_tail(&g2m_ioport->list, &hd->g2m_ioport_list);
}
- ret = ioports_permit_access(d, fmp, fmp + np - 1);
+ if ( !ret )
+ ret = ioports_permit_access(d, fmp, fmp + np - 1);
+ if ( ret && !found && g2m_ioport )
+ {
+ list_del(&g2m_ioport->list);
+ xfree(g2m_ioport);
+ }
}
else
{
@@ -946,6 +984,10 @@ long arch_do_domctl(
break;
}
ret = ioports_deny_access(d, fmp, fmp + np - 1);
+ if ( ret && IS_PRIV(current->domain) )
+ printk(XENLOG_ERR
+ "ioport_map: error %ld denying dom%d access to [%x,%x]\n",
+ ret, d->domain_id, fmp, fmp + np - 1);
}
rcu_unlock_domain(d);
}
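The reworked XEN_DOMCTL_ioport_mapping add path can be hard to follow
as a diff; in straight-line form the intent is (sketch only,
simplified from the hunk above, all identifiers as in the surrounding
code):

    if ( !found )
    {
        g2m_ioport = xmalloc(struct g2m_ioport);
        if ( !g2m_ioport )
            ret = -ENOMEM;
        else
        {
            /* Track the new guest->machine port range. */
            g2m_ioport->gport = fgp;
            g2m_ioport->mport = fmp;
            g2m_ioport->np = np;
            list_add_tail(&g2m_ioport->list, &hd->g2m_ioport_list);
        }
    }
    if ( !ret )
        ret = ioports_permit_access(d, fmp, fmp + np - 1);
    if ( ret && !found && g2m_ioport )
    {
        /* Granting access failed - unlink and free what we added. */
        list_del(&g2m_ioport->list);
        xfree(g2m_ioport);
    }

That is, the tracking structure is only kept if both the allocation
and the permission grant succeed.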
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel
* Re: [PATCH] x86: tighten checks in XEN_DOMCTL_memory_mapping handler
@ 2012-09-19 11:24 Keir Fraser
From: Keir Fraser @ 2012-09-19 11:24 UTC (permalink / raw)
To: Jan Beulich, xen-devel
On 19/09/2012 11:50, "Jan Beulich" <JBeulich@suse.com> wrote:
> Properly checking the MFN requires knowing the physical address width
> supported by the platform, so the code determining that width gets
> moved out of the MTRR subdirectory into the common CPU identification
> code, where it can be obtained consistently.
> [...]
Acked-by: Keir Fraser <keir@xen.org>