From: Haozhong Zhang <haozhong.zhang@intel.com>
To: xen-devel@lists.xen.org
Cc: Haozhong Zhang <haozhong.zhang@intel.com>,
Andrew Cooper <andrew.cooper3@citrix.com>,
Jan Beulich <jbeulich@suse.com>,
Chao Peng <chao.p.peng@linux.intel.com>,
Dan Williams <dan.j.williams@intel.com>
Subject: [RFC XEN PATCH v3 14/39] x86_64/mm: refactor memory_add()
Date: Mon, 11 Sep 2017 12:37:55 +0800
Message-ID: <20170911043820.14617-15-haozhong.zhang@intel.com>
In-Reply-To: <20170911043820.14617-1-haozhong.zhang@intel.com>

Separate the revertible part of memory_add() into a new function
memory_add_common(), which will also be used in PMEM management. The
separation will ease failure recovery in PMEM management. Several
coding-style issues in the touched code are fixed as well.

No functional change is introduced.

Signed-off-by: Haozhong Zhang <haozhong.zhang@intel.com>
---
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
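
Below is a minimal sketch of the resulting split, using only names that
appear in this patch; memory_add_common() holds every step that can still
be rolled back, while memory_add() keeps the non-revertible tail. The
direct_map parameter lets a caller skip the direct-map setup; how the PMEM
path uses that is left to later patches in this series.

    int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
    {
        struct mem_hotadd_info info = { .spfn = spfn, .epfn = epfn, .cur = spfn };
        int ret;

        /* Revertible part: direct map, node span, frame table, M2P, IOMMU. */
        ret = memory_add_common(&info, pxm, true);
        if ( !ret )
        {
            /* Point of no return: these two steps cannot be reverted. */
            share_hotadd_m2p_table(&info);
            transfer_pages_to_heap(&info);
        }

        return ret;
    }
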
xen/arch/x86/x86_64/mm.c | 98 +++++++++++++++++++++++++++---------------------
1 file changed, 56 insertions(+), 42 deletions(-)
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index f635e4bf70..c8ffafe8a8 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -1337,21 +1337,16 @@ static int mem_hotadd_check(unsigned long spfn, unsigned long epfn)
return 1;
}
-/*
- * A bit paranoid for memory allocation failure issue since
- * it may be reason for memory add
- */
-int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
+static int memory_add_common(struct mem_hotadd_info *info,
+ unsigned int pxm, bool direct_map)
{
- struct mem_hotadd_info info;
+ unsigned long spfn = info->spfn, epfn = info->epfn;
int ret;
nodeid_t node;
unsigned long old_max = max_page, old_total = total_pages;
unsigned long old_node_start, old_node_span, orig_online;
unsigned long i;
- dprintk(XENLOG_INFO, "memory_add %lx ~ %lx with pxm %x\n", spfn, epfn, pxm);
-
if ( !mem_hotadd_check(spfn, epfn) )
return -EINVAL;
@@ -1366,22 +1361,25 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
return -EINVAL;
}
- i = virt_to_mfn(HYPERVISOR_VIRT_END - 1) + 1;
- if ( spfn < i )
- {
- ret = map_pages_to_xen((unsigned long)mfn_to_virt(spfn), spfn,
- min(epfn, i) - spfn, PAGE_HYPERVISOR);
- if ( ret )
- goto destroy_directmap;
- }
- if ( i < epfn )
+ if ( direct_map )
{
- if ( i < spfn )
- i = spfn;
- ret = map_pages_to_xen((unsigned long)mfn_to_virt(i), i,
- epfn - i, __PAGE_HYPERVISOR_RW);
- if ( ret )
- goto destroy_directmap;
+ i = virt_to_mfn(HYPERVISOR_VIRT_END - 1) + 1;
+ if ( spfn < i )
+ {
+ ret = map_pages_to_xen((unsigned long)mfn_to_virt(spfn), spfn,
+ min(epfn, i) - spfn, PAGE_HYPERVISOR);
+ if ( ret )
+ goto destroy_directmap;
+ }
+ if ( i < epfn )
+ {
+ if ( i < spfn )
+ i = spfn;
+ ret = map_pages_to_xen((unsigned long)mfn_to_virt(i), i,
+ epfn - i, __PAGE_HYPERVISOR_RW);
+ if ( ret )
+ goto destroy_directmap;
+ }
}
old_node_start = node_start_pfn(node);
@@ -1398,22 +1396,18 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
}
else
{
- if (node_start_pfn(node) > spfn)
+ if ( node_start_pfn(node) > spfn )
NODE_DATA(node)->node_start_pfn = spfn;
- if (node_end_pfn(node) < epfn)
+ if ( node_end_pfn(node) < epfn )
NODE_DATA(node)->node_spanned_pages = epfn - node_start_pfn(node);
}
- info.spfn = spfn;
- info.epfn = epfn;
- info.cur = spfn;
-
- ret = extend_frame_table(&info);
+ ret = extend_frame_table(info);
if ( ret )
goto restore_node_status;
/* Set max_page as setup_m2p_table will use it*/
- if (max_page < epfn)
+ if ( max_page < epfn )
{
max_page = epfn;
max_pdx = pfn_to_pdx(max_page - 1) + 1;
@@ -1421,7 +1415,7 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
total_pages += epfn - spfn;
set_pdx_range(spfn, epfn);
- ret = setup_m2p_table(&info);
+ ret = setup_m2p_table(info);
if ( ret )
goto destroy_m2p;
@@ -1429,11 +1423,12 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
if ( iommu_enabled && !iommu_passthrough && !need_iommu(hardware_domain) )
{
for ( i = spfn; i < epfn; i++ )
- if ( iommu_map_page(hardware_domain, i, i, IOMMUF_readable|IOMMUF_writable) )
+ if ( iommu_map_page(hardware_domain, i, i,
+ IOMMUF_readable|IOMMUF_writable) )
break;
if ( i != epfn )
{
- while (i-- > old_max)
+ while ( i-- > old_max )
/* If statement to satisfy __must_check. */
if ( iommu_unmap_page(hardware_domain, i) )
continue;
@@ -1442,14 +1437,10 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
}
}
- /* We can't revert any more */
- share_hotadd_m2p_table(&info);
- transfer_pages_to_heap(&info);
-
return 0;
destroy_m2p:
- destroy_m2p_mapping(&info);
+ destroy_m2p_mapping(info);
max_page = old_max;
total_pages = old_total;
max_pdx = pfn_to_pdx(max_page - 1) + 1;
@@ -1459,9 +1450,32 @@ restore_node_status:
node_set_offline(node);
NODE_DATA(node)->node_start_pfn = old_node_start;
NODE_DATA(node)->node_spanned_pages = old_node_span;
- destroy_directmap:
- destroy_xen_mappings((unsigned long)mfn_to_virt(spfn),
- (unsigned long)mfn_to_virt(epfn));
+destroy_directmap:
+ if ( direct_map )
+ destroy_xen_mappings((unsigned long)mfn_to_virt(spfn),
+ (unsigned long)mfn_to_virt(epfn));
+
+ return ret;
+}
+
+/*
+ * A bit paranoid for memory allocation failure issue since
+ * it may be reason for memory add
+ */
+int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
+{
+ struct mem_hotadd_info info = { .spfn = spfn, .epfn = epfn, .cur = spfn };
+ int ret;
+
+ dprintk(XENLOG_INFO, "memory_add %lx ~ %lx with pxm %x\n", spfn, epfn, pxm);
+
+ ret = memory_add_common(&info, pxm, true);
+ if ( !ret )
+ {
+ /* We can't revert any more */
+ share_hotadd_m2p_table(&info);
+ transfer_pages_to_heap(&info);
+ }
return ret;
}
--
2.14.1