From: Jean Guyader <jean.guyader@eu.citrix.com>
To: xen-devel@lists.xensource.com
Cc: allen.m.kay@intel.com, tim@xen.org,
Jean Guyader <jean.guyader@eu.citrix.com>
Subject: [PATCH 3/6] add_to_physmap: Move the code for XENMEM_add_to_physmap.
Date: Tue, 8 Nov 2011 20:04:25 +0000 [thread overview]
Message-ID: <1320782668-18109-4-git-send-email-jean.guyader@eu.citrix.com> (raw)
In-Reply-To: <1320782668-18109-3-git-send-email-jean.guyader@eu.citrix.com>
[-- Attachment #1: Type: text/plain, Size: 293 bytes --]
Move the code for the XENMEM_add_to_physmap case into its own
function (xenmem_add_to_physmap).
Signed-off-by: Jean Guyader <jean.guyader@eu.citrix.com>
---
xen/arch/x86/mm.c | 188 ++++++++++++++++++++++++++++-------------------------
1 files changed, 99 insertions(+), 89 deletions(-)
[-- Warning: decoded text below may be mangled, UTF-8 assumed --]
[-- Attachment #2: 0003-add_to_physmap-Move-the-code-for-XENMEM_add_to_physm.patch --]
[-- Type: text/x-patch; name="0003-add_to_physmap-Move-the-code-for-XENMEM_add_to_physm.patch", Size: 7065 bytes --]
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index acc1f34..f75011e 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4592,119 +4592,129 @@ static int handle_iomem_range(unsigned long s, unsigned long e, void *p)
return 0;
}
-long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
+static int xenmem_add_to_physmap(struct domain *d, struct xen_add_to_physmap xatp)
{
struct page_info *page = NULL;
+ unsigned long prev_mfn, mfn = 0, gpfn;
int rc;
- switch ( op )
- {
- case XENMEM_add_to_physmap:
+ switch ( xatp.space )
{
- struct xen_add_to_physmap xatp;
- unsigned long prev_mfn, mfn = 0, gpfn;
- struct domain *d;
-
- if ( copy_from_guest(&xatp, arg, 1) )
- return -EFAULT;
+ case XENMAPSPACE_shared_info:
+ if ( xatp.idx == 0 )
+ mfn = virt_to_mfn(d->shared_info);
+ break;
+ case XENMAPSPACE_grant_table:
+ spin_lock(&d->grant_table->lock);
- rc = rcu_lock_target_domain_by_id(xatp.domid, &d);
- if ( rc != 0 )
- return rc;
+ if ( d->grant_table->gt_version == 0 )
+ d->grant_table->gt_version = 1;
- if ( xsm_add_to_physmap(current->domain, d) )
+ if ( d->grant_table->gt_version == 2 &&
+ (xatp.idx & XENMAPIDX_grant_table_status) )
{
- rcu_unlock_domain(d);
- return -EPERM;
+ xatp.idx &= ~XENMAPIDX_grant_table_status;
+ if ( xatp.idx < nr_status_frames(d->grant_table) )
+ mfn = virt_to_mfn(d->grant_table->status[xatp.idx]);
+ }
+ else
+ {
+ if ( (xatp.idx >= nr_grant_frames(d->grant_table)) &&
+ (xatp.idx < max_nr_grant_frames) )
+ gnttab_grow_table(d, xatp.idx + 1);
+
+ if ( xatp.idx < nr_grant_frames(d->grant_table) )
+ mfn = virt_to_mfn(d->grant_table->shared_raw[xatp.idx]);
}
- switch ( xatp.space )
+ spin_unlock(&d->grant_table->lock);
+ break;
+ case XENMAPSPACE_gmfn:
+ {
+ p2m_type_t p2mt;
+
+ xatp.idx = mfn_x(gfn_to_mfn_unshare(d, xatp.idx, &p2mt));
+ /* If the page is still shared, exit early */
+ if ( p2m_is_shared(p2mt) )
{
- case XENMAPSPACE_shared_info:
- if ( xatp.idx == 0 )
- mfn = virt_to_mfn(d->shared_info);
+ rcu_unlock_domain(d);
+ return -ENOMEM;
+ }
+ if ( !get_page_from_pagenr(xatp.idx, d) )
break;
- case XENMAPSPACE_grant_table:
- spin_lock(&d->grant_table->lock);
+ mfn = xatp.idx;
+ page = mfn_to_page(mfn);
+ break;
+ }
+ default:
+ break;
+ }
- if ( d->grant_table->gt_version == 0 )
- d->grant_table->gt_version = 1;
+ if ( !paging_mode_translate(d) || (mfn == 0) )
+ {
+ if ( page )
+ put_page(page);
+ rcu_unlock_domain(d);
+ return -EINVAL;
+ }
- if ( d->grant_table->gt_version == 2 &&
- (xatp.idx & XENMAPIDX_grant_table_status) )
- {
- xatp.idx &= ~XENMAPIDX_grant_table_status;
- if ( xatp.idx < nr_status_frames(d->grant_table) )
- mfn = virt_to_mfn(d->grant_table->status[xatp.idx]);
- }
- else
- {
- if ( (xatp.idx >= nr_grant_frames(d->grant_table)) &&
- (xatp.idx < max_nr_grant_frames) )
- gnttab_grow_table(d, xatp.idx + 1);
+ domain_lock(d);
- if ( xatp.idx < nr_grant_frames(d->grant_table) )
- mfn = virt_to_mfn(d->grant_table->shared_raw[xatp.idx]);
- }
+ if ( page )
+ put_page(page);
- spin_unlock(&d->grant_table->lock);
- break;
- case XENMAPSPACE_gmfn:
- {
- p2m_type_t p2mt;
+ /* Remove previously mapped page if it was present. */
+ prev_mfn = gmfn_to_mfn(d, xatp.gpfn);
+ if ( mfn_valid(prev_mfn) )
+ {
+ if ( is_xen_heap_mfn(prev_mfn) )
+ /* Xen heap frames are simply unhooked from this phys slot. */
+ guest_physmap_remove_page(d, xatp.gpfn, prev_mfn, 0);
+ else
+ /* Normal domain memory is freed, to avoid leaking memory. */
+ guest_remove_page(d, xatp.gpfn);
+ }
- xatp.idx = mfn_x(gfn_to_mfn_unshare(d, xatp.idx, &p2mt));
- /* If the page is still shared, exit early */
- if ( p2m_is_shared(p2mt) )
- {
- rcu_unlock_domain(d);
- return -ENOMEM;
- }
- if ( !get_page_from_pagenr(xatp.idx, d) )
- break;
- mfn = xatp.idx;
- page = mfn_to_page(mfn);
- break;
- }
- default:
- break;
- }
+ /* Unmap from old location, if any. */
+ gpfn = get_gpfn_from_mfn(mfn);
+ ASSERT( gpfn != SHARED_M2P_ENTRY );
+ if ( gpfn != INVALID_M2P_ENTRY )
+ guest_physmap_remove_page(d, gpfn, mfn, 0);
- if ( !paging_mode_translate(d) || (mfn == 0) )
- {
- if ( page )
- put_page(page);
- rcu_unlock_domain(d);
- return -EINVAL;
- }
+ /* Map at new location. */
+ rc = guest_physmap_add_page(d, xatp.gpfn, mfn, 0);
- domain_lock(d);
+ domain_unlock(d);
- if ( page )
- put_page(page);
+ return rc;
+}
- /* Remove previously mapped page if it was present. */
- prev_mfn = gmfn_to_mfn(d, xatp.gpfn);
- if ( mfn_valid(prev_mfn) )
- {
- if ( is_xen_heap_mfn(prev_mfn) )
- /* Xen heap frames are simply unhooked from this phys slot. */
- guest_physmap_remove_page(d, xatp.gpfn, prev_mfn, 0);
- else
- /* Normal domain memory is freed, to avoid leaking memory. */
- guest_remove_page(d, xatp.gpfn);
- }
- /* Unmap from old location, if any. */
- gpfn = get_gpfn_from_mfn(mfn);
- ASSERT( gpfn != SHARED_M2P_ENTRY );
- if ( gpfn != INVALID_M2P_ENTRY )
- guest_physmap_remove_page(d, gpfn, mfn, 0);
+long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
+{
+ int rc;
+
+ switch ( op )
+ {
+ case XENMEM_add_to_physmap:
+ {
+ struct xen_add_to_physmap xatp;
+ struct domain *d;
- /* Map at new location. */
- rc = guest_physmap_add_page(d, xatp.gpfn, mfn, 0);
+ if ( copy_from_guest(&xatp, arg, 1) )
+ return -EFAULT;
+
+ rc = rcu_lock_target_domain_by_id(xatp.domid, &d);
+ if ( rc != 0 )
+ return rc;
+
+ if ( xsm_add_to_physmap(current->domain, d) )
+ {
+ rcu_unlock_domain(d);
+ return -EPERM;
+ }
- domain_unlock(d);
+ xenmem_add_to_physmap(d, xatp);
rcu_unlock_domain(d);
[-- Attachment #3: Type: text/plain, Size: 138 bytes --]
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel
next prev parent reply other threads:[~2011-11-08 20:04 UTC|newest]
Thread overview: 22+ messages / expand[flat|nested] mbox.gz Atom feed top
2011-11-08 20:04 [PATCH 0/6] IOMMU, vtd and iotlb flush rework (v4) Jean Guyader
2011-11-08 20:04 ` [PATCH 1/6] vtd: Refactor iotlb flush code Jean Guyader
2011-11-08 20:04 ` [PATCH 2/6] iommu: Introduce iommu_flush and iommu_flush_all Jean Guyader
2011-11-08 20:04 ` Jean Guyader [this message]
2011-11-08 20:04 ` [PATCH 4/6] mm: New XENMEM space, XENMAPSPACE_gmfn_range Jean Guyader
2011-11-08 20:04 ` [PATCH 5/6] hvmloader: Change memory relocation loop when overlap with PCI hole Jean Guyader
2011-11-08 20:04 ` [PATCH 6/6] Introduce per cpu flag (iommu_dont_flush_iotlb) to avoid unnecessary iotlb flush Jean Guyader
2011-11-09 10:09 ` [PATCH 4/6] mm: New XENMEM space, XENMAPSPACE_gmfn_range Tim Deegan
-- strict thread matches above, loose matches on Subject: below --
2011-11-16 19:25 [PATCH 0/6] IOMMU, vtd and iotlb flush rework (v8) Jean Guyader
2011-11-16 19:25 ` [PATCH 1/6] vtd: Refactor iotlb flush code Jean Guyader
2011-11-16 19:25 ` [PATCH 2/6] iommu: Introduce iommu_flush and iommu_flush_all Jean Guyader
2011-11-16 19:25 ` [PATCH 3/6] add_to_physmap: Move the code for XENMEM_add_to_physmap Jean Guyader
2011-11-19 21:58 ` Olaf Hering
2011-11-19 22:14 ` Keir Fraser
2011-11-19 22:37 ` Jean Guyader
2011-11-20 13:25 ` Olaf Hering
2011-11-20 19:59 ` Keir Fraser
2011-11-21 8:39 ` Olaf Hering
[not found] <20111113175006.1EF1C72C36F@homiemail-mx8.g.dreamhost.com>
2011-11-14 14:17 ` [PATCH 3/6] add_to_physmap: Move the code for XENMEM_add_to_physmap Andres Lagar-Cavilla
2011-11-13 17:40 [PATCH 0/6] IOMMU, vtd and iotlb flush rework (v7) Jean Guyader
2011-11-13 17:40 ` [PATCH 1/6] vtd: Refactor iotlb flush code Jean Guyader
2011-11-13 17:40 ` [PATCH 2/6] iommu: Introduce iommu_flush and iommu_flush_all Jean Guyader
2011-11-13 17:40 ` [PATCH 3/6] add_to_physmap: Move the code for XENMEM_add_to_physmap Jean Guyader
2011-11-10 8:43 [PATCH 0/6] IOMMU, vtd and iotlb flush rework (v5) Jean Guyader
2011-11-10 8:43 ` [PATCH 1/6] vtd: Refactor iotlb flush code Jean Guyader
2011-11-10 8:44 ` [PATCH 2/6] iommu: Introduce iommu_flush and iommu_flush_all Jean Guyader
2011-11-10 8:44 ` [PATCH 3/6] add_to_physmap: Move the code for XENMEM_add_to_physmap Jean Guyader
2011-11-07 18:25 IOMMU, vtd and iotlb flush rework (v3) Jean Guyader
2011-11-07 18:25 ` [PATCH 1/6] vtd: Refactor iotlb flush code Jean Guyader
2011-11-07 18:25 ` [PATCH 2/6] iommu: Introduce iommu_flush and iommu_flush_all Jean Guyader
2011-11-07 18:25 ` [PATCH 3/6] add_to_physmap: Move the code for XENMEM_add_to_physmap Jean Guyader
2011-11-08 13:24 ` Tim Deegan
2011-11-07 15:16 IOMMU, vtd and iotlb flush rework (v2) Jean Guyader
2011-11-07 15:16 ` [PATCH 1/6] vtd: Refactor iotlb flush code Jean Guyader
2011-11-07 15:16 ` [PATCH 2/6] iommu: Introduce iommu_flush and iommu_flush_all Jean Guyader
2011-11-07 15:16 ` [PATCH 3/6] add_to_physmap: Move the code for XENMEM_add_to_physmap Jean Guyader
2011-11-04 10:38 [PATCH 0/6] IOMMU, vtd and iotlb flush rework Jean Guyader
2011-11-04 10:38 ` [PATCH 1/6] vtd: Refactor iotlb flush code Jean Guyader
2011-11-04 10:38 ` [PATCH 2/6] iommu: Introduce iommu_flush and iommu_flush_all Jean Guyader
2011-11-04 10:38 ` [PATCH 3/6] add_to_physmap: Move the code for XENMEM_add_to_physmap Jean Guyader
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1320782668-18109-4-git-send-email-jean.guyader@eu.citrix.com \
--to=jean.guyader@eu.citrix.com \
--cc=allen.m.kay@intel.com \
--cc=tim@xen.org \
--cc=xen-devel@lists.xensource.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).