From: Julien Grall <julien.grall@arm.com>
To: xen-devel@lists.xen.org
Cc: proskurin@sec.in.tum.de, Julien Grall <julien.grall@arm.com>,
sstabellini@kernel.org, steve.capper@arm.com,
wei.chen@linaro.org
Subject: [PATCH v2 04/15] xen/arm: p2m: Find the memory attributes based on the p2m type
Date: Thu, 28 Jul 2016 15:20:09 +0100
Message-ID: <1469715620-4670-5-git-send-email-julien.grall@arm.com>
In-Reply-To: <1469715620-4670-1-git-send-email-julien.grall@arm.com>
Currently, mfn_to_p2m_entry relies on the caller to provide the
correct memory attribute and deduces the shareability from it.

Some of the callers, such as p2m_create_table, use the same memory
attribute regardless of the underlying p2m type. For instance, this
will change the memory attribute from MATTR_DEV to MATTR_MEM when an
MMIO superpage is shattered.

Furthermore, it makes it more difficult to support different
shareability attributes with the same memory attribute.

All the memory attributes can be deduced from the p2m type. This
simplifies the code by dropping one parameter.
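
In summary, the new mapping is (a sketch of the new switch in
mfn_to_p2m_entry; see the first hunk below for the actual code):

    /* p2m type -> memory attribute + shareability */
    switch ( t )
    {
    case p2m_mmio_direct_nc:        /* non-cacheable MMIO */
        e.p2m.mattr = MATTR_DEV;
        e.p2m.sh = LPAE_SH_OUTER;
        break;
    case p2m_mmio_direct_c:         /* cacheable MMIO */
        e.p2m.mattr = MATTR_MEM;
        e.p2m.sh = LPAE_SH_OUTER;
        break;
    default:                        /* anything backed by normal RAM */
        e.p2m.mattr = MATTR_MEM;
        e.p2m.sh = LPAE_SH_INNER;
        break;
    }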
Signed-off-by: Julien Grall <julien.grall@arm.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
---
I am not sure whether p2m_mmio_direct_c (cacheable MMIO) should use
outer-shareability or inner-shareability. Any opinions?
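
For reference, moving p2m_mmio_direct_c to inner-shareability would
only be a one-line change in the new switch (hypothetical alternative,
not what this patch does):

    case p2m_mmio_direct_c:
        e.p2m.mattr = MATTR_MEM;
        e.p2m.sh = LPAE_SH_INNER;   /* instead of LPAE_SH_OUTER */
        break;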
Changes in v2:
- Add my signed-off-by (forgotten in v1)
- Add Stefano's reviewed-by
---
xen/arch/arm/p2m.c | 55 ++++++++++++++++++++++++------------------------------
1 file changed, 24 insertions(+), 31 deletions(-)
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index cffb12e..08f3f17 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -325,8 +325,7 @@ static void p2m_set_permission(lpae_t *e, p2m_type_t t, p2m_access_t a)
}
}
-static lpae_t mfn_to_p2m_entry(mfn_t mfn, unsigned int mattr,
- p2m_type_t t, p2m_access_t a)
+static lpae_t mfn_to_p2m_entry(mfn_t mfn, p2m_type_t t, p2m_access_t a)
{
/*
* sh, xn and write bit will be defined in the following switches
@@ -335,7 +334,6 @@ static lpae_t mfn_to_p2m_entry(mfn_t mfn, unsigned int mattr,
lpae_t e = (lpae_t) {
.p2m.af = 1,
.p2m.read = 1,
- .p2m.mattr = mattr,
.p2m.table = 1,
.p2m.valid = 1,
.p2m.type = t,
@@ -343,18 +341,21 @@ static lpae_t mfn_to_p2m_entry(mfn_t mfn, unsigned int mattr,
BUILD_BUG_ON(p2m_max_real_type > (1 << 4));
- switch (mattr)
+ switch ( t )
{
- case MATTR_MEM:
- e.p2m.sh = LPAE_SH_INNER;
+ case p2m_mmio_direct_nc:
+ e.p2m.mattr = MATTR_DEV;
+ e.p2m.sh = LPAE_SH_OUTER;
break;
- case MATTR_DEV:
+ case p2m_mmio_direct_c:
+ e.p2m.mattr = MATTR_MEM;
e.p2m.sh = LPAE_SH_OUTER;
break;
+
default:
- BUG();
- break;
+ e.p2m.mattr = MATTR_MEM;
+ e.p2m.sh = LPAE_SH_INNER;
}
p2m_set_permission(&e, t, a);
@@ -422,7 +423,7 @@ static int p2m_create_table(struct domain *d, lpae_t *entry,
for ( i=0 ; i < LPAE_ENTRIES; i++ )
{
pte = mfn_to_p2m_entry(mfn_add(mfn, i << (level_shift - LPAE_SHIFT)),
- MATTR_MEM, t, p2m->default_access);
+ t, p2m->default_access);
/*
* First and second level super pages set p2m.table = 0, but
@@ -444,7 +445,7 @@ static int p2m_create_table(struct domain *d, lpae_t *entry,
unmap_domain_page(p);
- pte = mfn_to_p2m_entry(_mfn(page_to_mfn(page)), MATTR_MEM, p2m_invalid,
+ pte = mfn_to_p2m_entry(_mfn(page_to_mfn(page)), p2m_invalid,
p2m->default_access);
p2m_write_pte(entry, pte, flush_cache);
@@ -665,7 +666,6 @@ static int apply_one_level(struct domain *d,
paddr_t *addr,
paddr_t *maddr,
bool_t *flush,
- int mattr,
p2m_type_t t,
p2m_access_t a)
{
@@ -694,7 +694,7 @@ static int apply_one_level(struct domain *d,
return rc;
/* New mapping is superpage aligned, make it */
- pte = mfn_to_p2m_entry(_mfn(*maddr >> PAGE_SHIFT), mattr, t, a);
+ pte = mfn_to_p2m_entry(_mfn(*maddr >> PAGE_SHIFT), t, a);
if ( level < 3 )
pte.p2m.table = 0; /* Superpage entry */
@@ -914,7 +914,6 @@ static int apply_p2m_changes(struct domain *d,
gfn_t sgfn,
unsigned long nr,
mfn_t smfn,
- int mattr,
uint32_t mask,
p2m_type_t t,
p2m_access_t a)
@@ -1053,7 +1052,7 @@ static int apply_p2m_changes(struct domain *d,
level, flush_pt, op,
start_gpaddr, end_gpaddr,
&addr, &maddr, &flush,
- mattr, t, a);
+ t, a);
if ( ret < 0 ) { rc = ret ; goto out; }
count += ret;
@@ -1162,7 +1161,7 @@ out:
* mapping.
*/
apply_p2m_changes(d, REMOVE, sgfn, gfn - gfn_x(sgfn), smfn,
- mattr, 0, p2m_invalid, d->arch.p2m.default_access);
+ 0, p2m_invalid, d->arch.p2m.default_access);
}
return rc;
@@ -1172,10 +1171,10 @@ static inline int p2m_insert_mapping(struct domain *d,
gfn_t start_gfn,
unsigned long nr,
mfn_t mfn,
- int mattr, p2m_type_t t)
+ p2m_type_t t)
{
return apply_p2m_changes(d, INSERT, start_gfn, nr, mfn,
- mattr, 0, t, d->arch.p2m.default_access);
+ 0, t, d->arch.p2m.default_access);
}
static inline int p2m_remove_mapping(struct domain *d,
@@ -1185,8 +1184,7 @@ static inline int p2m_remove_mapping(struct domain *d,
{
return apply_p2m_changes(d, REMOVE, start_gfn, nr, mfn,
/* arguments below not used when removing mapping */
- MATTR_MEM, 0, p2m_invalid,
- d->arch.p2m.default_access);
+ 0, p2m_invalid, d->arch.p2m.default_access);
}
int map_regions_rw_cache(struct domain *d,
@@ -1194,8 +1192,7 @@ int map_regions_rw_cache(struct domain *d,
unsigned long nr,
mfn_t mfn)
{
- return p2m_insert_mapping(d, gfn, nr, mfn,
- MATTR_MEM, p2m_mmio_direct_c);
+ return p2m_insert_mapping(d, gfn, nr, mfn, p2m_mmio_direct_c);
}
int unmap_regions_rw_cache(struct domain *d,
@@ -1211,8 +1208,7 @@ int map_mmio_regions(struct domain *d,
unsigned long nr,
mfn_t mfn)
{
- return p2m_insert_mapping(d, start_gfn, nr, mfn,
- MATTR_DEV, p2m_mmio_direct_nc);
+ return p2m_insert_mapping(d, start_gfn, nr, mfn, p2m_mmio_direct_nc);
}
int unmap_mmio_regions(struct domain *d,
@@ -1250,8 +1246,7 @@ int guest_physmap_add_entry(struct domain *d,
unsigned long page_order,
p2m_type_t t)
{
- return p2m_insert_mapping(d, gfn, (1 << page_order), mfn,
- MATTR_MEM, t);
+ return p2m_insert_mapping(d, gfn, (1 << page_order), mfn, t);
}
void guest_physmap_remove_page(struct domain *d,
@@ -1411,7 +1406,7 @@ int relinquish_p2m_mapping(struct domain *d)
nr = gfn_x(p2m->max_mapped_gfn) - gfn_x(p2m->lowest_mapped_gfn);
return apply_p2m_changes(d, RELINQUISH, p2m->lowest_mapped_gfn, nr,
- INVALID_MFN, MATTR_MEM, 0, p2m_invalid,
+ INVALID_MFN, 0, p2m_invalid,
d->arch.p2m.default_access);
}
@@ -1424,8 +1419,7 @@ int p2m_cache_flush(struct domain *d, gfn_t start, unsigned long nr)
end = gfn_min(end, p2m->max_mapped_gfn);
return apply_p2m_changes(d, CACHEFLUSH, start, nr, INVALID_MFN,
- MATTR_MEM, 0, p2m_invalid,
- d->arch.p2m.default_access);
+ 0, p2m_invalid, d->arch.p2m.default_access);
}
mfn_t gfn_to_mfn(struct domain *d, gfn_t gfn)
@@ -1826,8 +1820,7 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
}
rc = apply_p2m_changes(d, MEMACCESS, gfn_add(gfn, start),
- (nr - start), INVALID_MFN,
- MATTR_MEM, mask, 0, a);
+ (nr - start), INVALID_MFN, mask, 0, a);
if ( rc < 0 )
return rc;
else if ( rc > 0 )
--
1.9.1