From: Ian Campbell <ian.campbell@citrix.com>
To: xen-devel@lists.xen.org
Cc: julien.grall@linaro.org, tim@xen.org,
Ian Campbell <ian.campbell@citrix.com>,
stefano.stabellini@eu.citrix.com
Subject: [PATCH v4 5/5] xen: arm: correct terminology for cache flush macros
Date: Fri, 7 Feb 2014 12:12:56 +0000 [thread overview]
Message-ID: <1391775176-30313-5-git-send-email-ian.campbell@citrix.com> (raw)
In-Reply-To: <1391775139.2162.88.camel@kazak.uk.xensource.com>
The term "flush" is slightly ambiguous. The correct ARM term for this
operation is clean, as opposed to clean+invalidate for which we also now have a
function.
This is a pure rename, no functional change.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
This could easily be left for 4.5.
---
xen/arch/arm/guestcopy.c | 2 +-
xen/arch/arm/kernel.c | 2 +-
xen/arch/arm/mm.c | 16 ++++++++--------
xen/arch/arm/smpboot.c | 2 +-
xen/include/asm-arm/arm32/page.h | 2 +-
xen/include/asm-arm/arm64/page.h | 2 +-
xen/include/asm-arm/page.h | 10 +++++-----
7 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/xen/arch/arm/guestcopy.c b/xen/arch/arm/guestcopy.c
index bd0a355..af0af6b 100644
--- a/xen/arch/arm/guestcopy.c
+++ b/xen/arch/arm/guestcopy.c
@@ -24,7 +24,7 @@ static unsigned long raw_copy_to_guest_helper(void *to, const void *from,
p += offset;
memcpy(p, from, size);
if ( flush_dcache )
- flush_xen_dcache_va_range(p, size);
+ clean_xen_dcache_va_range(p, size);
unmap_domain_page(p - offset);
len -= size;
diff --git a/xen/arch/arm/kernel.c b/xen/arch/arm/kernel.c
index 6a5772b..1e3107d 100644
--- a/xen/arch/arm/kernel.c
+++ b/xen/arch/arm/kernel.c
@@ -58,7 +58,7 @@ void copy_from_paddr(void *dst, paddr_t paddr, unsigned long len, int attrindx)
set_fixmap(FIXMAP_MISC, p, attrindx);
memcpy(dst, src + s, l);
- flush_xen_dcache_va_range(dst, l);
+ clean_xen_dcache_va_range(dst, l);
paddr += l;
dst += l;
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index d2cfe64..4c5cff0 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -480,13 +480,13 @@ void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr)
/* Clear the copy of the boot pagetables. Each secondary CPU
* rebuilds these itself (see head.S) */
memset(boot_pgtable, 0x0, PAGE_SIZE);
- flush_xen_dcache(boot_pgtable);
+ clean_xen_dcache(boot_pgtable);
#ifdef CONFIG_ARM_64
memset(boot_first, 0x0, PAGE_SIZE);
- flush_xen_dcache(boot_first);
+ clean_xen_dcache(boot_first);
#endif
memset(boot_second, 0x0, PAGE_SIZE);
- flush_xen_dcache(boot_second);
+ clean_xen_dcache(boot_second);
/* Break up the Xen mapping into 4k pages and protect them separately. */
for ( i = 0; i < LPAE_ENTRIES; i++ )
@@ -524,7 +524,7 @@ void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr)
/* Make sure it is clear */
memset(this_cpu(xen_dommap), 0, DOMHEAP_SECOND_PAGES*PAGE_SIZE);
- flush_xen_dcache_va_range(this_cpu(xen_dommap),
+ clean_xen_dcache_va_range(this_cpu(xen_dommap),
DOMHEAP_SECOND_PAGES*PAGE_SIZE);
#endif
}
@@ -535,7 +535,7 @@ int init_secondary_pagetables(int cpu)
/* Set init_ttbr for this CPU coming up. All CPus share a single setof
* pagetables, but rewrite it each time for consistency with 32 bit. */
init_ttbr = (uintptr_t) xen_pgtable + phys_offset;
- flush_xen_dcache(init_ttbr);
+ clean_xen_dcache(init_ttbr);
return 0;
}
#else
@@ -570,15 +570,15 @@ int init_secondary_pagetables(int cpu)
write_pte(&first[first_table_offset(DOMHEAP_VIRT_START+i*FIRST_SIZE)], pte);
}
- flush_xen_dcache_va_range(first, PAGE_SIZE);
- flush_xen_dcache_va_range(domheap, DOMHEAP_SECOND_PAGES*PAGE_SIZE);
+ clean_xen_dcache_va_range(first, PAGE_SIZE);
+ clean_xen_dcache_va_range(domheap, DOMHEAP_SECOND_PAGES*PAGE_SIZE);
per_cpu(xen_pgtable, cpu) = first;
per_cpu(xen_dommap, cpu) = domheap;
/* Set init_ttbr for this CPU coming up */
init_ttbr = __pa(first);
- flush_xen_dcache(init_ttbr);
+ clean_xen_dcache(init_ttbr);
return 0;
}
diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c
index c53c765..a829957 100644
--- a/xen/arch/arm/smpboot.c
+++ b/xen/arch/arm/smpboot.c
@@ -378,7 +378,7 @@ int __cpu_up(unsigned int cpu)
/* Open the gate for this CPU */
smp_up_cpu = cpu_logical_map(cpu);
- flush_xen_dcache(smp_up_cpu);
+ clean_xen_dcache(smp_up_cpu);
rc = arch_cpu_up(cpu);
diff --git a/xen/include/asm-arm/arm32/page.h b/xen/include/asm-arm/arm32/page.h
index cb6add4..b8221ca 100644
--- a/xen/include/asm-arm/arm32/page.h
+++ b/xen/include/asm-arm/arm32/page.h
@@ -20,7 +20,7 @@ static inline void write_pte(lpae_t *p, lpae_t pte)
}
/* Inline ASM to flush dcache on register R (may be an inline asm operand) */
-#define __flush_xen_dcache_one(R) STORE_CP32(R, DCCMVAC)
+#define __clean_xen_dcache_one(R) STORE_CP32(R, DCCMVAC)
/* Inline ASM to clean and invalidate dcache on register R (may be an
* inline asm operand) */
diff --git a/xen/include/asm-arm/arm64/page.h b/xen/include/asm-arm/arm64/page.h
index baf8903..3352821 100644
--- a/xen/include/asm-arm/arm64/page.h
+++ b/xen/include/asm-arm/arm64/page.h
@@ -15,7 +15,7 @@ static inline void write_pte(lpae_t *p, lpae_t pte)
}
/* Inline ASM to flush dcache on register R (may be an inline asm operand) */
-#define __flush_xen_dcache_one(R) "dc cvac, %" #R ";"
+#define __clean_xen_dcache_one(R) "dc cvac, %" #R ";"
/* Inline ASM to clean and invalidate dcache on register R (may be an
* inline asm operand) */
diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
index 67d64c9..a577942 100644
--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h
@@ -229,26 +229,26 @@ extern size_t cacheline_bytes;
/* Function for flushing medium-sized areas.
* if 'range' is large enough we might want to use model-specific
* full-cache flushes. */
-static inline void flush_xen_dcache_va_range(void *p, unsigned long size)
+static inline void clean_xen_dcache_va_range(void *p, unsigned long size)
{
void *end;
dsb(); /* So the CPU issues all writes to the range */
for ( end = p + size; p < end; p += cacheline_bytes )
- asm volatile (__flush_xen_dcache_one(0) : : "r" (p));
+ asm volatile (__clean_xen_dcache_one(0) : : "r" (p));
dsb(); /* So we know the flushes happen before continuing */
}
/* Macro for flushing a single small item. The predicate is always
* compile-time constant so this will compile down to 3 instructions in
* the common case. */
-#define flush_xen_dcache(x) do { \
+#define clean_xen_dcache(x) do { \
typeof(x) *_p = &(x); \
if ( sizeof(x) > MIN_CACHELINE_BYTES || sizeof(x) > alignof(x) ) \
- flush_xen_dcache_va_range(_p, sizeof(x)); \
+ clean_xen_dcache_va_range(_p, sizeof(x)); \
else \
asm volatile ( \
"dsb sy;" /* Finish all earlier writes */ \
- __flush_xen_dcache_one(0) \
+ __clean_xen_dcache_one(0) \
"dsb sy;" /* Finish flush before continuing */ \
: : "r" (_p), "m" (*_p)); \
} while (0)
--
1.7.10.4
next prev parent reply other threads:[~2014-02-07 12:12 UTC|newest]
Thread overview: 17+ messages / expand[flat|nested] mbox.gz Atom feed top
2014-02-07 12:12 [PATCH 0/5 v4] xen/arm: fix guest builder cache cohenrency (again, again) Ian Campbell
2014-02-07 12:12 ` [PATCH v4 1/5] xen: arm: rename create_p2m_entries to apply_p2m_changes Ian Campbell
2014-02-07 12:12 ` [PATCH v4 2/5] xen: arm: rename p2m next_gfn_to_relinquish to lowest_mapped_gfn Ian Campbell
2014-02-07 12:12 ` [PATCH v4 3/5] xen/arm: clean and invalidate all guest caches by VMID after domain build Ian Campbell
2014-02-07 12:57 ` Jan Beulich
2014-02-07 14:34 ` Ian Campbell
2014-02-10 13:49 ` Jan Beulich
2014-02-10 14:02 ` Ian Campbell
2014-02-10 14:26 ` Jan Beulich
2014-02-11 13:29 ` Ian Campbell
2014-02-07 13:24 ` Julien Grall
2014-02-07 14:49 ` Stefano Stabellini
2014-02-07 15:01 ` Ian Campbell
2014-02-07 15:09 ` Stefano Stabellini
2014-02-07 12:12 ` [PATCH v4 4/5] Revert "xen: arm: force guest memory accesses to cacheable when MMU is disabled" Ian Campbell
2014-02-07 12:12 ` Ian Campbell [this message]
2014-02-07 13:10 ` [PATCH v4 5/5] xen: arm: correct terminology for cache flush macros Julien Grall
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1391775176-30313-5-git-send-email-ian.campbell@citrix.com \
--to=ian.campbell@citrix.com \
--cc=julien.grall@linaro.org \
--cc=stefano.stabellini@eu.citrix.com \
--cc=tim@xen.org \
--cc=xen-devel@lists.xen.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).