From: Ian Campbell <ian.campbell@citrix.com>
Subject: [PATCH v4 6/6] xen: arm: relax barriers when flushing caches
Date: Thu, 3 Apr 2014 09:59:45 +0100
Message-ID: <1396515585-5737-6-git-send-email-ian.campbell@citrix.com>
References: <1396515560.4211.33.camel@kazak.uk.xensource.com>
Mime-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
In-Reply-To: <1396515560.4211.33.camel@kazak.uk.xensource.com>
To: xen-devel@lists.xen.org
Cc: julien.grall@linaro.org, tim@xen.org, Ian Campbell <ian.campbell@citrix.com>, stefano.stabellini@eu.citrix.com
List-Id: xen-devel@lists.xenproject.org

We only need an inner shareable barrier here.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
v4: new patch
---
 xen/include/asm-arm/page.h | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
index a96e40b..b4d5597 100644
--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h
@@ -263,20 +263,20 @@ extern size_t cacheline_bytes;
 static inline void clean_xen_dcache_va_range(void *p, unsigned long size)
 {
     void *end;
-    dsb(sy);           /* So the CPU issues all writes to the range */
+    dsb(ish);          /* So the CPU issues all writes to the range */
     for ( end = p + size; p < end; p += cacheline_bytes )
         asm volatile (__clean_xen_dcache_one(0) : : "r" (p));
-    dsb(sy);           /* So we know the flushes happen before continuing */
+    dsb(ish);          /* So we know the flushes happen before continuing */
 }

 static inline void clean_and_invalidate_xen_dcache_va_range
     (void *p, unsigned long size)
 {
     void *end;
-    dsb(sy);           /* So the CPU issues all writes to the range */
+    dsb(ish);          /* So the CPU issues all writes to the range */
     for ( end = p + size; p < end; p += cacheline_bytes )
         asm volatile (__clean_and_invalidate_xen_dcache_one(0) : : "r" (p));
-    dsb(sy);           /* So we know the flushes happen before continuing */
+    dsb(ish);          /* So we know the flushes happen before continuing */
 }

 /* Macros for flushing a single small item. The predicate is always
@@ -288,9 +288,9 @@ static inline void clean_and_invalidate_xen_dcache_va_range
         clean_xen_dcache_va_range(_p, sizeof(x));              \
     else                                                        \
         asm volatile (                                          \
-            "dsb sy;"   /* Finish all earlier writes */         \
+            "dsb ish;"  /* Finish all earlier writes */         \
             __clean_xen_dcache_one(0)                           \
-            "dsb sy;"   /* Finish flush before continuing */    \
+            "dsb ish;"  /* Finish flush before continuing */    \
             : : "r" (_p), "m" (*_p));                           \
 } while (0)

@@ -300,9 +300,9 @@ static inline void clean_and_invalidate_xen_dcache_va_range
         clean_and_invalidate_xen_dcache_va_range(_p, sizeof(x)); \
     else                                                        \
         asm volatile (                                          \
-            "dsb sy;"   /* Finish all earlier writes */         \
+            "dsb ish;"  /* Finish all earlier writes */         \
             __clean_and_invalidate_xen_dcache_one(0)            \
-            "dsb sy;"   /* Finish flush before continuing */    \
+            "dsb ish;"  /* Finish flush before continuing */    \
             : : "r" (_p), "m" (*_p));                           \
 } while (0)

-- 
1.7.10.4