From: Andrew Cooper <andrew.cooper3@citrix.com>
To: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Keir Fraser <keir@xen.org>, Jan Beulich <JBeulich@suse.com>,
Xen-devel <xen-devel@lists.xen.org>
Subject: Re: [PATCH] xen/tmem: Fix uses of unmatched __map_domain_page()
Date: Wed, 4 Dec 2013 17:02:15 +0000 [thread overview]
Message-ID: <529F6017.1090704@citrix.com> (raw)
In-Reply-To: <529E51A8.4000405@citrix.com>
On 03/12/13 21:48, Andrew Cooper wrote:
> On 03/12/2013 21:00, Konrad Rzeszutek Wilk wrote:
>> On Wed, Nov 27, 2013 at 02:55:04PM +0000, Andrew Cooper wrote:
>>> I noticed this while looking through tmem_xen.h with regards to the
>>> recently-discovered Coverity issues. As the issue was not noticed or
>>> referenced in Bob's cleanup series, I figured it was fair game, given its
>>> severity.
>>>
>>> __map_domain_page() *must* be matched with an unmap_domain_page(). These five
>>> static inline functions each map a page (or two), then throw away the context
>>> needed to unmap it.
>> I was trying to figure out how it worked before. I had been running with
>> tze enabled (I hope!) and I did not trigger any mapcache exhaustion.
>>
>> Ah wait, I had been on my nightly regression system - which has some
>> guests that use tmem but they don't create any load fast enough.
>>
>> Let me queue this up and test it. Bob, would appreciate you testing
>> it too - just in case.
>>
>> Thanks!
> Depends which version of Xen. Before Jan did the 16TB support during
> 4.3, __map_domain_page() was effectively a noop on 64bit, being
> mfn_to_virt().
>
> ~Andrew
After the most recent coverity run with the map_domain_page() modelling,
CIDs 1135373 through 1135379 now apply to the fixes in this patch.
~Andrew
>
>>> Each of the changes are limited to their respective functions. In two cases,
>>> this involved replacing a large amount of pointer arithmetic with memcpy()
>>> (all callers were relying on memcpy() semantics of positive/negative returns
>>> rather than specifically -1/+1). A third case had its pointer arithmetic
>>> entirely replaced with memcpy().
>>>
>>> In addition, remove redundant casts of void pointers and assertions.
>>>
>>> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
>>> CC: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
>>> CC: Bob Liu <bob.liu@oracle.com>
>>> CC: Keir Fraser <keir@xen.org>
>>> CC: Jan Beulich <JBeulich@suse.com>
>>>
>>> ---
>>> These changes have been compile tested, but not functionally tested.
>>> ---
>>> xen/include/xen/tmem_xen.h | 78 +++++++++++++++++++++++---------------------
>>> 1 file changed, 40 insertions(+), 38 deletions(-)
>>>
>>> diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
>>> index b26c6fa..a0d11aa 100644
>>> --- a/xen/include/xen/tmem_xen.h
>>> +++ b/xen/include/xen/tmem_xen.h
>>> @@ -228,26 +228,24 @@ static inline bool_t tmem_current_is_privileged(void)
>>>
>>> static inline uint8_t tmem_get_first_byte(struct page_info *pfp)
>>> {
>>> - void *p = __map_domain_page(pfp);
>>> + const uint8_t *p =__map_domain_page(pfp);
>>> + uint8_t byte = p[0];
>>>
>>> - return (uint8_t)(*(char *)p);
>>> + unmap_domain_page(p);
>>> +
>>> + return byte;
>>> }
>>>
>>> static inline int tmem_page_cmp(struct page_info *pfp1, struct page_info *pfp2)
>>> {
>>> - const uint64_t *p1 = (uint64_t *)__map_domain_page(pfp1);
>>> - const uint64_t *p2 = (uint64_t *)__map_domain_page(pfp2);
>>> - int i;
>>> -
>>> - // FIXME: code in assembly?
>>> -ASSERT(p1 != NULL);
>>> -ASSERT(p2 != NULL);
>>> - for ( i = PAGE_SIZE/sizeof(uint64_t); i && *p1 == *p2; i--, p1++, p2++ );
>>> - if ( !i )
>>> - return 0;
>>> - if ( *p1 < *p2 )
>>> - return -1;
>>> - return 1;
>>> + const uint64_t *p1 = __map_domain_page(pfp1);
>>> + const uint64_t *p2 = __map_domain_page(pfp2);
>>> + int rc = memcmp(p1, p2, PAGE_SIZE);
>>> +
>>> + unmap_domain_page(p2);
>>> + unmap_domain_page(p1);
>>> +
>>> + return rc;
>>> }
>>>
>>> static inline int tmem_pcd_cmp(void *va1, pagesize_t len1, void *va2, pagesize_t len2)
>>> @@ -271,54 +269,58 @@ static inline int tmem_pcd_cmp(void *va1, pagesize_t len1, void *va2, pagesize_t
>>> return 1;
>>> }
>>>
>>> -static inline int tmem_tze_pfp_cmp(struct page_info *pfp1, pagesize_t pfp_len, void *tva, pagesize_t tze_len)
>>> +static inline int tmem_tze_pfp_cmp(struct page_info *pfp1, pagesize_t pfp_len,
>>> + void *tva, const pagesize_t tze_len)
>>> {
>>> - const uint64_t *p1 = (uint64_t *)__map_domain_page(pfp1);
>>> - const uint64_t *p2;
>>> - pagesize_t i;
>>> + const uint64_t *p1 = __map_domain_page(pfp1);
>>> + const uint64_t *p2 = tze_len == PAGE_SIZE ?
>>> + __map_domain_page((struct page_info *)tva) : tva;
>>> + int rc;
>>>
>>> - if ( tze_len == PAGE_SIZE )
>>> - p2 = (uint64_t *)__map_domain_page((struct page_info *)tva);
>>> - else
>>> - p2 = (uint64_t *)tva;
>>> ASSERT(pfp_len <= PAGE_SIZE);
>>> ASSERT(!(pfp_len & (sizeof(uint64_t)-1)));
>>> ASSERT(tze_len <= PAGE_SIZE);
>>> ASSERT(!(tze_len & (sizeof(uint64_t)-1)));
>>> if ( pfp_len < tze_len )
>>> - return -1;
>>> - if ( pfp_len > tze_len )
>>> - return 1;
>>> - ASSERT(pfp_len == tze_len);
>>> - for ( i = tze_len/sizeof(uint64_t); i && *p1 == *p2; i--, p1++, p2++ );
>>> - if ( !i )
>>> - return 0;
>>> - if ( *p1 < *p2 )
>>> - return -1;
>>> - return 1;
>>> + rc = -1;
>>> + else if ( pfp_len > tze_len )
>>> + rc = 1;
>>> + else
>>> + rc = memcmp(p1, p2, tze_len);
>>> +
>>> + if ( tze_len == PAGE_SIZE )
>>> + unmap_domain_page(p2);
>>> + unmap_domain_page(p1);
>>> +
>>> + return rc;
>>> }
>>>
>>> /* return the size of the data in the pfp, ignoring trailing zeroes and
>>> * rounded up to the nearest multiple of 8 */
>>> static inline pagesize_t tmem_tze_pfp_scan(struct page_info *pfp)
>>> {
>>> - const uint64_t *p = (uint64_t *)__map_domain_page(pfp);
>>> + const uint64_t *const page = __map_domain_page(pfp);
>>> + const uint64_t *p = page;
>>> pagesize_t bytecount = PAGE_SIZE;
>>> pagesize_t len = PAGE_SIZE/sizeof(uint64_t);
>>> +
>>> p += len;
>>> while ( len-- && !*--p )
>>> bytecount -= sizeof(uint64_t);
>>> +
>>> + unmap_domain_page(page);
>>> +
>>> return bytecount;
>>> }
>>>
>>> static inline void tmem_tze_copy_from_pfp(void *tva, struct page_info *pfp, pagesize_t len)
>>> {
>>> - uint64_t *p1 = (uint64_t *)tva;
>>> - const uint64_t *p2 = (uint64_t *)__map_domain_page(pfp);
>>> + const uint64_t *p = __map_domain_page(pfp);
>>>
>>> - pagesize_t i;
>>> ASSERT(!(len & (sizeof(uint64_t)-1)));
>>> - for ( i = len/sizeof(uint64_t); i--; *p1++ = *p2++);
>>> + memcpy(tva, p, len);
>>> +
>>> + unmap_domain_page(p);
>>> }
>>>
>>> /* these typedefs are in the public/tmem.h interface
>>> --
>>> 1.7.10.4
>>>
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xen.org
> http://lists.xen.org/xen-devel
next prev parent reply other threads:[~2013-12-04 17:02 UTC|newest]
Thread overview: 8+ messages / expand[flat|nested] mbox.gz Atom feed top
2013-11-27 14:55 [PATCH] xen/tmem: Fix uses of unmatched __map_domain_page() Andrew Cooper
2013-11-29 8:49 ` Bob Liu
2013-12-03 21:00 ` Konrad Rzeszutek Wilk
2013-12-03 21:48 ` Andrew Cooper
2013-12-04 17:02 ` Andrew Cooper [this message]
2013-12-06 6:45 ` Bob Liu
2013-12-06 14:38 ` Konrad Rzeszutek Wilk
2013-12-06 14:40 ` Andrew Cooper
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=529F6017.1090704@citrix.com \
--to=andrew.cooper3@citrix.com \
--cc=JBeulich@suse.com \
--cc=keir@xen.org \
--cc=konrad.wilk@oracle.com \
--cc=xen-devel@lists.xen.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).