* [PATCH 11/11] tmem: cleanup
@ 2012-09-05 12:41 Jan Beulich
2012-09-05 16:39 ` Dan Magenheimer
From: Jan Beulich @ 2012-09-05 12:41 UTC (permalink / raw)
To: xen-devel; +Cc: dan.magenheimer, Zhenzhong Duan
- one more case of checking for a specific rather than any error
- drop no longer needed first parameter from cli_put_page()
- drop redundant casts
Signed-off-by: Jan Beulich <jbeulich@suse.com>
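
For reference, the first item relies on what appears to be the usual return
convention here (an assumption on the editor's part, not spelled out in the
patch): tmh_compress_from_client() returns a positive value on success, 0 when
no output was produced, and a negative errno on any failure, so a single
"ret <= 0" test covers every non-success case rather than only -EFAULT. A
minimal, self-contained C sketch of that pattern, using a hypothetical
stand-in function rather than the real Xen one:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/*
 * Hypothetical stand-in for tmh_compress_from_client(), not the Xen
 * function itself: returns > 0 on success, 0 if no output was produced,
 * and a negative errno on any failure.
 */
static int compress_from_client(const char *src, char *dst, size_t dstlen,
                                size_t *out_size)
{
    if ( src == NULL )
        return -EFAULT;                 /* bad source buffer */
    if ( dstlen < strlen(src) + 1 )
        return -ENOMEM;                 /* destination too small */
    if ( *src == '\0' )
        return 0;                       /* nothing worth storing */
    memcpy(dst, src, strlen(src) + 1);
    *out_size = strlen(src) + 1;
    return 1;
}

int main(void)
{
    char dst[64];
    size_t size = 0;
    int ret = compress_from_client("example", dst, sizeof(dst), &size);

    /* One test now covers -EFAULT, -ENOMEM, any other errno, and 0. */
    if ( ret <= 0 )
        return 1;
    printf("stored %zu bytes\n", size);
    return 0;
}
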
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -1467,7 +1467,7 @@ static NOINLINE int do_tmem_put_compress
pgp_free_data(pgp, pgp->us.obj->pool);
START_CYC_COUNTER(compress);
ret = tmh_compress_from_client(cmfn, &dst, &size, clibuf);
- if ( (ret == -EFAULT) || (ret == 0) )
+ if ( ret <= 0 )
goto out;
else if ( (size == 0) || (size >= tmem_subpage_maxsize()) ) {
ret = 0;
--- a/xen/common/tmem_xen.c
+++ b/xen/common/tmem_xen.c
@@ -97,7 +97,7 @@ static inline void *cli_get_page(tmem_cl
return NULL;
}
-static inline void cli_put_page(tmem_cli_mfn_t cmfn, void *cli_va, pfp_t *cli_pfp,
+static inline void cli_put_page(void *cli_va, pfp_t *cli_pfp,
unsigned long cli_mfn, bool_t mark_dirty)
{
ASSERT(0);
@@ -126,20 +126,20 @@ static inline void *cli_get_page(tmem_cl
}
*pcli_mfn = page_to_mfn(page);
- *pcli_pfp = (pfp_t *)page;
+ *pcli_pfp = page;
return map_domain_page(*pcli_mfn);
}
-static inline void cli_put_page(tmem_cli_mfn_t cmfn, void *cli_va, pfp_t *cli_pfp,
+static inline void cli_put_page(void *cli_va, pfp_t *cli_pfp,
unsigned long cli_mfn, bool_t mark_dirty)
{
if ( mark_dirty )
{
- put_page_and_type((struct page_info *)cli_pfp);
+ put_page_and_type(cli_pfp);
paging_mark_dirty(current->domain,cli_mfn);
}
else
- put_page((struct page_info *)cli_pfp);
+ put_page(cli_pfp);
unmap_domain_page(cli_va);
}
#endif
@@ -188,7 +188,7 @@ EXPORT int tmh_copy_from_client(pfp_t *p
else if ( len )
rc = -EINVAL;
if ( cli_va )
- cli_put_page(cmfn, cli_va, cli_pfp, cli_mfn, 0);
+ cli_put_page(cli_va, cli_pfp, cli_mfn, 0);
unmap_domain_page(tmem_va);
return rc;
}
@@ -221,7 +221,7 @@ EXPORT int tmh_compress_from_client(tmem
ASSERT(ret == LZO_E_OK);
*out_va = dmem;
if ( cli_va )
- cli_put_page(cmfn, cli_va, cli_pfp, cli_mfn, 0);
+ cli_put_page(cli_va, cli_pfp, cli_mfn, 0);
return 1;
}
@@ -259,7 +259,7 @@ EXPORT int tmh_copy_to_client(tmem_cli_m
rc = -EINVAL;
unmap_domain_page(tmem_va);
if ( cli_va )
- cli_put_page(cmfn, cli_va, cli_pfp, cli_mfn, 1);
+ cli_put_page(cli_va, cli_pfp, cli_mfn, 1);
mb();
return rc;
}
@@ -286,7 +286,7 @@ EXPORT int tmh_decompress_to_client(tmem
ASSERT(ret == LZO_E_OK);
ASSERT(out_len == PAGE_SIZE);
if ( cli_va )
- cli_put_page(cmfn, cli_va, cli_pfp, cli_mfn, 1);
+ cli_put_page(cli_va, cli_pfp, cli_mfn, 1);
else if ( copy_to_guest(clibuf, scratch, PAGE_SIZE) )
return -EFAULT;
mb();
@@ -310,7 +310,7 @@ EXPORT int tmh_copy_tze_to_client(tmem_c
memcpy((char *)cli_va,(char *)tmem_va,len);
if ( len < PAGE_SIZE )
memset((char *)cli_va+len,0,PAGE_SIZE-len);
- cli_put_page(cmfn, cli_va, cli_pfp, cli_mfn, 1);
+ cli_put_page(cli_va, cli_pfp, cli_mfn, 1);
mb();
return 1;
}
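
On the other two items: per the changelog, cli_put_page() no longer uses its
cmfn argument, so dropping it from the definition and all call sites is purely
mechanical, and the casts become unnecessary on the assumption (implied by the
hunks above, though the typedef itself is not shown in the patch) that pfp_t
is simply an alias for struct page_info. A tiny, self-contained illustration
of why the casts are redundant under that assumption:

#include <stdio.h>

struct page_info { unsigned long count_info; };
typedef struct page_info pfp_t;   /* assumed alias; the real typedef is not shown here */

/* Local stand-in for illustration only, not Xen's put_page(). */
static void put_page(struct page_info *pg)
{
    pg->count_info--;             /* drop one reference */
}

int main(void)
{
    struct page_info page = { .count_info = 1 };
    pfp_t *cli_pfp = &page;       /* no (pfp_t *) cast needed */

    put_page(cli_pfp);            /* no (struct page_info *) cast needed */
    printf("count_info = %lu\n", page.count_info);
    return 0;
}
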
* Re: [PATCH 11/11] tmem: cleanup
2012-09-05 12:41 [PATCH 11/11] tmem: cleanup Jan Beulich
@ 2012-09-05 16:39 ` Dan Magenheimer
From: Dan Magenheimer @ 2012-09-05 16:39 UTC (permalink / raw)
To: Jan Beulich, xen-devel; +Cc: Zhenzhong Duan
> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: Wednesday, September 05, 2012 6:41 AM
> To: xen-devel
> Cc: Dan Magenheimer; Zhenzhong Duan
> Subject: [PATCH 11/11] tmem: cleanup
>
> - one more case of checking for a specific rather than any error
> - drop no longer needed first parameter from cli_put_page()
> - drop redundant casts
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Dan Magenheimer <dan.magenheimer@oracle.com>