* [PATCH] sparse: use force attribute for vm_fault_t casts
@ 2022-05-14 14:26 Vasily Averin
2022-05-14 17:34 ` Matthew Wilcox
0 siblings, 1 reply; 3+ messages in thread
From: Vasily Averin @ 2022-05-14 14:26 UTC (permalink / raw)
To: Dan Williams, Matthew Wilcox, Jan Kara, Alexander Viro
Cc: kernel, linux-kernel, nvdimm, linux-fsdevel
Fixes sparse warnings:
./include/trace/events/fs_dax.h:10:1: sparse:
got restricted vm_fault_t
./include/trace/events/fs_dax.h:153:1: sparse:
got restricted vm_fault_t
fs/dax.c:563:39: sparse: got restricted vm_fault_t
fs/dax.c:565:39: sparse: got restricted vm_fault_t
fs/dax.c:569:31: sparse: got restricted vm_fault_t
fs/dax.c:1055:41: sparse:
got restricted vm_fault_t [assigned] [usertype] ret
fs/dax.c:1461:46: sparse: got restricted vm_fault_t [usertype] ret
fs/dax.c:1477:21: sparse:
expected restricted vm_fault_t [assigned] [usertype] ret
fs/dax.c:1518:51: sparse:
got restricted vm_fault_t [assigned] [usertype] ret
fs/dax.c:1599:21: sparse:
expected restricted vm_fault_t [assigned] [usertype] ret
fs/dax.c:1633:62: sparse:
got restricted vm_fault_t [assigned] [usertype] ret
fs/dax.c:1696:55: sparse: got restricted vm_fault_t
fs/dax.c:1711:58: sparse:
got restricted vm_fault_t [assigned] [usertype] ret
The vm_fault_t type is bitwise and requires the __force attribute for any casts.
Signed-off-by: Vasily Averin <vvs@openvz.org>
---
fs/dax.c | 22 +++++++++++-----------
include/linux/mm_types.h | 30 ++++++++++++++++--------------
2 files changed, 27 insertions(+), 25 deletions(-)
diff --git a/fs/dax.c b/fs/dax.c
index 67a08a32fccb..eb1a1808f719 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -560,13 +560,13 @@ static void *grab_mapping_entry(struct xa_state *xas,
if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
goto retry;
if (xas->xa_node == XA_ERROR(-ENOMEM))
- return xa_mk_internal(VM_FAULT_OOM);
+ return xa_mk_internal((__force unsigned long)VM_FAULT_OOM);
if (xas_error(xas))
- return xa_mk_internal(VM_FAULT_SIGBUS);
+ return xa_mk_internal((__force unsigned long)VM_FAULT_SIGBUS);
return entry;
fallback:
xas_unlock_irq(xas);
- return xa_mk_internal(VM_FAULT_FALLBACK);
+ return xa_mk_internal((__force unsigned long)VM_FAULT_FALLBACK);
}
/**
@@ -1052,7 +1052,7 @@ static vm_fault_t dax_load_hole(struct xa_state *xas,
DAX_ZERO_PAGE, false);
ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
- trace_dax_load_hole(inode, vmf, ret);
+ trace_dax_load_hole(inode, vmf, (__force int)ret);
return ret;
}
@@ -1458,7 +1458,7 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
void *entry;
int error;
- trace_dax_pte_fault(iter.inode, vmf, ret);
+ trace_dax_pte_fault(iter.inode, vmf, (__force int)ret);
/*
* Check whether offset isn't beyond end of file now. Caller is supposed
* to hold locks serializing us with truncate / punch hole so this is
@@ -1474,7 +1474,7 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
entry = grab_mapping_entry(&xas, mapping, 0);
if (xa_is_internal(entry)) {
- ret = xa_to_internal(entry);
+ ret = (__force vm_fault_t)xa_to_internal(entry);
goto out;
}
@@ -1515,7 +1515,7 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
unlock_entry:
dax_unlock_entry(&xas, entry);
out:
- trace_dax_pte_fault_done(iter.inode, vmf, ret);
+ trace_dax_pte_fault_done(iter.inode, vmf, (__force int)ret);
return ret;
}
@@ -1596,7 +1596,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
*/
entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
if (xa_is_internal(entry)) {
- ret = xa_to_internal(entry);
+ ret = (__force vm_fault_t)xa_to_internal(entry);
goto fallback;
}
@@ -1630,7 +1630,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
count_vm_event(THP_FAULT_FALLBACK);
}
out:
- trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, ret);
+ trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, (__force int)ret);
return ret;
}
#else
@@ -1693,7 +1693,7 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
put_unlocked_entry(&xas, entry, WAKE_NEXT);
xas_unlock_irq(&xas);
trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
- VM_FAULT_NOPAGE);
+ (__force int)VM_FAULT_NOPAGE);
return VM_FAULT_NOPAGE;
}
xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
@@ -1708,7 +1708,7 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
else
ret = VM_FAULT_FALLBACK;
dax_unlock_entry(&xas, entry);
- trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
+ trace_dax_insert_pfn_mkwrite(mapping->host, vmf, (__force int)ret);
return ret;
}
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 8834e38c06a4..57cc4918b1b1 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -745,20 +745,22 @@ enum vm_fault_reason {
VM_FAULT_SIGSEGV | VM_FAULT_HWPOISON | \
VM_FAULT_HWPOISON_LARGE | VM_FAULT_FALLBACK)
-#define VM_FAULT_RESULT_TRACE \
- { VM_FAULT_OOM, "OOM" }, \
- { VM_FAULT_SIGBUS, "SIGBUS" }, \
- { VM_FAULT_MAJOR, "MAJOR" }, \
- { VM_FAULT_WRITE, "WRITE" }, \
- { VM_FAULT_HWPOISON, "HWPOISON" }, \
- { VM_FAULT_HWPOISON_LARGE, "HWPOISON_LARGE" }, \
- { VM_FAULT_SIGSEGV, "SIGSEGV" }, \
- { VM_FAULT_NOPAGE, "NOPAGE" }, \
- { VM_FAULT_LOCKED, "LOCKED" }, \
- { VM_FAULT_RETRY, "RETRY" }, \
- { VM_FAULT_FALLBACK, "FALLBACK" }, \
- { VM_FAULT_DONE_COW, "DONE_COW" }, \
- { VM_FAULT_NEEDDSYNC, "NEEDDSYNC" }
+#define faultflag_string(flag) {(__force unsigned long)VM_FAULT_##flag, #flag}
+
+#define VM_FAULT_RESULT_TRACE \
+ faultflag_string(OOM), \
+ faultflag_string(SIGBUS), \
+ faultflag_string(MAJOR), \
+ faultflag_string(WRITE), \
+ faultflag_string(HWPOISON), \
+ faultflag_string(HWPOISON_LARGE), \
+ faultflag_string(SIGSEGV), \
+ faultflag_string(NOPAGE), \
+ faultflag_string(LOCKED), \
+ faultflag_string(RETRY), \
+ faultflag_string(FALLBACK), \
+ faultflag_string(DONE_COW), \
+ faultflag_string(NEEDDSYNC)
struct vm_special_mapping {
const char *name; /* The name, e.g. "[vdso]". */
--
2.31.1
^ permalink raw reply related [flat|nested] 3+ messages in thread* Re: [PATCH] sparse: use force attribute for vm_fault_t casts 2022-05-14 14:26 [PATCH] sparse: use force attribute for vm_fault_t casts Vasily Averin @ 2022-05-14 17:34 ` Matthew Wilcox 2022-05-14 19:56 ` [PATCH v2] " Vasily Averin 0 siblings, 1 reply; 3+ messages in thread From: Matthew Wilcox @ 2022-05-14 17:34 UTC (permalink / raw) To: Vasily Averin Cc: Dan Williams, Jan Kara, Alexander Viro, kernel, linux-kernel, nvdimm, linux-fsdevel On Sat, May 14, 2022 at 05:26:21PM +0300, Vasily Averin wrote: > Fixes sparse warnings: > ./include/trace/events/fs_dax.h:10:1: sparse: > got restricted vm_fault_t > ./include/trace/events/fs_dax.h:153:1: sparse: > got restricted vm_fault_t > fs/dax.c:563:39: sparse: got restricted vm_fault_t > fs/dax.c:565:39: sparse: got restricted vm_fault_t > fs/dax.c:569:31: sparse: got restricted vm_fault_t > fs/dax.c:1055:41: sparse: > got restricted vm_fault_t [assigned] [usertype] ret > fs/dax.c:1461:46: sparse: got restricted vm_fault_t [usertype] ret > fs/dax.c:1477:21: sparse: > expected restricted vm_fault_t [assigned] [usertype] ret > fs/dax.c:1518:51: sparse: > got restricted vm_fault_t [assigned] [usertype] ret > fs/dax.c:1599:21: sparse: > expected restricted vm_fault_t [assigned] [usertype] ret > fs/dax.c:1633:62: sparse: > got restricted vm_fault_t [assigned] [usertype] ret > fs/dax.c:1696:55: sparse: got restricted vm_fault_t > fs/dax.c:1711:58: sparse: > got restricted vm_fault_t [assigned] [usertype] ret > > vm_fault_t type is bitwise and requires __force attribute for any casts. Well, this patch is all kinds of messy. I would rather we had better abstractions. For example ... 
> @@ -560,13 +560,13 @@ static void *grab_mapping_entry(struct xa_state *xas, > if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM)) > goto retry; > if (xas->xa_node == XA_ERROR(-ENOMEM)) > - return xa_mk_internal(VM_FAULT_OOM); > + return xa_mk_internal((__force unsigned long)VM_FAULT_OOM); > if (xas_error(xas)) > - return xa_mk_internal(VM_FAULT_SIGBUS); > + return xa_mk_internal((__force unsigned long)VM_FAULT_SIGBUS); > return entry; > fallback: > xas_unlock_irq(xas); > - return xa_mk_internal(VM_FAULT_FALLBACK); > + return xa_mk_internal((__force unsigned long)VM_FAULT_FALLBACK); > } return vm_fault_encode(VM_FAULT_xxx); > /** > @@ -1052,7 +1052,7 @@ static vm_fault_t dax_load_hole(struct xa_state *xas, > DAX_ZERO_PAGE, false); > > ret = vmf_insert_mixed(vmf->vma, vaddr, pfn); > - trace_dax_load_hole(inode, vmf, ret); > + trace_dax_load_hole(inode, vmf, (__force int)ret); Seems like trace_dax_load_hole() should take a vm_fault_t? > - trace_dax_pte_fault(iter.inode, vmf, ret); > + trace_dax_pte_fault(iter.inode, vmf, (__force int)ret); Ditto. > @@ -1474,7 +1474,7 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp, > > entry = grab_mapping_entry(&xas, mapping, 0); > if (xa_is_internal(entry)) { > - ret = xa_to_internal(entry); > + ret = (__force vm_fault_t)xa_to_internal(entry); vm_fault_decode(entry)? ... the others seem like more of the same. So I'm in favour of what you're doing, but would rather it were done differently. Generally seeing __force casts in the body of a function is a sign that things are wrong; it's better to have them hidden in abstractions. ^ permalink raw reply [flat|nested] 3+ messages in thread
* [PATCH v2] sparse: use force attribute for vm_fault_t casts 2022-05-14 17:34 ` Matthew Wilcox @ 2022-05-14 19:56 ` Vasily Averin 0 siblings, 0 replies; 3+ messages in thread From: Vasily Averin @ 2022-05-14 19:56 UTC (permalink / raw) To: Dan Williams, Matthew Wilcox, Jan Kara, Alexander Viro Cc: kernel, linux-kernel, nvdimm, linux-fsdevel Fixes sparse warnings: ./include/trace/events/fs_dax.h:10:1: sparse: got restricted vm_fault_t ./include/trace/events/fs_dax.h:153:1: sparse: got restricted vm_fault_t fs/dax.c:563:39: sparse: got restricted vm_fault_t fs/dax.c:565:39: sparse: got restricted vm_fault_t fs/dax.c:569:31: sparse: got restricted vm_fault_t fs/dax.c:1055:41: sparse: got restricted vm_fault_t [assigned] [usertype] ret fs/dax.c:1461:46: sparse: got restricted vm_fault_t [usertype] ret fs/dax.c:1477:21: sparse: expected restricted vm_fault_t [assigned] [usertype] ret fs/dax.c:1518:51: sparse: got restricted vm_fault_t [assigned] [usertype] ret fs/dax.c:1599:21: sparse: expected restricted vm_fault_t [assigned] [usertype] ret fs/dax.c:1633:62: sparse: got restricted vm_fault_t [assigned] [usertype] ret fs/dax.c:1696:55: sparse: got restricted vm_fault_t fs/dax.c:1711:58: sparse: got restricted vm_fault_t [assigned] [usertype] ret vm_fault_t type is bitwise and requires __force attribute for any casts. 
Signed-off-by: Vasily Averin <vvs@openvz.org> --- v2: improved according to the recommendations of Matthew Wilcox: - __force cast moved into internal functions - introduced new abstractions dax_vm_fault_[en|de]code() --- fs/dax.c | 21 +++++++++++++++------ include/linux/mm_types.h | 30 ++++++++++++++++-------------- include/trace/events/fs_dax.h | 12 ++++++------ 3 files changed, 37 insertions(+), 26 deletions(-) diff --git a/fs/dax.c b/fs/dax.c index 67a08a32fccb..c27c8782007f 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -121,6 +121,15 @@ static int dax_is_empty_entry(void *entry) return xa_to_value(entry) & DAX_EMPTY; } +static void *dax_vm_fault_encode(vm_fault_t fault) +{ + return xa_mk_internal((__force unsigned long)fault); +} + +static vm_fault_t dax_vm_fault_decode(void *entry) +{ + return (__force vm_fault_t)xa_to_internal(entry); +} /* * true if the entry that was found is of a smaller order than the entry * we were looking for @@ -560,13 +569,13 @@ static void *grab_mapping_entry(struct xa_state *xas, if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM)) goto retry; if (xas->xa_node == XA_ERROR(-ENOMEM)) - return xa_mk_internal(VM_FAULT_OOM); + return dax_vm_fault_encode(VM_FAULT_OOM); if (xas_error(xas)) - return xa_mk_internal(VM_FAULT_SIGBUS); + return dax_vm_fault_encode(VM_FAULT_SIGBUS); return entry; fallback: xas_unlock_irq(xas); - return xa_mk_internal(VM_FAULT_FALLBACK); + return dax_vm_fault_encode(VM_FAULT_FALLBACK); } /** @@ -1474,7 +1483,7 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp, entry = grab_mapping_entry(&xas, mapping, 0); if (xa_is_internal(entry)) { - ret = xa_to_internal(entry); + ret = dax_vm_fault_decode(entry); goto out; } @@ -1578,7 +1587,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, */ max_pgoff = DIV_ROUND_UP(i_size_read(iter.inode), PAGE_SIZE); - trace_dax_pmd_fault(iter.inode, vmf, max_pgoff, 0); + trace_dax_pmd_fault(iter.inode, vmf, max_pgoff, 
(vm_fault_t)0); if (xas.xa_index >= max_pgoff) { ret = VM_FAULT_SIGBUS; @@ -1596,7 +1605,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, */ entry = grab_mapping_entry(&xas, mapping, PMD_ORDER); if (xa_is_internal(entry)) { - ret = xa_to_internal(entry); + ret = dax_vm_fault_decode(entry); goto fallback; } diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 8834e38c06a4..57cc4918b1b1 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -745,20 +745,22 @@ enum vm_fault_reason { VM_FAULT_SIGSEGV | VM_FAULT_HWPOISON | \ VM_FAULT_HWPOISON_LARGE | VM_FAULT_FALLBACK) -#define VM_FAULT_RESULT_TRACE \ - { VM_FAULT_OOM, "OOM" }, \ - { VM_FAULT_SIGBUS, "SIGBUS" }, \ - { VM_FAULT_MAJOR, "MAJOR" }, \ - { VM_FAULT_WRITE, "WRITE" }, \ - { VM_FAULT_HWPOISON, "HWPOISON" }, \ - { VM_FAULT_HWPOISON_LARGE, "HWPOISON_LARGE" }, \ - { VM_FAULT_SIGSEGV, "SIGSEGV" }, \ - { VM_FAULT_NOPAGE, "NOPAGE" }, \ - { VM_FAULT_LOCKED, "LOCKED" }, \ - { VM_FAULT_RETRY, "RETRY" }, \ - { VM_FAULT_FALLBACK, "FALLBACK" }, \ - { VM_FAULT_DONE_COW, "DONE_COW" }, \ - { VM_FAULT_NEEDDSYNC, "NEEDDSYNC" } +#define faultflag_string(flag) {(__force unsigned long)VM_FAULT_##flag, #flag} + +#define VM_FAULT_RESULT_TRACE \ + faultflag_string(OOM), \ + faultflag_string(SIGBUS), \ + faultflag_string(MAJOR), \ + faultflag_string(WRITE), \ + faultflag_string(HWPOISON), \ + faultflag_string(HWPOISON_LARGE), \ + faultflag_string(SIGSEGV), \ + faultflag_string(NOPAGE), \ + faultflag_string(LOCKED), \ + faultflag_string(RETRY), \ + faultflag_string(FALLBACK), \ + faultflag_string(DONE_COW), \ + faultflag_string(NEEDDSYNC) struct vm_special_mapping { const char *name; /* The name, e.g. "[vdso]". 
*/ diff --git a/include/trace/events/fs_dax.h b/include/trace/events/fs_dax.h index 97b09fcf7e52..75908bdc7b2d 100644 --- a/include/trace/events/fs_dax.h +++ b/include/trace/events/fs_dax.h @@ -9,7 +9,7 @@ DECLARE_EVENT_CLASS(dax_pmd_fault_class, TP_PROTO(struct inode *inode, struct vm_fault *vmf, - pgoff_t max_pgoff, int result), + pgoff_t max_pgoff, vm_fault_t result), TP_ARGS(inode, vmf, max_pgoff, result), TP_STRUCT__entry( __field(unsigned long, ino) @@ -33,7 +33,7 @@ DECLARE_EVENT_CLASS(dax_pmd_fault_class, __entry->flags = vmf->flags; __entry->pgoff = vmf->pgoff; __entry->max_pgoff = max_pgoff; - __entry->result = result; + __entry->result = (__force int)result; ), TP_printk("dev %d:%d ino %#lx %s %s address %#lx vm_start " "%#lx vm_end %#lx pgoff %#lx max_pgoff %#lx %s", @@ -54,7 +54,7 @@ DECLARE_EVENT_CLASS(dax_pmd_fault_class, #define DEFINE_PMD_FAULT_EVENT(name) \ DEFINE_EVENT(dax_pmd_fault_class, name, \ TP_PROTO(struct inode *inode, struct vm_fault *vmf, \ - pgoff_t max_pgoff, int result), \ + pgoff_t max_pgoff, vm_fault_t result), \ TP_ARGS(inode, vmf, max_pgoff, result)) DEFINE_PMD_FAULT_EVENT(dax_pmd_fault); @@ -151,7 +151,7 @@ DEFINE_EVENT(dax_pmd_insert_mapping_class, name, \ DEFINE_PMD_INSERT_MAPPING_EVENT(dax_pmd_insert_mapping); DECLARE_EVENT_CLASS(dax_pte_fault_class, - TP_PROTO(struct inode *inode, struct vm_fault *vmf, int result), + TP_PROTO(struct inode *inode, struct vm_fault *vmf, vm_fault_t result), TP_ARGS(inode, vmf, result), TP_STRUCT__entry( __field(unsigned long, ino) @@ -169,7 +169,7 @@ DECLARE_EVENT_CLASS(dax_pte_fault_class, __entry->address = vmf->address; __entry->flags = vmf->flags; __entry->pgoff = vmf->pgoff; - __entry->result = result; + __entry->result = (__force int)result; ), TP_printk("dev %d:%d ino %#lx %s %s address %#lx pgoff %#lx %s", MAJOR(__entry->dev), @@ -185,7 +185,7 @@ DECLARE_EVENT_CLASS(dax_pte_fault_class, #define DEFINE_PTE_FAULT_EVENT(name) \ DEFINE_EVENT(dax_pte_fault_class, name, \ - TP_PROTO(struct 
inode *inode, struct vm_fault *vmf, int result), \ + TP_PROTO(struct inode *inode, struct vm_fault *vmf, vm_fault_t result), \ TP_ARGS(inode, vmf, result)) DEFINE_PTE_FAULT_EVENT(dax_pte_fault); -- 2.31.1 ^ permalink raw reply related [flat|nested] 3+ messages in thread
end of thread, other threads:[~2022-05-14 19:56 UTC | newest] Thread overview: 3+ messages (download: mbox.gz follow: Atom feed -- links below jump to the message on this page -- 2022-05-14 14:26 [PATCH] sparse: use force attribute for vm_fault_t casts Vasily Averin 2022-05-14 17:34 ` Matthew Wilcox 2022-05-14 19:56 ` [PATCH v2] " Vasily Averin
This is a public inbox, see mirroring instructions for how to clone and mirror all data and code used for this inbox; as well as URLs for NNTP newsgroup(s).