* [RFC 1/2] refcount: introduce generic lockptr funcs
@ 2023-11-03 16:16 Alexander Aring
2023-11-03 16:16 ` [RFC 2/2] kref: introduce kref_put_lockptr() and use lockptr Alexander Aring
2023-11-03 18:54 ` [RFC 1/2] refcount: introduce generic lockptr funcs Peter Zijlstra
0 siblings, 2 replies; 9+ messages in thread
From: Alexander Aring @ 2023-11-03 16:16 UTC (permalink / raw)
To: will; +Cc: gfs2, aahringo, peterz, boqun.feng, mark.rutland, linux-kernel
This patch introduces lockptr refcount operations. Currently refcount has
a lot of refcount_dec_and_lock() functionality for the most commonly used
locktypes. Those functions look mostly all the same and are duplicated
inside the refcount implementation. Instead of introducing a whole new
refcount_dec_and_lock() functionality e.g. for rwlock_t and its _bh
variants, this patch introduces lockptr. A lockptr is just a void *
and refers to the actual locking instance, which can even be a custom
locking type. Through the passed callbacks for lock and unlock operations,
the void *lockptr becomes the real thing by casting it and performing the
locktype-specific lock operation.
Signed-off-by: Alexander Aring <aahringo@redhat.com>
---
include/linux/refcount.h | 15 +++++++
lib/refcount.c | 92 ++++++++++++++++++++++++++++------------
2 files changed, 80 insertions(+), 27 deletions(-)
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index a62fcca97486..7b1fb85212cc 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -366,4 +366,19 @@ extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
spinlock_t *lock,
unsigned long *flags) __cond_acquires(lock);
+extern bool refcount_dec_and_lockptr(refcount_t *r, void (*lock)(void *lockptr),
+ void (*unlock)(void *lockptr), void *lockptr) __cond_acquires(lockptr);
+
+extern void lockptr_mutex_lock(void *lockptr) __acquires(lockptr);
+extern void lockptr_mutex_unlock(void *lockptr) __releases(lockptr);
+extern void lockptr_spin_lock(void *lockptr) __acquires(lockptr);
+extern void lockptr_spin_unlock(void *lockptr) __releases(lockptr);
+
+struct lockptr_irqsave_data {
+ void *lockptr;
+ unsigned long *flags;
+};
+extern void lockptr_lock_irqsave(void *lockptr) __acquires(lockptr);
+extern void lockptr_unlock_irqsave(void *lockptr) __releases(lockptr);
+
#endif /* _LINUX_REFCOUNT_H */
diff --git a/lib/refcount.c b/lib/refcount.c
index a207a8f22b3c..e28678f0f473 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -94,6 +94,34 @@ bool refcount_dec_not_one(refcount_t *r)
}
EXPORT_SYMBOL(refcount_dec_not_one);
+bool refcount_dec_and_lockptr(refcount_t *r, void (*lock)(void *lockptr),
+ void (*unlock)(void *lockptr), void *lockptr)
+{
+ if (refcount_dec_not_one(r))
+ return false;
+
+ lock(lockptr);
+ if (!refcount_dec_and_test(r)) {
+ unlock(lockptr);
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(refcount_dec_and_lockptr);
+
+void lockptr_mutex_lock(void *lockptr)
+{
+ mutex_lock(lockptr);
+}
+EXPORT_SYMBOL(lockptr_mutex_lock);
+
+void lockptr_mutex_unlock(void *lockptr)
+{
+ mutex_unlock(lockptr);
+}
+EXPORT_SYMBOL(lockptr_mutex_unlock);
+
/**
* refcount_dec_and_mutex_lock - return holding mutex if able to decrement
* refcount to 0
@@ -112,18 +140,22 @@ EXPORT_SYMBOL(refcount_dec_not_one);
*/
bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
{
- if (refcount_dec_not_one(r))
- return false;
+ return refcount_dec_and_lockptr(r, lockptr_mutex_lock,
+ lockptr_mutex_unlock, lock);
+}
+EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
- mutex_lock(lock);
- if (!refcount_dec_and_test(r)) {
- mutex_unlock(lock);
- return false;
- }
+void lockptr_spin_lock(void *lockptr)
+{
+ spin_lock(lockptr);
+}
+EXPORT_SYMBOL(lockptr_spin_lock);
- return true;
+void lockptr_spin_unlock(void *lockptr)
+{
+ spin_unlock(lockptr);
}
-EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
+EXPORT_SYMBOL(lockptr_spin_unlock);
/**
* refcount_dec_and_lock - return holding spinlock if able to decrement
@@ -143,18 +175,26 @@ EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
*/
bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
{
- if (refcount_dec_not_one(r))
- return false;
+ return refcount_dec_and_lockptr(r, lockptr_spin_lock,
+ lockptr_spin_unlock, lock);
+}
+EXPORT_SYMBOL(refcount_dec_and_lock);
- spin_lock(lock);
- if (!refcount_dec_and_test(r)) {
- spin_unlock(lock);
- return false;
- }
+void lockptr_lock_irqsave(void *lockptr)
+{
+ struct lockptr_irqsave_data *d = lockptr;
- return true;
+ spin_lock_irqsave(d->lockptr, *d->flags);
}
-EXPORT_SYMBOL(refcount_dec_and_lock);
+EXPORT_SYMBOL(lockptr_lock_irqsave);
+
+void lockptr_unlock_irqsave(void *lockptr)
+{
+ struct lockptr_irqsave_data *d = lockptr;
+
+ spin_unlock_irqrestore(d->lockptr, *d->flags);
+}
+EXPORT_SYMBOL(lockptr_unlock_irqsave);
/**
* refcount_dec_and_lock_irqsave - return holding spinlock with disabled
@@ -172,15 +212,13 @@ EXPORT_SYMBOL(refcount_dec_and_lock);
bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
unsigned long *flags)
{
- if (refcount_dec_not_one(r))
- return false;
+ struct lockptr_irqsave_data d = {
+ .lockptr = lock,
+ .flags = flags,
+ };
- spin_lock_irqsave(lock, *flags);
- if (!refcount_dec_and_test(r)) {
- spin_unlock_irqrestore(lock, *flags);
- return false;
- }
-
- return true;
+ return refcount_dec_and_lockptr(r, lockptr_lock_irqsave,
+ lockptr_unlock_irqsave, &d);
}
EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
+
--
2.39.3
^ permalink raw reply related [flat|nested] 9+ messages in thread* [RFC 2/2] kref: introduce kref_put_lockptr() and use lockptr
2023-11-03 16:16 [RFC 1/2] refcount: introduce generic lockptr funcs Alexander Aring
@ 2023-11-03 16:16 ` Alexander Aring
2023-11-03 18:54 ` [RFC 1/2] refcount: introduce generic lockptr funcs Peter Zijlstra
1 sibling, 0 replies; 9+ messages in thread
From: Alexander Aring @ 2023-11-03 16:16 UTC (permalink / raw)
To: will; +Cc: gfs2, aahringo, peterz, boqun.feng, mark.rutland, linux-kernel
This patch makes kref_put_lock() more locktype independent by
introducing kref_put_lockptr() and using refcount_dec_and_lockptr(). The
user can now pass a lockptr and perform the specific locktype operation
via parameters. The current kref_put_mutex() and kref_put_lock() have been
adapted to use the new kref_put_lockptr() implementation for existing
users.
Signed-off-by: Alexander Aring <aahringo@redhat.com>
---
include/linux/kref.h | 25 ++++++++++++++++---------
1 file changed, 16 insertions(+), 9 deletions(-)
diff --git a/include/linux/kref.h b/include/linux/kref.h
index d32e21a2538c..09bc79435dbb 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -68,26 +68,33 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)
return 0;
}
-static inline int kref_put_mutex(struct kref *kref,
- void (*release)(struct kref *kref),
- struct mutex *lock)
+static inline int kref_put_lockptr(struct kref *kref,
+ void (*release)(struct kref *kref),
+ void (*lock)(void *lockptr),
+ void (*unlock)(void *lockptr),
+ void *lockptr)
{
- if (refcount_dec_and_mutex_lock(&kref->refcount, lock)) {
+ if (refcount_dec_and_lockptr(&kref->refcount, lock, unlock, lockptr)) {
release(kref);
return 1;
}
return 0;
}
+static inline int kref_put_mutex(struct kref *kref,
+ void (*release)(struct kref *kref),
+ struct mutex *lock)
+{
+ return kref_put_lockptr(kref, release, lockptr_mutex_lock,
+ lockptr_mutex_unlock, lock);
+}
+
static inline int kref_put_lock(struct kref *kref,
void (*release)(struct kref *kref),
spinlock_t *lock)
{
- if (refcount_dec_and_lock(&kref->refcount, lock)) {
- release(kref);
- return 1;
- }
- return 0;
+ return kref_put_lockptr(kref, release, lockptr_spin_lock,
+ lockptr_spin_unlock, lock);
}
/**
--
2.39.3
^ permalink raw reply related [flat|nested] 9+ messages in thread* Re: [RFC 1/2] refcount: introduce generic lockptr funcs
2023-11-03 16:16 [RFC 1/2] refcount: introduce generic lockptr funcs Alexander Aring
2023-11-03 16:16 ` [RFC 2/2] kref: introduce kref_put_lockptr() and use lockptr Alexander Aring
@ 2023-11-03 18:54 ` Peter Zijlstra
2023-11-03 19:20 ` Alexander Aring
1 sibling, 1 reply; 9+ messages in thread
From: Peter Zijlstra @ 2023-11-03 18:54 UTC (permalink / raw)
To: Alexander Aring; +Cc: will, gfs2, boqun.feng, mark.rutland, linux-kernel
On Fri, Nov 03, 2023 at 12:16:34PM -0400, Alexander Aring wrote:
> diff --git a/lib/refcount.c b/lib/refcount.c
> index a207a8f22b3c..e28678f0f473 100644
> --- a/lib/refcount.c
> +++ b/lib/refcount.c
> @@ -94,6 +94,34 @@ bool refcount_dec_not_one(refcount_t *r)
> }
> EXPORT_SYMBOL(refcount_dec_not_one);
>
> +bool refcount_dec_and_lockptr(refcount_t *r, void (*lock)(void *lockptr),
> + void (*unlock)(void *lockptr), void *lockptr)
> +{
> + if (refcount_dec_not_one(r))
> + return false;
> +
> + lock(lockptr);
> + if (!refcount_dec_and_test(r)) {
> + unlock(lockptr);
> + return false;
> + }
> +
> + return true;
> +}
> +EXPORT_SYMBOL(refcount_dec_and_lockptr);
This is terrible, you're forcing indirect calls on everything.
^ permalink raw reply [flat|nested] 9+ messages in thread* Re: [RFC 1/2] refcount: introduce generic lockptr funcs
2023-11-03 18:54 ` [RFC 1/2] refcount: introduce generic lockptr funcs Peter Zijlstra
@ 2023-11-03 19:20 ` Alexander Aring
2023-11-06 11:11 ` Peter Zijlstra
0 siblings, 1 reply; 9+ messages in thread
From: Alexander Aring @ 2023-11-03 19:20 UTC (permalink / raw)
To: Peter Zijlstra; +Cc: will, gfs2, boqun.feng, mark.rutland, linux-kernel
Hi,
On Fri, Nov 3, 2023 at 2:54 PM Peter Zijlstra <peterz@infradead.org> wrote:
>
> On Fri, Nov 03, 2023 at 12:16:34PM -0400, Alexander Aring wrote:
>
> > diff --git a/lib/refcount.c b/lib/refcount.c
> > index a207a8f22b3c..e28678f0f473 100644
> > --- a/lib/refcount.c
> > +++ b/lib/refcount.c
> > @@ -94,6 +94,34 @@ bool refcount_dec_not_one(refcount_t *r)
> > }
> > EXPORT_SYMBOL(refcount_dec_not_one);
> >
> > +bool refcount_dec_and_lockptr(refcount_t *r, void (*lock)(void *lockptr),
> > + void (*unlock)(void *lockptr), void *lockptr)
> > +{
> > + if (refcount_dec_not_one(r))
> > + return false;
> > +
> > + lock(lockptr);
> > + if (!refcount_dec_and_test(r)) {
> > + unlock(lockptr);
> > + return false;
> > + }
> > +
> > + return true;
> > +}
> > +EXPORT_SYMBOL(refcount_dec_and_lockptr);
>
> This is terrible, you're forcing indirect calls on everything.
>
Okay, I see. How about introducing a macro producing all the code at
preprocessor time?
- Alex
^ permalink raw reply [flat|nested] 9+ messages in thread* Re: [RFC 1/2] refcount: introduce generic lockptr funcs
2023-11-03 19:20 ` Alexander Aring
@ 2023-11-06 11:11 ` Peter Zijlstra
2023-11-06 15:12 ` Alexander Aring
0 siblings, 1 reply; 9+ messages in thread
From: Peter Zijlstra @ 2023-11-06 11:11 UTC (permalink / raw)
To: Alexander Aring; +Cc: will, gfs2, boqun.feng, mark.rutland, linux-kernel
On Fri, Nov 03, 2023 at 03:20:08PM -0400, Alexander Aring wrote:
> Hi,
>
> On Fri, Nov 3, 2023 at 2:54 PM Peter Zijlstra <peterz@infradead.org> wrote:
> >
> > On Fri, Nov 03, 2023 at 12:16:34PM -0400, Alexander Aring wrote:
> >
> > > diff --git a/lib/refcount.c b/lib/refcount.c
> > > index a207a8f22b3c..e28678f0f473 100644
> > > --- a/lib/refcount.c
> > > +++ b/lib/refcount.c
> > > @@ -94,6 +94,34 @@ bool refcount_dec_not_one(refcount_t *r)
> > > }
> > > EXPORT_SYMBOL(refcount_dec_not_one);
> > >
> > > +bool refcount_dec_and_lockptr(refcount_t *r, void (*lock)(void *lockptr),
> > > + void (*unlock)(void *lockptr), void *lockptr)
> > > +{
> > > + if (refcount_dec_not_one(r))
> > > + return false;
> > > +
> > > + lock(lockptr);
> > > + if (!refcount_dec_and_test(r)) {
> > > + unlock(lockptr);
> > > + return false;
> > > + }
> > > +
> > > + return true;
> > > +}
> > > +EXPORT_SYMBOL(refcount_dec_and_lockptr);
> >
> > This is terrible, you're forcing indirect calls on everything.
> >
>
> Okay, I see. How about introducing a macro producing all the code at
> preprocessor time?
__always_inline should work, then you get constant propagation for the
function pointer.
But indeed, perhaps a macro is more convenient vs the irq flags
argument. You'll then end up with something like:
#define __refcount_dec_and_lock(_ref, _lock, _unlock) \
({ bool _ret = false; \
if (!refcount_dec_not_one(_ref)) { \
_lock; \
if (!refcount_dec_and_test(_ref)) { \
_unlock; \
} else { \
_ret = true; \
} \
} \
_ret; \
})
bool refcount_dec_and_spinlock_irqsave(refcount_t *r, spinlock_t *lock,
unsigned long *flags)
{
return __refcount_dec_and_lock(r, spin_lock_irqsave(*lock, *flags),
spin_unlock_irqrestore(*lock, *flags));
}
^ permalink raw reply [flat|nested] 9+ messages in thread* Re: [RFC 1/2] refcount: introduce generic lockptr funcs
2023-11-06 11:11 ` Peter Zijlstra
@ 2023-11-06 15:12 ` Alexander Aring
0 siblings, 0 replies; 9+ messages in thread
From: Alexander Aring @ 2023-11-06 15:12 UTC (permalink / raw)
To: Peter Zijlstra; +Cc: will, gfs2, boqun.feng, mark.rutland, linux-kernel
Hi,
On Mon, Nov 6, 2023 at 6:11 AM Peter Zijlstra <peterz@infradead.org> wrote:
>
> On Fri, Nov 03, 2023 at 03:20:08PM -0400, Alexander Aring wrote:
> > Hi,
> >
> > On Fri, Nov 3, 2023 at 2:54 PM Peter Zijlstra <peterz@infradead.org> wrote:
> > >
> > > On Fri, Nov 03, 2023 at 12:16:34PM -0400, Alexander Aring wrote:
> > >
> > > > diff --git a/lib/refcount.c b/lib/refcount.c
> > > > index a207a8f22b3c..e28678f0f473 100644
> > > > --- a/lib/refcount.c
> > > > +++ b/lib/refcount.c
> > > > @@ -94,6 +94,34 @@ bool refcount_dec_not_one(refcount_t *r)
> > > > }
> > > > EXPORT_SYMBOL(refcount_dec_not_one);
> > > >
> > > > +bool refcount_dec_and_lockptr(refcount_t *r, void (*lock)(void *lockptr),
> > > > + void (*unlock)(void *lockptr), void *lockptr)
> > > > +{
> > > > + if (refcount_dec_not_one(r))
> > > > + return false;
> > > > +
> > > > + lock(lockptr);
> > > > + if (!refcount_dec_and_test(r)) {
> > > > + unlock(lockptr);
> > > > + return false;
> > > > + }
> > > > +
> > > > + return true;
> > > > +}
> > > > +EXPORT_SYMBOL(refcount_dec_and_lockptr);
> > >
> > > This is terrible, you're forcing indirect calls on everything.
> > >
> >
> > Okay, I see. How about introducing a macro producing all the code at
> > preprocessor time?
>
> __always_inline should work, then you get constant propagation for the
> function pointer.
>
Thanks, it is always good to learn something new.
> But indeed, perhaps a macro is more convenient vs the irq flags
> argument. You'll then end up with something like:
>
> #define __refcount_dec_and_lock(_ref, _lock, _unlock) \
> ({ bool _ret = false; \
> if (!refcount_dec_not_one(_ref)) { \
> _lock; \
> if (!refcount_dec_and_test(_ref)) { \
> _unlock; \
> } else { \
> _ret = true; \
> } \
> } \
> _ret; \
> })
>
>
> bool refcount_dec_and_spinlock_irqsave(refcount_t *r, spinlock_t *lock,
> unsigned long *flags)
> {
> return __refcount_dec_and_lock(r, spin_lock_irqsave(*lock, *flags),
> spin_unlock_irqrestore(*lock, *flags));
> }
I was thinking of solving the additional flags parameter with
prototype and args macros, e.g. the way TRACE_EVENT() does it, but this
version looks much better.
I will send a patch and do similar things with _kref_put_lock().
- Alex
^ permalink raw reply [flat|nested] 9+ messages in thread
* [RFC 1/2] refcount: introduce generic lockptr funcs
@ 2023-11-03 16:06 Alexander Aring
2023-11-03 16:14 ` Alexander Aring
0 siblings, 1 reply; 9+ messages in thread
From: Alexander Aring @ 2023-11-03 16:06 UTC (permalink / raw)
To: will; +Cc: gfs2, aahringo, peterz, boqun.feng, mark.rutland
This patch introduces lockptr refcount operations. Currently refcount has
a lot of refcount_dec_and_lock() functionality for the most commonly used
locktypes. Those functions look mostly all the same and are duplicated
inside the refcount implementation. Instead of introducing a whole new
refcount_dec_and_lock() functionality e.g. for rwlock_t and its _bh
variants, this patch introduces lockptr. A lockptr is just a void *
and refers to the actual locking instance, which can even be a custom
locking type. Through the passed callbacks for lock and unlock operations,
the void *lockptr becomes the real thing by casting it and performing the
locktype-specific lock operation.
Signed-off-by: Alexander Aring <aahringo@redhat.com>
---
include/linux/refcount.h | 15 +++++++
lib/refcount.c | 92 ++++++++++++++++++++++++++++------------
2 files changed, 80 insertions(+), 27 deletions(-)
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index a62fcca97486..7b1fb85212cc 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -366,4 +366,19 @@ extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
spinlock_t *lock,
unsigned long *flags) __cond_acquires(lock);
+extern bool refcount_dec_and_lockptr(refcount_t *r, void (*lock)(void *lockptr),
+ void (*unlock)(void *lockptr), void *lockptr) __cond_acquires(lockptr);
+
+extern void lockptr_mutex_lock(void *lockptr) __acquires(lockptr);
+extern void lockptr_mutex_unlock(void *lockptr) __releases(lockptr);
+extern void lockptr_spin_lock(void *lockptr) __acquires(lockptr);
+extern void lockptr_spin_unlock(void *lockptr) __releases(lockptr);
+
+struct lockptr_irqsave_data {
+ void *lockptr;
+ unsigned long *flags;
+};
+extern void lockptr_lock_irqsave(void *lockptr) __acquires(lockptr);
+extern void lockptr_unlock_irqsave(void *lockptr) __releases(lockptr);
+
#endif /* _LINUX_REFCOUNT_H */
diff --git a/lib/refcount.c b/lib/refcount.c
index a207a8f22b3c..e28678f0f473 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -94,6 +94,34 @@ bool refcount_dec_not_one(refcount_t *r)
}
EXPORT_SYMBOL(refcount_dec_not_one);
+bool refcount_dec_and_lockptr(refcount_t *r, void (*lock)(void *lockptr),
+ void (*unlock)(void *lockptr), void *lockptr)
+{
+ if (refcount_dec_not_one(r))
+ return false;
+
+ lock(lockptr);
+ if (!refcount_dec_and_test(r)) {
+ unlock(lockptr);
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(refcount_dec_and_lockptr);
+
+void lockptr_mutex_lock(void *lockptr)
+{
+ mutex_lock(lockptr);
+}
+EXPORT_SYMBOL(lockptr_mutex_lock);
+
+void lockptr_mutex_unlock(void *lockptr)
+{
+ mutex_unlock(lockptr);
+}
+EXPORT_SYMBOL(lockptr_mutex_unlock);
+
/**
* refcount_dec_and_mutex_lock - return holding mutex if able to decrement
* refcount to 0
@@ -112,18 +140,22 @@ EXPORT_SYMBOL(refcount_dec_not_one);
*/
bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
{
- if (refcount_dec_not_one(r))
- return false;
+ return refcount_dec_and_lockptr(r, lockptr_mutex_lock,
+ lockptr_mutex_unlock, lock);
+}
+EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
- mutex_lock(lock);
- if (!refcount_dec_and_test(r)) {
- mutex_unlock(lock);
- return false;
- }
+void lockptr_spin_lock(void *lockptr)
+{
+ spin_lock(lockptr);
+}
+EXPORT_SYMBOL(lockptr_spin_lock);
- return true;
+void lockptr_spin_unlock(void *lockptr)
+{
+ spin_unlock(lockptr);
}
-EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
+EXPORT_SYMBOL(lockptr_spin_unlock);
/**
* refcount_dec_and_lock - return holding spinlock if able to decrement
@@ -143,18 +175,26 @@ EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
*/
bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
{
- if (refcount_dec_not_one(r))
- return false;
+ return refcount_dec_and_lockptr(r, lockptr_spin_lock,
+ lockptr_spin_unlock, lock);
+}
+EXPORT_SYMBOL(refcount_dec_and_lock);
- spin_lock(lock);
- if (!refcount_dec_and_test(r)) {
- spin_unlock(lock);
- return false;
- }
+void lockptr_lock_irqsave(void *lockptr)
+{
+ struct lockptr_irqsave_data *d = lockptr;
- return true;
+ spin_lock_irqsave(d->lockptr, *d->flags);
}
-EXPORT_SYMBOL(refcount_dec_and_lock);
+EXPORT_SYMBOL(lockptr_lock_irqsave);
+
+void lockptr_unlock_irqsave(void *lockptr)
+{
+ struct lockptr_irqsave_data *d = lockptr;
+
+ spin_unlock_irqrestore(d->lockptr, *d->flags);
+}
+EXPORT_SYMBOL(lockptr_unlock_irqsave);
/**
* refcount_dec_and_lock_irqsave - return holding spinlock with disabled
@@ -172,15 +212,13 @@ EXPORT_SYMBOL(refcount_dec_and_lock);
bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
unsigned long *flags)
{
- if (refcount_dec_not_one(r))
- return false;
+ struct lockptr_irqsave_data d = {
+ .lockptr = lock,
+ .flags = flags,
+ };
- spin_lock_irqsave(lock, *flags);
- if (!refcount_dec_and_test(r)) {
- spin_unlock_irqrestore(lock, *flags);
- return false;
- }
-
- return true;
+ return refcount_dec_and_lockptr(r, lockptr_lock_irqsave,
+ lockptr_unlock_irqsave, &d);
}
EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
+
--
2.39.3
^ permalink raw reply related [flat|nested] 9+ messages in thread* Re: [RFC 1/2] refcount: introduce generic lockptr funcs
2023-11-03 16:06 Alexander Aring
@ 2023-11-03 16:14 ` Alexander Aring
2023-11-03 16:16 ` Alexander Aring
0 siblings, 1 reply; 9+ messages in thread
From: Alexander Aring @ 2023-11-03 16:14 UTC (permalink / raw)
To: will; +Cc: gfs2, peterz, boqun.feng, mark.rutland
Hi,
On Fri, Nov 3, 2023 at 12:07 PM Alexander Aring <aahringo@redhat.com> wrote:
>
> This patch introduce lockptr refcount operations. Currently refcount has
> a lot of refcount_dec_and_lock() functionality for most common used
> locktype. Those functions look mostly all the same and is duplicated
> inside the refcount implementation. Instead of introducing a new whole
> refcount_dec_and_lock() functionality e.g. for rwlock_t and their _bh
> variants this patch will introduce lockptr. A lockptr is just a void *
> and refers to the actual locking instance that can even be an own
> locking type. Over the passed callbacks for lock and unlock operations
> the void *lockptr becomes to the real thing by casting it and do the
> locktype specific lock operation.
just an RFC to check if there is any interest in introducing something
like this. I think the idea is clear. My current use case is to have
rwlock_t and its bh lock operations using something like
refcount_dec_and_write_lock_bh() and later kref_put_write_lock_bh(). I
am trying to avoid copying some code again. I am open to any better
design change, or to being told to just duplicate the code again for
what I need it for. However, this has the advantage that somebody can
use their "own" locktype implementation outside of kernel core code.
Thanks.
- Alex
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [RFC 1/2] refcount: introduce generic lockptr funcs
2023-11-03 16:14 ` Alexander Aring
@ 2023-11-03 16:16 ` Alexander Aring
0 siblings, 0 replies; 9+ messages in thread
From: Alexander Aring @ 2023-11-03 16:16 UTC (permalink / raw)
To: will; +Cc: gfs2, peterz, boqun.feng, mark.rutland
Hi,
On Fri, Nov 3, 2023 at 12:14 PM Alexander Aring <aahringo@redhat.com> wrote:
>
> Hi,
>
> On Fri, Nov 3, 2023 at 12:07 PM Alexander Aring <aahringo@redhat.com> wrote:
> >
> > This patch introduce lockptr refcount operations. Currently refcount has
> > a lot of refcount_dec_and_lock() functionality for most common used
> > locktype. Those functions look mostly all the same and is duplicated
> > inside the refcount implementation. Instead of introducing a new whole
> > refcount_dec_and_lock() functionality e.g. for rwlock_t and their _bh
> > variants this patch will introduce lockptr. A lockptr is just a void *
> > and refers to the actual locking instance that can even be an own
> > locking type. Over the passed callbacks for lock and unlock operations
> > the void *lockptr becomes to the real thing by casting it and do the
> > locktype specific lock operation.
>
> just an RFC to check if there is any interest to introduce something
> like this. I think the idea is clear. My current use case is to have
> rwlock_t and its bh lock operations using something like
> refcount_dec_and_write_lock_bh() and later kref_put_write_lock_bh(). I
> try to avoid copying some copy code again. I am open to any better
> design change. Or telling me to just duplicate code again for what I
> need it for. However this has the advantage that somebody can use
> their "own" locktype implementation out of kernel core code.
I will resubmit this RFC series because I forgot to cc the linux
kernel mailing list that indeed makes sense here.
- Alex
^ permalink raw reply [flat|nested] 9+ messages in thread
end of thread, other threads:[~2023-11-06 15:12 UTC | newest]
Thread overview: 9+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2023-11-03 16:16 [RFC 1/2] refcount: introduce generic lockptr funcs Alexander Aring
2023-11-03 16:16 ` [RFC 2/2] kref: introduce kref_put_lockptr() and use lockptr Alexander Aring
2023-11-03 18:54 ` [RFC 1/2] refcount: introduce generic lockptr funcs Peter Zijlstra
2023-11-03 19:20 ` Alexander Aring
2023-11-06 11:11 ` Peter Zijlstra
2023-11-06 15:12 ` Alexander Aring
-- strict thread matches above, loose matches on Subject: below --
2023-11-03 16:06 Alexander Aring
2023-11-03 16:14 ` Alexander Aring
2023-11-03 16:16 ` Alexander Aring
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox