From: Alexander Aring <aahringo@redhat.com>
To: teigland@redhat.com
Cc: gfs2@lists.linux.dev, aahringo@redhat.com
Subject: [RFC dlm/next 7/9] dlm: drop scand kthread and use timers
Date: Wed, 10 Apr 2024 09:48:56 -0400 [thread overview]
Message-ID: <20240410134858.3295266-8-aahringo@redhat.com> (raw)
In-Reply-To: <20240410134858.3295266-1-aahringo@redhat.com>
Currently the scand kthread acts like a garbage collector for expired
rsbs on toss list to clean them up after a certain timeout. It triggers
every couple of seconds and iterates over the toss list while holding
ls_rsbtbl_lock for the whole hash bucket iteration. To reduce the time
the ls_rsbtbl_lock is held, we handle freeing of cached rsbs (tossed
rsbs) on a per-rsb timer. If it expires, the rsb deletes itself out of the
rsb data structures. Additionally, the timer can run on any cpu instead of
scand that was only running on one cpu, so there should be a performance
speedup handling such freeing of resources.
Signed-off-by: Alexander Aring <aahringo@redhat.com>
---
fs/dlm/dlm_internal.h | 15 ++-
fs/dlm/lock.c | 272 +++++++++++++++++++++---------------------
fs/dlm/lockspace.c | 105 +++-------------
fs/dlm/recover.c | 24 +++-
4 files changed, 178 insertions(+), 238 deletions(-)
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index cf43b97cf3e5..0ab76ed8685e 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -320,6 +320,7 @@ struct dlm_rsb {
uint32_t res_lvbseq;
uint32_t res_hash;
unsigned long res_toss_time;
+ struct timer_list res_toss_timer;
uint32_t res_first_lkid;
struct list_head res_lookup; /* lkbs waiting on first */
union {
@@ -368,6 +369,7 @@ enum rsb_flags {
RSB_RECOVER_GRANT,
RSB_RECOVER_LVB_INVAL,
RSB_TOSS,
+ RSB_TIMER_KILLED,
};
static inline void rsb_set_flag(struct dlm_rsb *r, enum rsb_flags flag)
@@ -584,8 +586,6 @@ struct dlm_ls {
spinlock_t ls_lkbidr_spin;
struct rhashtable ls_rsbtbl;
-#define DLM_RTF_SHRINK_BIT 0
- unsigned long ls_rsbtbl_flags;
spinlock_t ls_rsbtbl_lock;
struct list_head ls_toss;
@@ -601,9 +601,6 @@ struct dlm_ls {
int ls_new_rsb_count;
struct list_head ls_new_rsb; /* new rsb structs */
- char *ls_remove_names[DLM_REMOVE_NAMES_MAX];
- int ls_remove_lens[DLM_REMOVE_NAMES_MAX];
-
struct list_head ls_nodes; /* current nodes in ls */
struct list_head ls_nodes_gone; /* dead node list, recovery */
int ls_num_nodes; /* number of nodes in ls */
@@ -698,6 +695,13 @@ struct dlm_ls {
* dlm_ls_stop() clears this to tell dlm locking routines that they should
* quit what they are doing so recovery can run. dlm_recoverd sets
* this after recovery is finished.
+ *
+ * LSFL_TIMER_KILLED - a per-lockspace switch that kills all timers in the
+ * sense that they don't do anything anymore. To set this flag the caller
+ * needs to hold the ls_in_recovery write lock. It is useful to avoid iterating
+ * over all timers and calling timer_shutdown_sync() to stop every res_toss_timer
+ * that is currently ongoing or queued, e.g. when release_lockspace() is called
+ * and we need to force everything to stop.
*/
#define LSFL_RECOVER_STOP 0
@@ -712,6 +716,7 @@ struct dlm_ls {
#define LSFL_CB_DELAY 9
#define LSFL_NODIR 10
#define LSFL_RECV_MSG_BLOCKED 11
+#define LSFL_TIMER_KILLED 12
#define DLM_PROC_FLAGS_CLOSING 1
#define DLM_PROC_FLAGS_COMPAT 2
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index fee1a4164fc1..b3f0b0445812 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -320,6 +320,11 @@ static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
* Basic operations on rsb's and lkb's
*/
+static inline unsigned long rsb_toss_jiffies(const struct dlm_rsb *r)
+{
+ return jiffies + (READ_ONCE(dlm_config.ci_toss_secs) * HZ);
+}
+
/* This is only called to add a reference when the code already holds
a valid reference to the rsb, so there's no need for locking. */
@@ -416,6 +421,108 @@ static int pre_rsb_struct(struct dlm_ls *ls)
return 0;
}
+/* Caller must hold ls_rsbtbl_lock; this must be called every time
+ * when either the rsb enters toss state or the toss state changes
+ * the dir/master nodeid.
+ */
+static void rsb_mod_timer(struct dlm_ls *ls, struct dlm_rsb *r)
+{
+ int our_nodeid = dlm_our_nodeid();
+
+ /* If we're the directory record for this rsb, and
+ * we're not the master of it, then we need to wait
+ * for the master node to send us a dir remove
+ * before removing the dir record.
+ */
+ if (!dlm_no_directory(ls) &&
+ (r->res_master_nodeid != our_nodeid) &&
+ (dlm_dir_nodeid(r) == our_nodeid)) {
+ /* indicates we are waiting for a send_remove message */
+ r->res_toss_time = 0;
+ rsb_set_flag(r, RSB_TIMER_KILLED);
+ timer_delete(&r->res_toss_timer);
+ return;
+ }
+
+ rsb_clear_flag(r, RSB_TIMER_KILLED);
+ r->res_toss_time = jiffies;
+ mod_timer(&r->res_toss_timer, rsb_toss_jiffies(r));
+}
+
+static void dlm_rsb_toss_timer(struct timer_list *timer)
+{
+ struct dlm_rsb *r = container_of(timer, struct dlm_rsb, res_toss_timer);
+ int our_nodeid = dlm_our_nodeid();
+ struct dlm_ls *ls = r->res_ls;
+ int rv;
+
+ rv = dlm_lock_recovery_try(ls);
+ if (!rv) {
+ /* rearm timer */
+ mod_timer(&r->res_toss_timer, rsb_toss_jiffies(r));
+ return;
+ }
+
+ /* check if the timer was killed on a per lockspace basis
+ * this flag is set when the ls_in_recovery lock is held
+ * as write lock.
+ */
+ if (test_bit(LSFL_TIMER_KILLED, &ls->ls_flags)) {
+ dlm_unlock_recovery(ls);
+ return;
+ }
+
+ rv = spin_trylock(&ls->ls_rsbtbl_lock);
+ if (!rv) {
+ dlm_unlock_recovery(ls);
+ /* rearm timer */
+ mod_timer(&r->res_toss_timer, rsb_toss_jiffies(r));
+ return;
+ }
+
+ /* if the timer got killed we do nothing. The timer
+ * may also have been rearmed because the rsb switched
+ * from toss to keep and back to toss again while the
+ * timer callback of the first toss state was still
+ * executing; a rearmed timer means a new toss timeout,
+ * so we also check whether the timer is currently
+ * pending.
+ */
+ if (rsb_flag(r, RSB_TIMER_KILLED) ||
+ timer_pending(&r->res_toss_timer)) {
+ spin_unlock(&ls->ls_rsbtbl_lock);
+ dlm_unlock_recovery(ls);
+ return;
+ }
+
+ list_del(&r->res_rsbs_list);
+ rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node,
+ dlm_rhash_rsb_params);
+
+ /* not necessary to hold the ls_rsbtbl_lock when
+ * calling send_remove(); it's gone from DLM now
+ */
+ spin_unlock(&ls->ls_rsbtbl_lock);
+
+ /* no rsb in this state should ever run a timer */
+ WARN_ON(!dlm_no_directory(ls) &&
+ (r->res_master_nodeid != our_nodeid) &&
+ (dlm_dir_nodeid(r) == our_nodeid));
+
+ /* We're the master of this rsb but we're not
+ * the directory record, so we need to tell the
+ * dir node to remove the dir record
+ */
+ if (!dlm_no_directory(ls) &&
+ (r->res_master_nodeid == our_nodeid) &&
+ (dlm_dir_nodeid(r) != our_nodeid))
+ send_remove(r);
+
+ dlm_unlock_recovery(ls);
+
+ free_toss_rsb(r);
+}
+
/* If ls->ls_new_rsb is empty, return -EAGAIN, so the caller can
unlock any spinlocks, go back and call pre_rsb_struct again.
Otherwise, take an rsb off the list and return it. */
@@ -454,6 +561,9 @@ static int get_rsb_struct(struct dlm_ls *ls, const void *name, int len,
INIT_LIST_HEAD(&r->res_recover_list);
INIT_LIST_HEAD(&r->res_masters_list);
+ timer_setup(&r->res_toss_timer, dlm_rsb_toss_timer,
+ TIMER_DEFERRABLE);
+
*r_ret = r;
return 0;
}
@@ -638,6 +748,15 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
* valid for keep state rsbs
*/
kref_init(&r->res_ref);
+ /* force kill the timer; we can't wait here for the
+ * timer to finish executing, but we hold ls_rsbtbl_lock
+ * to make sure the timer does not do anything anymore.
+ * The timer_delete() is just an optimization: if the timer
+ * is pending we stop it from being executed when it expires.
+ */
+ rsb_set_flag(r, RSB_TIMER_KILLED);
+ timer_delete(&r->res_toss_timer);
+
goto out_unlock;
@@ -777,6 +896,15 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len,
* valid for keep state rsbs
*/
kref_init(&r->res_ref);
+ /* force kill the timer; we can't wait here for the
+ * timer to finish executing, but we hold ls_rsbtbl_lock
+ * to make sure the timer does not do anything anymore.
+ * The timer_delete() is just an optimization: if the timer
+ * is pending we stop it from being executed when it expires.
+ */
+ rsb_set_flag(r, RSB_TIMER_KILLED);
+ timer_delete(&r->res_toss_timer);
+
goto out_unlock;
@@ -1050,7 +1178,7 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name,
__dlm_master_lookup(ls, r, our_nodeid, from_nodeid, true, flags,
r_nodeid, result);
- r->res_toss_time = jiffies;
+ rsb_mod_timer(ls, r);
/* the rsb was inactive (on toss list) */
spin_unlock_bh(&ls->ls_rsbtbl_lock);
@@ -1070,7 +1198,6 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name,
r->res_master_nodeid = from_nodeid;
r->res_nodeid = from_nodeid;
kref_init(&r->res_ref);
- r->res_toss_time = jiffies;
rsb_set_flag(r, RSB_TOSS);
error = rsb_insert(r, &ls->ls_rsbtbl);
@@ -1082,6 +1209,7 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name,
}
list_add(&r->res_rsbs_list, &ls->ls_toss);
+ rsb_mod_timer(ls, r);
if (result)
*result = DLM_LU_ADD;
@@ -1126,8 +1254,8 @@ static void toss_rsb(struct kref *kref)
DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r););
rsb_set_flag(r, RSB_TOSS);
list_move(&r->res_rsbs_list, &ls->ls_toss);
- r->res_toss_time = jiffies;
- set_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl_flags);
+ rsb_mod_timer(ls, r);
+
if (r->res_lvbptr) {
dlm_free_lvb(r->res_lvbptr);
r->res_lvbptr = NULL;
@@ -1154,6 +1282,8 @@ void free_toss_rsb(struct dlm_rsb *r)
* and it can be freed.
*/
+ WARN_ON_ONCE(timer_pending(&r->res_toss_timer));
+
DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r););
DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r););
DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r););
@@ -1572,140 +1702,6 @@ static int remove_from_waiters_ms(struct dlm_lkb *lkb,
return error;
}
-static void shrink_bucket(struct dlm_ls *ls)
-{
- struct dlm_rsb *r, *safe;
- char *name;
- int our_nodeid = dlm_our_nodeid();
- int remote_count = 0;
- int need_shrink = 0;
- int i, len, rv;
-
- memset(&ls->ls_remove_lens, 0, sizeof(int) * DLM_REMOVE_NAMES_MAX);
-
- spin_lock_bh(&ls->ls_rsbtbl_lock);
-
- if (!test_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl_flags)) {
- spin_unlock_bh(&ls->ls_rsbtbl_lock);
- return;
- }
-
- list_for_each_entry_safe(r, safe, &ls->ls_toss, res_rsbs_list) {
- /* If we're the directory record for this rsb, and
- we're not the master of it, then we need to wait
- for the master node to send us a dir remove for
- before removing the dir record. */
-
- if (!dlm_no_directory(ls) &&
- (r->res_master_nodeid != our_nodeid) &&
- (dlm_dir_nodeid(r) == our_nodeid)) {
- continue;
- }
-
- need_shrink = 1;
-
- if (!time_after_eq(jiffies, r->res_toss_time +
- dlm_config.ci_toss_secs * HZ)) {
- continue;
- }
-
- if (!dlm_no_directory(ls) &&
- (r->res_master_nodeid == our_nodeid) &&
- (dlm_dir_nodeid(r) != our_nodeid)) {
-
- /* We're the master of this rsb but we're not
- the directory record, so we need to tell the
- dir node to remove the dir record. */
-
- ls->ls_remove_lens[remote_count] = r->res_length;
- memcpy(ls->ls_remove_names[remote_count], r->res_name,
- DLM_RESNAME_MAXLEN);
- remote_count++;
-
- if (remote_count >= DLM_REMOVE_NAMES_MAX)
- break;
- continue;
- }
-
- list_del(&r->res_rsbs_list);
- rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node,
- dlm_rhash_rsb_params);
- free_toss_rsb(r);
- }
-
- if (need_shrink)
- set_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl_flags);
- else
- clear_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl_flags);
- spin_unlock_bh(&ls->ls_rsbtbl_lock);
-
- /*
- * While searching for rsb's to free, we found some that require
- * remote removal. We leave them in place and find them again here
- * so there is a very small gap between removing them from the toss
- * list and sending the removal. Keeping this gap small is
- * important to keep us (the master node) from being out of sync
- * with the remote dir node for very long.
- */
-
- for (i = 0; i < remote_count; i++) {
- name = ls->ls_remove_names[i];
- len = ls->ls_remove_lens[i];
-
- spin_lock_bh(&ls->ls_rsbtbl_lock);
- rv = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r);
- if (rv) {
- spin_unlock_bh(&ls->ls_rsbtbl_lock);
- log_error(ls, "remove_name not found %s", name);
- continue;
- }
-
- if (!rsb_flag(r, RSB_TOSS)) {
- spin_unlock_bh(&ls->ls_rsbtbl_lock);
- log_debug(ls, "remove_name not toss %s", name);
- continue;
- }
-
- if (r->res_master_nodeid != our_nodeid) {
- spin_unlock_bh(&ls->ls_rsbtbl_lock);
- log_debug(ls, "remove_name master %d dir %d our %d %s",
- r->res_master_nodeid, r->res_dir_nodeid,
- our_nodeid, name);
- continue;
- }
-
- if (r->res_dir_nodeid == our_nodeid) {
- /* should never happen */
- spin_unlock_bh(&ls->ls_rsbtbl_lock);
- log_error(ls, "remove_name dir %d master %d our %d %s",
- r->res_dir_nodeid, r->res_master_nodeid,
- our_nodeid, name);
- continue;
- }
-
- if (!time_after_eq(jiffies, r->res_toss_time +
- dlm_config.ci_toss_secs * HZ)) {
- spin_unlock_bh(&ls->ls_rsbtbl_lock);
- log_debug(ls, "remove_name toss_time %lu now %lu %s",
- r->res_toss_time, jiffies, name);
- continue;
- }
-
- list_del(&r->res_rsbs_list);
- rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node,
- dlm_rhash_rsb_params);
- send_remove(r);
- spin_unlock_bh(&ls->ls_rsbtbl_lock);
-
- free_toss_rsb(r);
- }
-}
-
-void dlm_scan_rsbs(struct dlm_ls *ls)
-{
- shrink_bucket(ls);
-}
-
/* lkb is master or local copy */
static void set_lvb_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 890e1a4cf787..df7ff5824830 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -29,8 +29,6 @@ static int ls_count;
static struct mutex ls_lock;
static struct list_head lslist;
static spinlock_t lslist_lock;
-static struct task_struct * scand_task;
-
static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len)
{
@@ -247,64 +245,6 @@ void dlm_lockspace_exit(void)
kset_unregister(dlm_kset);
}
-static struct dlm_ls *find_ls_to_scan(void)
-{
- struct dlm_ls *ls;
-
- spin_lock_bh(&lslist_lock);
- list_for_each_entry(ls, &lslist, ls_list) {
- if (time_after_eq(jiffies, ls->ls_scan_time +
- dlm_config.ci_scan_secs * HZ)) {
- atomic_inc(&ls->ls_count);
- spin_unlock_bh(&lslist_lock);
- return ls;
- }
- }
- spin_unlock_bh(&lslist_lock);
- return NULL;
-}
-
-static int dlm_scand(void *data)
-{
- struct dlm_ls *ls;
-
- while (!kthread_should_stop()) {
- ls = find_ls_to_scan();
- if (ls) {
- if (dlm_lock_recovery_try(ls)) {
- ls->ls_scan_time = jiffies;
- dlm_scan_rsbs(ls);
- dlm_unlock_recovery(ls);
- } else {
- ls->ls_scan_time += HZ;
- }
-
- dlm_put_lockspace(ls);
- continue;
- }
- schedule_timeout_interruptible(dlm_config.ci_scan_secs * HZ);
- }
- return 0;
-}
-
-static int dlm_scand_start(void)
-{
- struct task_struct *p;
- int error = 0;
-
- p = kthread_run(dlm_scand, NULL, "dlm_scand");
- if (IS_ERR(p))
- error = PTR_ERR(p);
- else
- scand_task = p;
- return error;
-}
-
-static void dlm_scand_stop(void)
-{
- kthread_stop(scand_task);
-}
-
struct dlm_ls *dlm_find_lockspace_global(uint32_t id)
{
struct dlm_ls *ls;
@@ -385,22 +325,9 @@ static int threads_start(void)
/* Thread for sending/receiving messages for all lockspace's */
error = dlm_midcomms_start();
- if (error) {
+ if (error)
log_print("cannot start dlm midcomms %d", error);
- goto fail;
- }
- error = dlm_scand_start();
- if (error) {
- log_print("cannot start dlm_scand thread %d", error);
- goto midcomms_fail;
- }
-
- return 0;
-
- midcomms_fail:
- dlm_midcomms_stop();
- fail:
return error;
}
@@ -412,7 +339,7 @@ static int new_lockspace(const char *name, const char *cluster,
struct dlm_ls *ls;
int do_unreg = 0;
int namelen = strlen(name);
- int i, error;
+ int error;
if (namelen > DLM_LOCKSPACE_LEN || namelen == 0)
return -EINVAL;
@@ -503,13 +430,6 @@ static int new_lockspace(const char *name, const char *cluster,
if (error)
goto out_lsfree;
- for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) {
- ls->ls_remove_names[i] = kzalloc(DLM_RESNAME_MAXLEN+1,
- GFP_KERNEL);
- if (!ls->ls_remove_names[i])
- goto out_rsbtbl;
- }
-
idr_init(&ls->ls_lkbidr);
spin_lock_init(&ls->ls_lkbidr_spin);
@@ -661,9 +581,6 @@ static int new_lockspace(const char *name, const char *cluster,
kfree(ls->ls_recover_buf);
out_lkbidr:
idr_destroy(&ls->ls_lkbidr);
- out_rsbtbl:
- for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++)
- kfree(ls->ls_remove_names[i]);
rhashtable_destroy(&ls->ls_rsbtbl);
out_lsfree:
if (do_unreg)
@@ -696,7 +613,6 @@ static int __dlm_new_lockspace(const char *name, const char *cluster,
if (error > 0)
error = 0;
if (!ls_count) {
- dlm_scand_stop();
dlm_midcomms_shutdown();
dlm_midcomms_stop();
}
@@ -771,13 +687,19 @@ static void rhash_free_rsb(void *ptr, void *arg)
{
struct dlm_rsb *rsb = ptr;
+ /* be sure no timer callback is still
+ * running before freeing the rsb; it was killed before,
+ * but killing does not guarantee that the callback
+ * is not still queued or actually in execution.
+ */
+ timer_shutdown_sync(&rsb->res_toss_timer);
dlm_free_rsb(rsb);
}
static int release_lockspace(struct dlm_ls *ls, int force)
{
struct dlm_rsb *rsb;
- int i, busy, rv;
+ int busy, rv;
busy = lockspace_busy(ls, force);
@@ -812,8 +734,12 @@ static int release_lockspace(struct dlm_ls *ls, int force)
dlm_recoverd_stop(ls);
+ /* neutralize all res_toss_timer handling */
+ down_write(&ls->ls_in_recovery);
+ set_bit(LSFL_TIMER_KILLED, &ls->ls_flags);
+ up_write(&ls->ls_in_recovery);
+
if (ls_count == 1) {
- dlm_scand_stop();
dlm_clear_members(ls);
dlm_midcomms_shutdown();
}
@@ -839,9 +765,6 @@ static int release_lockspace(struct dlm_ls *ls, int force)
*/
rhashtable_free_and_destroy(&ls->ls_rsbtbl, rhash_free_rsb, NULL);
- for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++)
- kfree(ls->ls_remove_names[i]);
-
while (!list_empty(&ls->ls_new_rsb)) {
rsb = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb,
res_hashchain);
diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
index d43189532b14..fb726dad3c53 100644
--- a/fs/dlm/recover.c
+++ b/fs/dlm/recover.c
@@ -881,18 +881,34 @@ void dlm_recover_rsbs(struct dlm_ls *ls, const struct list_head *root_list)
void dlm_clear_toss(struct dlm_ls *ls)
{
- struct dlm_rsb *r, *safe;
unsigned int count = 0;
+ struct dlm_rsb *r;
+
+ while (1) {
+ spin_lock_bh(&ls->ls_rsbtbl_lock);
+ r = list_first_entry_or_null(&ls->ls_toss, struct dlm_rsb,
+ res_rsbs_list);
+ if (!r) {
+ spin_unlock_bh(&ls->ls_rsbtbl_lock);
+ break;
+ }
- spin_lock_bh(&ls->ls_rsbtbl_lock);
- list_for_each_entry_safe(r, safe, &ls->ls_toss, res_rsbs_list) {
list_del(&r->res_rsbs_list);
rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node,
dlm_rhash_rsb_params);
+ rsb_set_flag(r, RSB_TIMER_KILLED);
+ spin_unlock_bh(&ls->ls_rsbtbl_lock);
+
+ /* Wait until all killed currently running timer
+ * callbacks are done. Killed as RSB_TIMER_KILLED
+ * is set and the timer callback does nothing
+ * anymore and dlm_clear_toss() takes over.
+ */
+ timer_shutdown_sync(&r->res_toss_timer);
+
free_toss_rsb(r);
count++;
}
- spin_unlock_bh(&ls->ls_rsbtbl_lock);
if (count)
log_rinfo(ls, "dlm_clear_toss %u done", count);
--
2.43.0
next prev parent reply other threads:[~2024-04-10 13:49 UTC|newest]
Thread overview: 12+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-04-10 13:48 [RFC dlm/next 0/9] dlm: sand fix, rhashtable, timers and lookup hotpath speedup Alexander Aring
2024-04-10 13:48 ` [RFC dlm/next 1/9] dlm: increment ls_count on find_ls_to_scan() Alexander Aring
2024-04-10 13:48 ` [RFC dlm/next 2/9] dlm: change to non per bucket hashtable lock Alexander Aring
2024-04-10 13:48 ` [RFC dlm/next 3/9] dlm: merge toss and keep hash into one Alexander Aring
2024-04-10 13:48 ` [RFC dlm/next 4/9] dlm: fix avoid rsb hold during debugfs dump Alexander Aring
2024-04-10 13:48 ` [RFC dlm/next 5/9] dlm: switch to use rhashtable for rsbs Alexander Aring
2024-04-10 13:48 ` [RFC dlm/next 6/9] dlm: remove refcounting if rsb is on toss Alexander Aring
2024-04-10 13:48 ` Alexander Aring [this message]
2024-04-11 14:00 ` [RFC dlm/next 7/9] dlm: drop scand kthread and use timers Alexander Aring
2024-04-10 13:48 ` [RFC dlm/next 8/9] dlm: likely read lock path for rsb lookup Alexander Aring
2024-04-11 13:58 ` Alexander Aring
2024-04-10 13:48 ` [RFC dlm/next 9/9] dlm: convert lkbidr to rwlock Alexander Aring
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240410134858.3295266-8-aahringo@redhat.com \
--to=aahringo@redhat.com \
--cc=gfs2@lists.linux.dev \
--cc=teigland@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox