From: James Simmons <jsimmons@infradead.org>
To: Andreas Dilger <adilger@whamcloud.com>,
Oleg Drokin <green@whamcloud.com>, NeilBrown <neilb@suse.de>
Cc: Vitaly Fertman <vitaly.fertman@hpe.com>,
Lustre Development List <lustre-devel@lists.lustre.org>
Subject: [lustre-devel] [PATCH 35/40] lustre: ldlm: BL_AST lock cancel still can be batched
Date: Sun, 9 Apr 2023 08:13:15 -0400
Message-ID: <1681042400-15491-36-git-send-email-jsimmons@infradead.org>
In-Reply-To: <1681042400-15491-1-git-send-email-jsimmons@infradead.org>
From: Vitaly Fertman <vitaly.fertman@hpe.com>
The previous patch made BL_AST locks be cancelled separately. However,
the real cost of a batched cancel is flushing the data covered by the
other locks in the batch, so a BL_AST lock can still be batched with
locks that have no data to flush. This applies only to locks that are
not yet CANCELLING; a lock that is already CANCELLING is on the
l_bl_ast list and is cancelled with its own RPC.
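As a rough sketch (paraphrased from the diff below, with locking and
error handling omitted), the resulting decision flow in
ldlm_cli_cancel() is:

    if (ldlm_is_bl_ast(lock) && ldlm_is_canceling(lock))
            separate = 1;           /* already on the l_bl_ast list */

    rc = ldlm_cli_cancel_local(lock);
    if (rc == LDLM_FL_BL_AST)
            lru_flags |= LDLM_LRU_FLAG_NO_WAIT; /* BL_AST must not wait */

    if (separate) {
            /* one CANCEL RPC for this lock only, sent right away */
            ldlm_cli_cancel_req(exp, lock, NULL, 1, flags);
            return 0;
    }

    /* otherwise batch with LRU locks that can cancel without waiting */
    count += ldlm_cancel_lru_local(ns, &cancels, 0, avail - 1,
                                   LCF_BL_AST, lru_flags);
    ldlm_cli_cancel_list(&cancels, count, NULL, flags);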
Fixes: 1ada5c64 ("lustre: ldlm: send the cancel RPC asap")
WC-bug-id: https://jira.whamcloud.com/browse/LU-16285
Lustre-commit: 9d79f92076b6a9ca7 ("LU-16285 ldlm: BL_AST lock cancel still can be batched")
Signed-off-by: Vitaly Fertman <vitaly.fertman@hpe.com>
Reviewed-on: https://review.whamcloud.com/c/fs/lustre-release/+/50158
Reviewed-by: Yang Sheng <ys@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
Signed-off-by: James Simmons <jsimmons@infradead.org>
---
fs/lustre/include/lustre_dlm.h | 1 -
fs/lustre/ldlm/ldlm_lockd.c | 3 ++-
fs/lustre/ldlm/ldlm_request.c | 42 +++++++++++++++++++++++++-----------------
3 files changed, 27 insertions(+), 19 deletions(-)
diff --git a/fs/lustre/include/lustre_dlm.h b/fs/lustre/include/lustre_dlm.h
index 3a4f152..d08c48f 100644
--- a/fs/lustre/include/lustre_dlm.h
+++ b/fs/lustre/include/lustre_dlm.h
@@ -593,7 +593,6 @@ enum ldlm_cancel_flags {
LCF_BL_AST = 0x4, /* Cancel locks marked as LDLM_FL_BL_AST
* in the same RPC
*/
- LCF_ONE_LOCK = 0x8, /* Cancel locks pack only one lock. */
};
struct ldlm_flock {
diff --git a/fs/lustre/ldlm/ldlm_lockd.c b/fs/lustre/ldlm/ldlm_lockd.c
index 3a085db..abd853b 100644
--- a/fs/lustre/ldlm/ldlm_lockd.c
+++ b/fs/lustre/ldlm/ldlm_lockd.c
@@ -700,7 +700,8 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
* we can tell the server we have no lock. Otherwise, we
* should send cancel after dropping the cache.
*/
- if (ldlm_is_ast_sent(lock) || ldlm_is_failed(lock)) {
+ if ((ldlm_is_canceling(lock) && ldlm_is_bl_done(lock)) ||
+ ldlm_is_failed(lock)) {
LDLM_DEBUG(lock,
"callback on lock %#llx - lock disappeared",
dlm_req->lock_handle[0].cookie);
diff --git a/fs/lustre/ldlm/ldlm_request.c b/fs/lustre/ldlm/ldlm_request.c
index ef3ad28..11071d9 100644
--- a/fs/lustre/ldlm/ldlm_request.c
+++ b/fs/lustre/ldlm/ldlm_request.c
@@ -1055,8 +1055,9 @@ static int _ldlm_cancel_pack(struct ptlrpc_request *req, struct ldlm_lock *lock,
* Prepare and send a batched cancel RPC. It will include @count lock
* handles of locks given in @cancels list.
*/
-static int ldlm_cli_cancel_req(struct obd_export *exp, void *ptr,
- int count, enum ldlm_cancel_flags flags)
+static int ldlm_cli_cancel_req(struct obd_export *exp, struct ldlm_lock *lock,
+ struct list_head *head, int count,
+ enum ldlm_cancel_flags flags)
{
struct ptlrpc_request *req = NULL;
struct obd_import *imp;
@@ -1065,6 +1066,7 @@ static int ldlm_cli_cancel_req(struct obd_export *exp, void *ptr,
LASSERT(exp);
LASSERT(count > 0);
+ LASSERT(!head || !lock);
CFS_FAIL_TIMEOUT(OBD_FAIL_LDLM_PAUSE_CANCEL, cfs_fail_val);
@@ -1104,10 +1106,7 @@ static int ldlm_cli_cancel_req(struct obd_export *exp, void *ptr,
req->rq_reply_portal = LDLM_CANCEL_REPLY_PORTAL;
ptlrpc_at_set_req_timeout(req);
- if (flags & LCF_ONE_LOCK)
- rc = _ldlm_cancel_pack(req, ptr, NULL, count);
- else
- rc = _ldlm_cancel_pack(req, NULL, ptr, count);
+ rc = _ldlm_cancel_pack(req, lock, head, count);
if (rc == 0) {
ptlrpc_req_finished(req);
sent = count;
@@ -1265,7 +1264,8 @@ int ldlm_cli_cancel(const struct lustre_handle *lockh,
enum ldlm_cancel_flags flags)
{
struct obd_export *exp;
- int avail, count = 1, bl_ast = 0;
+ int avail, count = 1, separate = 0;
+ enum ldlm_lru_flags lru_flags = 0;
u64 rc = 0;
struct ldlm_namespace *ns;
struct ldlm_lock *lock;
@@ -1286,7 +1286,8 @@ int ldlm_cli_cancel(const struct lustre_handle *lockh,
LDLM_LOCK_RELEASE(lock);
return 0;
}
- bl_ast = 1;
+ if (ldlm_is_canceling(lock))
+ separate = 1;
} else if (ldlm_is_canceling(lock)) {
/* Lock is being canceled and the caller doesn't want to wait */
unlock_res_and_lock(lock);
@@ -1308,11 +1309,18 @@ int ldlm_cli_cancel(const struct lustre_handle *lockh,
if (rc == LDLM_FL_LOCAL_ONLY || flags & LCF_LOCAL) {
LDLM_LOCK_RELEASE(lock);
return 0;
+ } else if (rc == LDLM_FL_BL_AST) {
+ /* BL_AST lock must not wait. */
+ lru_flags |= LDLM_LRU_FLAG_NO_WAIT;
}
exp = lock->l_conn_export;
- if (bl_ast) { /* Send RPC immedaitly for LDLM_FL_BL_AST */
- ldlm_cli_cancel_req(exp, lock, count, flags | LCF_ONE_LOCK);
+ /* If a lock has been taken from lru for a batched cancel and a later
+ * BL_AST came, send a CANCEL RPC individually for it right away, not
+ * waiting for the batch to be handled.
+ */
+ if (separate) {
+ ldlm_cli_cancel_req(exp, lock, NULL, 1, flags);
LDLM_LOCK_RELEASE(lock);
return 0;
}
@@ -1332,7 +1340,7 @@ int ldlm_cli_cancel(const struct lustre_handle *lockh,
ns = ldlm_lock_to_ns(lock);
count += ldlm_cancel_lru_local(ns, &cancels, 0, avail - 1,
- LCF_BL_AST, 0);
+ LCF_BL_AST, lru_flags);
}
ldlm_cli_cancel_list(&cancels, count, NULL, flags);
@@ -1345,7 +1353,7 @@ int ldlm_cli_cancel(const struct lustre_handle *lockh,
* Return the number of cancelled locks.
*/
int ldlm_cli_cancel_list_local(struct list_head *cancels, int count,
- enum ldlm_cancel_flags flags)
+ enum ldlm_cancel_flags cancel_flags)
{
LIST_HEAD(head);
struct ldlm_lock *lock, *next;
@@ -1357,7 +1365,7 @@ int ldlm_cli_cancel_list_local(struct list_head *cancels, int count,
if (left-- == 0)
break;
- if (flags & LCF_LOCAL) {
+ if (cancel_flags & LCF_LOCAL) {
rc = LDLM_FL_LOCAL_ONLY;
ldlm_lock_cancel(lock);
} else {
@@ -1369,7 +1377,7 @@ int ldlm_cli_cancel_list_local(struct list_head *cancels, int count,
* with the LDLM_FL_BL_AST flag in a separate RPC from
* the one being generated now.
*/
- if (!(flags & LCF_BL_AST) && (rc == LDLM_FL_BL_AST)) {
+ if (!(cancel_flags & LCF_BL_AST) && (rc == LDLM_FL_BL_AST)) {
LDLM_DEBUG(lock, "Cancel lock separately");
list_move(&lock->l_bl_ast, &head);
bl_ast++;
@@ -1384,7 +1392,7 @@ int ldlm_cli_cancel_list_local(struct list_head *cancels, int count,
}
if (bl_ast > 0) {
count -= bl_ast;
- ldlm_cli_cancel_list(&head, bl_ast, NULL, 0);
+ ldlm_cli_cancel_list(&head, bl_ast, NULL, cancel_flags);
}
return count;
@@ -1887,11 +1895,11 @@ int ldlm_cli_cancel_list(struct list_head *cancels, int count,
ldlm_cancel_pack(req, cancels, count);
else
res = ldlm_cli_cancel_req(lock->l_conn_export,
- cancels, count,
+ NULL, cancels, count,
flags);
} else {
res = ldlm_cli_cancel_req(lock->l_conn_export,
- cancels, 1, flags);
+ NULL, cancels, 1, flags);
}
if (res < 0) {
--
1.8.3.1