From: Jiri Pirko <jiri@resnulli.us>
To: wangkeqi <wangkeqi_chris@163.com>
Cc: davem@davemloft.net, edumazet@google.com, kuba@kernel.org,
pabeni@redhat.com, netdev@vger.kernel.org,
linux-kernel@vger.kernel.org, fw@strlen.de,
wangkeqi <wangkeqiwang@didiglobal.com>,
kernel test robot <oliver.sang@intel.com>,
fengwei.yin@intel.com
Subject: Re: [PATCH net v4] connector: cn_netlink_has_listeners replaces proc_event_num_listeners
Date: Thu, 1 Feb 2024 16:58:12 +0100
Message-ID: <Zbu_lBFkeb8NUIek@nanopsycho>
In-Reply-To: <20240131014459.411158-1-wangkeqi_chris@163.com>

Wed, Jan 31, 2024 at 02:44:59AM CET, wangkeqi_chris@163.com wrote:
>From: wangkeqi <wangkeqiwang@didiglobal.com>
Care to fix your name, please?
>
>It is inaccurate to judge whether proc_event_num_listeners is
>cleared by cn_netlink_send_mult returning -ESRCH.
>In the case of stress-ng netlink-proc, -ESRCH will always be returned,
>because netlink_broadcast_filtered will return -ESRCH,
>which may cause stress-ng netlink-proc performance degradation.
>If the judgment condition is modified to whether there is a listener.
This sentence is incomplete.
>proc_event_num_listeners will still be wrong due to concurrency.
>So replace the counter with cn_netlink_has_listeners
I have read the whole patch description for the 5th time and I still
don't understand it :(
>
>Reported-by: kernel test robot <oliver.sang@intel.com>
>Closes: https://lore.kernel.org/oe-lkp/202401112259.b23a1567-oliver.sang@intel.com
>Fixes: c46bfba1337d ("connector: Fix proc_event_num_listeners count not cleared")
>Signed-off-by: wangkeqi <wangkeqiwang@didiglobal.com>
Same here.
>Cc: fengwei.yin@intel.com
>Cc: fw@strlen.de
>---
> drivers/connector/cn_proc.c | 33 +++++++++++++++++++++------------
> drivers/connector/connector.c | 9 +++++++++
> include/linux/connector.h | 1 +
> 3 files changed, 31 insertions(+), 12 deletions(-)
>
>diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
>index 3d5e6d705..4898e974c 100644
>--- a/drivers/connector/cn_proc.c
>+++ b/drivers/connector/cn_proc.c
>@@ -85,6 +85,16 @@ static int cn_filter(struct sock *dsk, struct sk_buff *skb, void *data)
> return 1;
> }
>
>+static int cn_netlink_has_listeners(void)
>+{
>+ struct sock *sk = get_cdev_nls();
>+
>+ if (sk)
>+ return netlink_has_listeners(sk, CN_IDX_PROC);
>+ else
>+ return 0;
>+}
>+
> static inline void send_msg(struct cn_msg *msg)
> {
> __u32 filter_data[2];
>@@ -108,9 +118,8 @@ static inline void send_msg(struct cn_msg *msg)
> filter_data[1] = 0;
> }
>
>- if (cn_netlink_send_mult(msg, msg->len, 0, CN_IDX_PROC, GFP_NOWAIT,
>- cn_filter, (void *)filter_data) == -ESRCH)
>- atomic_set(&proc_event_num_listeners, 0);
>+ cn_netlink_send_mult(msg, msg->len, 0, CN_IDX_PROC, GFP_NOWAIT,
>+ cn_filter, (void *)filter_data);
>
> local_unlock(&local_event.lock);
> }
>@@ -122,7 +131,7 @@ void proc_fork_connector(struct task_struct *task)
> __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
> struct task_struct *parent;
>
>- if (atomic_read(&proc_event_num_listeners) < 1)
>+ if (!cn_netlink_has_listeners())
> return;
>
> msg = buffer_to_cn_msg(buffer);
>@@ -151,7 +160,7 @@ void proc_exec_connector(struct task_struct *task)
> struct proc_event *ev;
> __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
>
>- if (atomic_read(&proc_event_num_listeners) < 1)
>+ if (!cn_netlink_has_listeners())
> return;
>
> msg = buffer_to_cn_msg(buffer);
>@@ -176,7 +185,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
> __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
> const struct cred *cred;
>
>- if (atomic_read(&proc_event_num_listeners) < 1)
>+ if (!cn_netlink_has_listeners())
> return;
>
> msg = buffer_to_cn_msg(buffer);
>@@ -213,7 +222,7 @@ void proc_sid_connector(struct task_struct *task)
> struct proc_event *ev;
> __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
>
>- if (atomic_read(&proc_event_num_listeners) < 1)
>+ if (!cn_netlink_has_listeners())
> return;
>
> msg = buffer_to_cn_msg(buffer);
>@@ -237,7 +246,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
> struct proc_event *ev;
> __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
>
>- if (atomic_read(&proc_event_num_listeners) < 1)
>+ if (!cn_netlink_has_listeners())
> return;
>
> msg = buffer_to_cn_msg(buffer);
>@@ -269,7 +278,7 @@ void proc_comm_connector(struct task_struct *task)
> struct proc_event *ev;
> __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
>
>- if (atomic_read(&proc_event_num_listeners) < 1)
>+ if (!cn_netlink_has_listeners())
> return;
>
> msg = buffer_to_cn_msg(buffer);
>@@ -295,7 +304,7 @@ void proc_coredump_connector(struct task_struct *task)
> struct task_struct *parent;
> __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
>
>- if (atomic_read(&proc_event_num_listeners) < 1)
>+ if (!cn_netlink_has_listeners())
> return;
>
> msg = buffer_to_cn_msg(buffer);
>@@ -328,7 +337,7 @@ void proc_exit_connector(struct task_struct *task)
> struct task_struct *parent;
> __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
>
>- if (atomic_read(&proc_event_num_listeners) < 1)
>+ if (!cn_netlink_has_listeners())
> return;
>
> msg = buffer_to_cn_msg(buffer);
>@@ -370,7 +379,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
> struct proc_event *ev;
> __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
>
>- if (atomic_read(&proc_event_num_listeners) < 1)
>+ if (!cn_netlink_has_listeners())
> return;
Are you still using &proc_event_num_listeners for anything? If not, why
don't you remove it entirely?
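If it can go, something along these lines on top of your patch
(untested sketch, from memory; assumes the PROC_CN_MCAST_LISTEN/IGNORE
handling in cn_proc_mcast_ctl() is the only remaining user of the
counter):

--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@
-static atomic_t proc_event_num_listeners = ATOMIC_INIT(0);
@@
 	case PROC_CN_MCAST_LISTEN:
-		atomic_inc(&proc_event_num_listeners);
 		break;
 	case PROC_CN_MCAST_IGNORE:
-		atomic_dec(&proc_event_num_listeners);
 		break;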
>
> msg = buffer_to_cn_msg(buffer);
>diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
>index 7f7b94f61..42bcb39ba 100644
>--- a/drivers/connector/connector.c
>+++ b/drivers/connector/connector.c
>@@ -129,6 +129,15 @@ int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group,
> }
> EXPORT_SYMBOL_GPL(cn_netlink_send);
>
>+struct sock *get_cdev_nls(void)
Perhaps name it cn_cdev_nls_get() to be aligned with the rest?
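Untested sketch of what I mean (your logic unchanged, just renamed and
without the else-after-return):

struct sock *cn_cdev_nls_get(void)
{
	if (!cn_already_initialized)
		return NULL;
	return cdev.nls;
}
EXPORT_SYMBOL_GPL(cn_cdev_nls_get);

The caller in cn_proc.c and the header declaration would need the same
rename, obviously.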
>+{
>+ if (cn_already_initialized == 1)
>+ return cdev.nls;
>+ else
>+ return NULL;
>+}
>+EXPORT_SYMBOL_GPL(get_cdev_nls);
>+
> /*
> * Callback helper - queues work and setup destructor for given data.
> */
>diff --git a/include/linux/connector.h b/include/linux/connector.h
>index cec2d99ae..255466aea 100644
>--- a/include/linux/connector.h
>+++ b/include/linux/connector.h
>@@ -127,6 +127,7 @@ int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid,
> */
> int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 group, gfp_t gfp_mask);
>
>+struct sock *get_cdev_nls(void);
Add empty line.
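I mean, keeping the surrounding declarations as they are:

int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 group, gfp_t gfp_mask);

struct sock *get_cdev_nls(void);

int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
			  const struct cb_id *id,
			  void (*callback)(struct cn_msg *, struct netlink_skb_parms *));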
> int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
> const struct cb_id *id,
> void (*callback)(struct cn_msg *, struct netlink_skb_parms *));
>--
>2.27.0
>
>