Linux NFS development
 help / color / mirror / Atom feed
From: dai.ngo@oracle.com
To: Jeff Layton <jlayton@kernel.org>, chuck.lever@oracle.com
Cc: linux-nfs@vger.kernel.org
Subject: Re: [PATCH v2 1/2] NFSD: keep track of the number of courtesy clients in the system
Date: Mon, 29 Aug 2022 11:24:11 -0700	[thread overview]
Message-ID: <a27ff607-d386-bd19-c6d1-2d51f3af6108@oracle.com> (raw)
In-Reply-To: <d922f34be03a6df4bb8a0dd12df4a085ba983cb8.camel@kernel.org>


On 8/29/22 9:48 AM, Jeff Layton wrote:
> On Sun, 2022-08-28 at 17:47 -0700, Dai Ngo wrote:
>> Add counter nfs4_courtesy_client_count to nfsd_net to keep track
>> of the number of courtesy clients in the system.
>>
>> Signed-off-by: Dai Ngo <dai.ngo@oracle.com>
>> ---
>>   fs/nfsd/netns.h     |  2 ++
>>   fs/nfsd/nfs4state.c | 20 +++++++++++++++-----
>>   2 files changed, 17 insertions(+), 5 deletions(-)
>>
>> diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
>> index ffe17743cc74..2695dff1378a 100644
>> --- a/fs/nfsd/netns.h
>> +++ b/fs/nfsd/netns.h
>> @@ -192,6 +192,8 @@ struct nfsd_net {
>>   
>>   	atomic_t		nfs4_client_count;
>>   	int			nfs4_max_clients;
>> +
>> +	atomic_t		nfsd_courtesy_client_count;
>>   };
>>   
>>   /* Simple check to find out if a given net was properly initialized */
>> diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
>> index c5d199d7e6b4..3d8d7ebb5b91 100644
>> --- a/fs/nfsd/nfs4state.c
>> +++ b/fs/nfsd/nfs4state.c
>> @@ -169,7 +169,8 @@ static __be32 get_client_locked(struct nfs4_client *clp)
>>   	if (is_client_expired(clp))
>>   		return nfserr_expired;
>>   	atomic_inc(&clp->cl_rpc_users);
>> -	clp->cl_state = NFSD4_ACTIVE;
>> +	if (xchg(&clp->cl_state, NFSD4_ACTIVE) != NFSD4_ACTIVE)
> The xchg calls seem like overkill. The cl_state is protected by the
> nn->client_lock. Nothing else can race in and change it here.

I use the 'xchg' calls for convenience and readability and not for
protection in this case. But if you think this is overkill or
unnecessary then I will remove it.

Fix in v2.

Thanks,
-Dai

>
>> +		atomic_add_unless(&nn->nfsd_courtesy_client_count, -1, 0);
>>   	return nfs_ok;
>>   }
>>   
>> @@ -190,7 +191,8 @@ renew_client_locked(struct nfs4_client *clp)
>>   
>>   	list_move_tail(&clp->cl_lru, &nn->client_lru);
>>   	clp->cl_time = ktime_get_boottime_seconds();
>> -	clp->cl_state = NFSD4_ACTIVE;
>> +	if (xchg(&clp->cl_state, NFSD4_ACTIVE) != NFSD4_ACTIVE)
>> +		atomic_add_unless(&nn->nfsd_courtesy_client_count, -1, 0);
>>   }
>>   
>>   static void put_client_renew_locked(struct nfs4_client *clp)
>> @@ -2233,6 +2235,8 @@ __destroy_client(struct nfs4_client *clp)
>>   	if (clp->cl_cb_conn.cb_xprt)
>>   		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
>>   	atomic_add_unless(&nn->nfs4_client_count, -1, 0);
>> +	if (clp->cl_state != NFSD4_ACTIVE)
>> +		atomic_add_unless(&nn->nfsd_courtesy_client_count, -1, 0);
>>   	free_client(clp);
>>   	wake_up_all(&expiry_wq);
>>   }
>> @@ -4356,6 +4360,8 @@ void nfsd4_init_leases_net(struct nfsd_net *nn)
>>   	max_clients = (u64)si.totalram * si.mem_unit / (1024 * 1024 * 1024);
>>   	max_clients *= NFS4_CLIENTS_PER_GB;
>>   	nn->nfs4_max_clients = max_t(int, max_clients, NFS4_CLIENTS_PER_GB);
>> +
>> +	atomic_set(&nn->nfsd_courtesy_client_count, 0);
>>   }
>>   
>>   static void init_nfs4_replay(struct nfs4_replay *rp)
>> @@ -5864,7 +5870,7 @@ static void
>>   nfs4_get_client_reaplist(struct nfsd_net *nn, struct list_head *reaplist,
>>   				struct laundry_time *lt)
>>   {
>> -	unsigned int maxreap, reapcnt = 0;
>> +	unsigned int oldstate, maxreap, reapcnt = 0;
>>   	struct list_head *pos, *next;
>>   	struct nfs4_client *clp;
>>   
>> @@ -5878,8 +5884,12 @@ nfs4_get_client_reaplist(struct nfsd_net *nn, struct list_head *reaplist,
>>   			goto exp_client;
>>   		if (!state_expired(lt, clp->cl_time))
>>   			break;
>> -		if (!atomic_read(&clp->cl_rpc_users))
>> -			clp->cl_state = NFSD4_COURTESY;
>> +		oldstate = NFSD4_ACTIVE;
>> +		if (!atomic_read(&clp->cl_rpc_users)) {
>> +			oldstate = xchg(&clp->cl_state, NFSD4_COURTESY);
>> +			if (oldstate == NFSD4_ACTIVE)
>> +				atomic_inc(&nn->nfsd_courtesy_client_count);
>> +		}
>>   		if (!client_has_state(clp))
>>   			goto exp_client;
>>   		if (!nfs4_anylock_blockers(clp))

  reply	other threads:[~2022-08-29 18:24 UTC|newest]

Thread overview: 11+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-08-29  0:47 [PATCH v2 0/2] NFSD: memory shrinker for NFSv4 clients Dai Ngo
2022-08-29  0:47 ` [PATCH v2 1/2] NFSD: keep track of the number of courtesy clients in the system Dai Ngo
2022-08-29 16:48   ` Jeff Layton
2022-08-29 18:24     ` dai.ngo [this message]
2022-08-29 18:27       ` Jeff Layton
2022-08-29  0:47 ` [PATCH v2 2/2] NFSD: add shrinker to reap courtesy clients on low memory condition Dai Ngo
2022-08-29 17:15   ` Jeff Layton
2022-08-29 18:25     ` dai.ngo
2022-08-29 18:40       ` Jeff Layton
2022-08-29 18:52         ` dai.ngo
  -- strict thread matches above, loose matches on Subject: below --
2022-07-04 19:05 [PATCH v2 0/2] NFSD: handling memory shortage problem with Courteous server Dai Ngo
2022-07-04 19:05 ` [PATCH v2 1/2] NFSD: keep track of the number of courtesy clients in the system Dai Ngo

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=a27ff607-d386-bd19-c6d1-2d51f3af6108@oracle.com \
    --to=dai.ngo@oracle.com \
    --cc=chuck.lever@oracle.com \
    --cc=jlayton@kernel.org \
    --cc=linux-nfs@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox; see mirroring instructions
for how to clone and mirror all data and code used for this inbox.