linuxppc-dev.lists.ozlabs.org archive mirror
 help / color / mirror / Atom feed
From: Michael Bringmann <mwb@linux.vnet.ibm.com>
To: linuxppc-dev@lists.ozlabs.org
Subject: Re: [RFC v5 2/6] powerpc/cpu: Conditionally acquire/release DRC index
Date: Tue, 22 May 2018 18:46:10 -0500	[thread overview]
Message-ID: <4cc2c20c-4aa4-7f2b-f446-46d1a2a53bb9@linux.vnet.ibm.com> (raw)
In-Reply-To: <f1b2b32c-d8bb-4e25-6272-1fc41ffe1502@linux.vnet.ibm.com>

Okay.  Moving the validity check mentioned at the bottom of this patch to 4/6,
"Provide CPU readd operation".

On 05/22/2018 03:17 PM, Nathan Fontenot wrote:
> On 05/21/2018 12:52 PM, Michael Bringmann wrote:
>> powerpc/cpu: Modify dlpar_cpu_add and dlpar_cpu_remove to allow the
>> skipping of DRC index acquire or release operations during the CPU
>> add or remove operations.  This is intended to support subsequent
>> changes to provide a 'CPU readd' operation.
>>
>> Signed-off-by: Michael Bringmann <mwb@linux.vnet.ibm.com>
>> ---
>>  arch/powerpc/platforms/pseries/hotplug-cpu.c |   71 +++++++++++++++-----------
>>  1 file changed, 42 insertions(+), 29 deletions(-)
>>
>> diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
>> index a408217..ec78cc6 100644
>> --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
>> +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
>> @@ -474,7 +474,7 @@ static bool valid_cpu_drc_index(struct device_node *parent, u32 drc_index)
>>  				&cdata);
>>  }
>>
>> -static ssize_t dlpar_cpu_add(u32 drc_index)
>> +static ssize_t dlpar_cpu_add(u32 drc_index, bool acquire_drc)
>>  {
>>  	struct device_node *dn, *parent;
>>  	int rc, saved_rc;
>> @@ -499,19 +499,22 @@ static ssize_t dlpar_cpu_add(u32 drc_index)
>>  		return -EINVAL;
>>  	}
>>
>> -	rc = dlpar_acquire_drc(drc_index);
>> -	if (rc) {
>> -		pr_warn("Failed to acquire DRC, rc: %d, drc index: %x\n",
>> -			rc, drc_index);
>> -		of_node_put(parent);
>> -		return -EINVAL;
>> +	if (acquire_drc) {
>> +		rc = dlpar_acquire_drc(drc_index);
>> +		if (rc) {
>> +			pr_warn("Failed to acquire DRC, rc: %d, drc index: %x\n",
>> +				rc, drc_index);
>> +			of_node_put(parent);
>> +			return -EINVAL;
>> +		}
>>  	}
>>
>>  	dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
>>  	if (!dn) {
>>  		pr_warn("Failed call to configure-connector, drc index: %x\n",
>>  			drc_index);
>> -		dlpar_release_drc(drc_index);
>> +		if (acquire_drc)
>> +			dlpar_release_drc(drc_index);
>>  		of_node_put(parent);
>>  		return -EINVAL;
>>  	}
>> @@ -526,8 +529,9 @@ static ssize_t dlpar_cpu_add(u32 drc_index)
>>  		pr_warn("Failed to attach node %s, rc: %d, drc index: %x\n",
>>  			dn->name, rc, drc_index);
>>
>> -		rc = dlpar_release_drc(drc_index);
>> -		if (!rc)
>> +		if (acquire_drc)
>> +			rc = dlpar_release_drc(drc_index);
>> +		if (!rc || acquire_drc)
>>  			dlpar_free_cc_nodes(dn);
>>
>>  		return saved_rc;
>> @@ -540,7 +544,7 @@ static ssize_t dlpar_cpu_add(u32 drc_index)
>>  			dn->name, rc, drc_index);
>>
>>  		rc = dlpar_detach_node(dn);
>> -		if (!rc)
>> +		if (!rc && acquire_drc)
>>  			dlpar_release_drc(drc_index);
>>
>>  		return saved_rc;
>> @@ -608,7 +612,8 @@ static int dlpar_offline_cpu(struct device_node *dn)
>>
>>  }
>>
>> -static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index)
>> +static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index,
>> +				bool release_drc)
>>  {
>>  	int rc;
>>
>> @@ -621,12 +626,14 @@ static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index)
>>  		return -EINVAL;
>>  	}
>>
>> -	rc = dlpar_release_drc(drc_index);
>> -	if (rc) {
>> -		pr_warn("Failed to release drc (%x) for CPU %s, rc: %d\n",
>> -			drc_index, dn->name, rc);
>> -		dlpar_online_cpu(dn);
>> -		return rc;
>> +	if (release_drc) {
>> +		rc = dlpar_release_drc(drc_index);
>> +		if (rc) {
>> +			pr_warn("Failed to release drc (%x) for CPU %s, rc: %d\n",
>> +				drc_index, dn->name, rc);
>> +			dlpar_online_cpu(dn);
>> +			return rc;
>> +		}
>>  	}
>>
>>  	rc = dlpar_detach_node(dn);
>> @@ -635,7 +642,10 @@ static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index)
>>
>>  		pr_warn("Failed to detach CPU %s, rc: %d", dn->name, rc);
>>
>> -		rc = dlpar_acquire_drc(drc_index);
>> +		if (release_drc)
>> +			rc = dlpar_acquire_drc(drc_index);
>> +		else
>> +			rc = 0;
>>  		if (!rc)
>>  			dlpar_online_cpu(dn);
>>
>> @@ -664,7 +674,7 @@ static struct device_node *cpu_drc_index_to_dn(u32 drc_index)
>>  	return dn;
>>  }
>>
>> -static int dlpar_cpu_remove_by_index(u32 drc_index)
>> +static int dlpar_cpu_remove_by_index(u32 drc_index, bool release_drc)
>>  {
>>  	struct device_node *dn;
>>  	int rc;
>> @@ -676,7 +686,7 @@ static int dlpar_cpu_remove_by_index(u32 drc_index)
>>  		return -ENODEV;
>>  	}
>>
>> -	rc = dlpar_cpu_remove(dn, drc_index);
>> +	rc = dlpar_cpu_remove(dn, drc_index, release_drc);
>>  	of_node_put(dn);
>>  	return rc;
>>  }
>> @@ -741,7 +751,7 @@ static int dlpar_cpu_remove_by_count(u32 cpus_to_remove)
>>  	}
>>
>>  	for (i = 0; i < cpus_to_remove; i++) {
>> -		rc = dlpar_cpu_remove_by_index(cpu_drcs[i]);
>> +		rc = dlpar_cpu_remove_by_index(cpu_drcs[i], true);
>>  		if (rc)
>>  			break;
>>
>> @@ -752,7 +762,7 @@ static int dlpar_cpu_remove_by_count(u32 cpus_to_remove)
>>  		pr_warn("CPU hot-remove failed, adding back removed CPUs\n");
>>
>>  		for (i = 0; i < cpus_removed; i++)
>> -			dlpar_cpu_add(cpu_drcs[i]);
>> +			dlpar_cpu_add(cpu_drcs[i], true);
>>
>>  		rc = -EINVAL;
>>  	} else {
>> @@ -843,7 +853,7 @@ static int dlpar_cpu_add_by_count(u32 cpus_to_add)
>>  	}
>>
>>  	for (i = 0; i < cpus_to_add; i++) {
>> -		rc = dlpar_cpu_add(cpu_drcs[i]);
>> +		rc = dlpar_cpu_add(cpu_drcs[i], true);
>>  		if (rc)
>>  			break;
>>
>> @@ -854,7 +864,7 @@ static int dlpar_cpu_add_by_count(u32 cpus_to_add)
>>  		pr_warn("CPU hot-add failed, removing any added CPUs\n");
>>
>>  		for (i = 0; i < cpus_added; i++)
>> -			dlpar_cpu_remove_by_index(cpu_drcs[i]);
>> +			dlpar_cpu_remove_by_index(cpu_drcs[i], true);
>>
>>  		rc = -EINVAL;
>>  	} else {
>> @@ -880,7 +890,7 @@ int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
>>  		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
>>  			rc = dlpar_cpu_remove_by_count(count);
>>  		else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
>> -			rc = dlpar_cpu_remove_by_index(drc_index);
>> +			rc = dlpar_cpu_remove_by_index(drc_index, true);
>>  		else
>>  			rc = -EINVAL;
>>  		break;
>> @@ -888,7 +898,7 @@ int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
>>  		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
>>  			rc = dlpar_cpu_add_by_count(count);
>>  		else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
>> -			rc = dlpar_cpu_add(drc_index);
>> +			rc = dlpar_cpu_add(drc_index, true);
>>  		else
>>  			rc = -EINVAL;
>>  		break;
>> @@ -913,7 +923,7 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
>>  	if (rc)
>>  		return -EINVAL;
>>
>> -	rc = dlpar_cpu_add(drc_index);
>> +	rc = dlpar_cpu_add(drc_index, true);
>>
>>  	return rc ? rc : count;
>>  }
>> @@ -934,7 +944,7 @@ static ssize_t dlpar_cpu_release(const char *buf, size_t count)
>>  		return -EINVAL;
>>  	}
>>
>> -	rc = dlpar_cpu_remove(dn, drc_index);
>> +	rc = dlpar_cpu_remove(dn, drc_index, true);
>>  	of_node_put(dn);
>>
>>  	return rc ? rc : count;
>> @@ -948,6 +958,9 @@ static int pseries_smp_notifier(struct notifier_block *nb,
>>  	struct of_reconfig_data *rd = data;
>>  	int err = 0;
>>
>> +	if (strcmp(rd->dn->type, "cpu"))
>> +		return notifier_from_errno(err);> +
> 
> This last change doesn't seem to fit in this patch, should this be a part of a different patch?
> 
> -Nathan
> 
>>  	switch (action) {
>>  	case OF_RECONFIG_ATTACH_NODE:
>>  		err = pseries_add_processor(rd->dn);
>>
> 
> 

-- 
Michael W. Bringmann
Linux Technology Center
IBM Corporation
Tie-Line  363-5196
External: (512) 286-5196
Cell:       (512) 466-0650
mwb@linux.vnet.ibm.com

  reply	other threads:[~2018-05-22 23:46 UTC|newest]

Thread overview: 14+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2018-05-21 17:51 [RFC v5 0/6] powerpc/hotplug: Fix affinity assoc for LPAR migration Michael Bringmann
2018-05-21 17:52 ` [RFC v5 1/6] powerpc/drmem: Export 'dynamic-memory' loader Michael Bringmann
2018-05-21 17:52 ` [RFC v5 2/6] powerpc/cpu: Conditionally acquire/release DRC index Michael Bringmann
2018-05-22 20:17   ` Nathan Fontenot
2018-05-22 23:46     ` Michael Bringmann [this message]
2018-05-21 17:52 ` [RFC v5 3/6] migration/dlpar: Add device readd queuing function Michael Bringmann
2018-05-22 20:24   ` Thomas Falcon
2018-05-22 23:49     ` Michael Bringmann
2018-05-21 17:52 ` [RFC v5 4/6] powerpc/dlpar: Provide CPU readd operation Michael Bringmann
2018-05-21 17:52 ` [RFC v5 5/6] powerpc/mobility: Add lock/unlock device hotplug Michael Bringmann
2018-05-21 17:52 ` [RFC v5 6/6] migration/memory: Update memory for assoc changes Michael Bringmann
2018-05-22 21:11   ` Thomas Falcon
2018-05-22 23:54     ` Michael Bringmann
  -- strict thread matches above, loose matches on Subject: below --
2018-05-22 23:36 [RFC v6 0/6] powerpc/hotplug: Fix affinity assoc for LPAR migration Michael Bringmann
2018-05-22 23:36 ` [RFC v5 2/6] powerpc/cpu: Conditionally acquire/release DRC index Michael Bringmann

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=4cc2c20c-4aa4-7f2b-f446-46d1a2a53bb9@linux.vnet.ibm.com \
    --to=mwb@linux.vnet.ibm.com \
    --cc=linuxppc-dev@lists.ozlabs.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).