From: Alexey Kardashevskiy <aik@ozlabs.ru>
To: Wei Yang <weiyang@linux.vnet.ibm.com>,
	gwshan@linux.vnet.ibm.com, benh@kernel.crashing.org
Cc: linuxppc-dev@ozlabs.org
Subject: Re: [PATCH V4 6/6] powerpc/powernv: allocate sparse PE# when using M64 BAR in Single PE mode
Date: Fri, 2 Oct 2015 20:05:47 +1000
Message-ID: <560E56FB.60208@ozlabs.ru>
In-Reply-To: <1439949704-8023-7-git-send-email-weiyang@linux.vnet.ibm.com>

On 08/19/2015 12:01 PM, Wei Yang wrote:
> When M64 BAR is set to Single PE mode, the PE# assigned to VF could be
> sparse.
>
> This patch restructures the patch to allocate sparse PE# for VFs when M64

This patch restructures the code ;)


> BAR is set to Single PE mode. Also it renames the offset to pe_num_map to
> reflect that the content is the PE number.
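
For reference, the renamed field ends up being used in two different ways
depending on the mode, roughly as below (only an illustration pulled from
the hunks further down, reusing the patch's own identifiers):

	/* Single PE mode: each VF gets its own, possibly sparse, PE# */
	pe_num = pdn->pe_num_map[vf_index];

	/* Shared mode: one base PE#, VFs are numbered contiguously from it */
	pe_num = *pdn->pe_num_map + vf_index;
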
>
> Signed-off-by: Wei Yang <weiyang@linux.vnet.ibm.com>
> ---
>   arch/powerpc/include/asm/pci-bridge.h     |    2 +-
>   arch/powerpc/platforms/powernv/pci-ioda.c |   79 ++++++++++++++++++++++-------
>   2 files changed, 61 insertions(+), 20 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
> index 8aeba4c..b3a226b 100644
> --- a/arch/powerpc/include/asm/pci-bridge.h
> +++ b/arch/powerpc/include/asm/pci-bridge.h
> @@ -213,7 +213,7 @@ struct pci_dn {
>   #ifdef CONFIG_PCI_IOV
>   	u16     vfs_expanded;		/* number of VFs IOV BAR expanded */
>   	u16     num_vfs;		/* number of VFs enabled*/
> -	int     offset;			/* PE# for the first VF PE */
> +	int     *pe_num_map;		/* PE# for the first VF PE or array */
>   	bool    m64_single_mode;	/* Use M64 BAR in Single Mode */
>   #define IODA_INVALID_M64        (-1)
>   	int     (*m64_map)[PCI_SRIOV_NUM_BARS];
> diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
> index 4bc83b8..779f52a 100644
> --- a/arch/powerpc/platforms/powernv/pci-ioda.c
> +++ b/arch/powerpc/platforms/powernv/pci-ioda.c
> @@ -1243,7 +1243,7 @@ static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs)
>
>   			/* Map the M64 here */
>   			if (pdn->m64_single_mode) {
> -				pe_num = pdn->offset + j;
> +				pe_num = pdn->pe_num_map[j];
>   				rc = opal_pci_map_pe_mmio_window(phb->opal_id,
>   						pe_num, OPAL_M64_WINDOW_TYPE,
>   						pdn->m64_map[j][i], 0);
> @@ -1347,7 +1347,7 @@ void pnv_pci_sriov_disable(struct pci_dev *pdev)
>   	struct pnv_phb        *phb;
>   	struct pci_dn         *pdn;
>   	struct pci_sriov      *iov;
> -	u16 num_vfs;
> +	u16                    num_vfs, i;
>
>   	bus = pdev->bus;
>   	hose = pci_bus_to_host(bus);
> @@ -1361,14 +1361,21 @@ void pnv_pci_sriov_disable(struct pci_dev *pdev)
>
>   	if (phb->type == PNV_PHB_IODA2) {
>   		if (!pdn->m64_single_mode)
> -			pnv_pci_vf_resource_shift(pdev, -pdn->offset);
> +			pnv_pci_vf_resource_shift(pdev, -*pdn->pe_num_map);
>
>   		/* Release M64 windows */
>   		pnv_pci_vf_release_m64(pdev, num_vfs);
>
>   		/* Release PE numbers */
> -		bitmap_clear(phb->ioda.pe_alloc, pdn->offset, num_vfs);
> -		pdn->offset = 0;
> +		if (pdn->m64_single_mode) {
> +			for (i = 0; i < num_vfs; i++) {
> +				if (pdn->pe_num_map[i] != IODA_INVALID_PE)
> +					pnv_ioda_free_pe(phb, pdn->pe_num_map[i]);
> +			}
> +		} else
> +			bitmap_clear(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);
> +		/* Releasing pe_num_map */
> +		kfree(pdn->pe_num_map);
>   	}
>   }
>
> @@ -1394,7 +1401,10 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
>
>   	/* Reserve PE for each VF */
>   	for (vf_index = 0; vf_index < num_vfs; vf_index++) {
> -		pe_num = pdn->offset + vf_index;
> +		if (pdn->m64_single_mode)
> +			pe_num = pdn->pe_num_map[vf_index];
> +		else
> +			pe_num = *pdn->pe_num_map + vf_index;
>
>   		pe = &phb->ioda.pe_array[pe_num];
>   		pe->pe_number = pe_num;
> @@ -1436,6 +1446,7 @@ int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
>   	struct pnv_phb        *phb;
>   	struct pci_dn         *pdn;
>   	int                    ret;
> +	u16                    i;
>
>   	bus = pdev->bus;
>   	hose = pci_bus_to_host(bus);
> @@ -1458,20 +1469,42 @@ int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
>   			return -EBUSY;
>   		}
>
> +		/* Allocating pe_num_map */
> +		if (pdn->m64_single_mode)
> +			pdn->pe_num_map = kmalloc(sizeof(*pdn->pe_num_map) * num_vfs,
> +					GFP_KERNEL);
> +		else
> +			pdn->pe_num_map = kmalloc(sizeof(*pdn->pe_num_map), GFP_KERNEL);
> +
> +		if (!pdn->pe_num_map)
> +			return -ENOMEM;

[*]

> +
>   		/* Calculate available PE for required VFs */
> -		mutex_lock(&phb->ioda.pe_alloc_mutex);
> -		pdn->offset = bitmap_find_next_zero_area(
> -			phb->ioda.pe_alloc, phb->ioda.total_pe,
> -			0, num_vfs, 0);
> -		if (pdn->offset >= phb->ioda.total_pe) {
> +		if (pdn->m64_single_mode) {
> +			for (i = 0; i < num_vfs; i++)
> +				pdn->pe_num_map[i] = IODA_INVALID_PE;

It would be cleaner to do this initialization right after the check that
kmalloc() did not fail, i.e. at [*]
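
In code, the suggested ordering looks roughly like this (just a sketch
reusing the identifiers from the hunks above, not a tested drop-in):

	/* Allocate pe_num_map first, as the patch already does ... */
	if (pdn->m64_single_mode)
		pdn->pe_num_map = kmalloc(sizeof(*pdn->pe_num_map) * num_vfs,
				GFP_KERNEL);
	else
		pdn->pe_num_map = kmalloc(sizeof(*pdn->pe_num_map), GFP_KERNEL);

	if (!pdn->pe_num_map)
		return -ENOMEM;

	/*
	 * ... and mark every slot invalid straight away, so any later
	 * error path can walk the array and free only the PEs that were
	 * actually allocated.
	 */
	if (pdn->m64_single_mode)
		for (i = 0; i < num_vfs; i++)
			pdn->pe_num_map[i] = IODA_INVALID_PE;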


> +			for (i = 0; i < num_vfs; i++) {
> +				pdn->pe_num_map[i] = pnv_ioda_alloc_pe(phb);
> +				if (pdn->pe_num_map[i] == IODA_INVALID_PE) {
> +					ret = -EBUSY;
> +					goto m64_failed;
> +				}
> +			}
> +		} else {
> +			mutex_lock(&phb->ioda.pe_alloc_mutex);
> +			*pdn->pe_num_map = bitmap_find_next_zero_area(
> +				phb->ioda.pe_alloc, phb->ioda.total_pe,
> +				0, num_vfs, 0);
> +			if (*pdn->pe_num_map >= phb->ioda.total_pe) {
> +				mutex_unlock(&phb->ioda.pe_alloc_mutex);
> +				dev_info(&pdev->dev, "Failed to enable VF%d\n", num_vfs);
> +				kfree(pdn->pe_num_map);
> +				return -EBUSY;
> +			}
> +			bitmap_set(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);
>   			mutex_unlock(&phb->ioda.pe_alloc_mutex);
> -			dev_info(&pdev->dev, "Failed to enable VF%d\n", num_vfs);
> -			pdn->offset = 0;
> -			return -EBUSY;
>   		}
> -		bitmap_set(phb->ioda.pe_alloc, pdn->offset, num_vfs);
>   		pdn->num_vfs = num_vfs;
> -		mutex_unlock(&phb->ioda.pe_alloc_mutex);
>
>   		/* Assign M64 window accordingly */
>   		ret = pnv_pci_vf_assign_m64(pdev, num_vfs);
> @@ -1486,7 +1519,7 @@ int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
>   		 * Otherwise, the PE# for the VF will conflict with others.
>   		 */
>   		if (!pdn->m64_single_mode) {
> -			ret = pnv_pci_vf_resource_shift(pdev, pdn->offset);
> +			ret = pnv_pci_vf_resource_shift(pdev, *pdn->pe_num_map);
>   			if (ret)
>   				goto m64_failed;
>   		}
> @@ -1498,8 +1531,16 @@ int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
>   	return 0;
>
>   m64_failed:
> -	bitmap_clear(phb->ioda.pe_alloc, pdn->offset, num_vfs);
> -	pdn->offset = 0;
> +	if (pdn->m64_single_mode) {
> +		for (i = 0; i < num_vfs; i++) {
> +			if (pdn->pe_num_map[i] != IODA_INVALID_PE)
> +				pnv_ioda_free_pe(phb, pdn->pe_num_map[i]);
> +		}
> +	} else
> +		bitmap_clear(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);
> +
> +	/* Releasing pe_num_map */
> +	kfree(pdn->pe_num_map);
>
>   	return ret;
>   }
>


-- 
Alexey
