linuxppc-dev.lists.ozlabs.org archive mirror
 help / color / mirror / Atom feed
From: Robert Jennings <rcj@linux.vnet.ibm.com>
To: paulus@samba.org
Cc: Brian King <brking@linux.vnet.ibm.com>,
	linuxppc-dev@ozlabs.org,
	David Darrington <ddarring@linux.vnet.ibm.com>
Subject: [PATCH 04/16 v3] powerpc: Split retrieval of processor entitlement data into a helper routine
Date: Fri, 4 Jul 2008 07:52:07 -0500	[thread overview]
Message-ID: <20080704125207.GE1310@linux.vnet.ibm.com> (raw)
In-Reply-To: <20080704124449.GA1310@linux.vnet.ibm.com>

From: Nathan Fontenot <nfont@austin.ibm.com>

Split the retrieval of processor entitlement data returned in the H_GET_PPP
hcall into its own helper routine.

Signed-off-by: Nathan Fontenot <nfont@austin.ibm.com>
Signed-off-by: Robert Jennings <rcj@linux.vnet.ibm.com>

---
 arch/powerpc/kernel/lparcfg.c |   80 ++++++++++++++++++++++++------------------
 1 file changed, 45 insertions(+), 35 deletions(-)

Index: b/arch/powerpc/kernel/lparcfg.c
===================================================================
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -158,6 +158,18 @@ int h_get_mpp(struct hvcall_mpp_data *mp
 }
 EXPORT_SYMBOL(h_get_mpp);
 
+struct hvcall_ppp_data {
+	u64	entitlement;
+	u64	unallocated_entitlement;
+	u16	group_num;
+	u16	pool_num;
+	u8	capped;
+	u8	weight;
+	u8	unallocated_weight;
+	u16	active_procs_in_pool;
+	u16	active_system_procs;
+};
+
 /*
  * H_GET_PPP hcall returns info in 4 parms.
  *  entitled_capacity,unallocated_capacity,
@@ -178,20 +190,24 @@ EXPORT_SYMBOL(h_get_mpp);
  *              XXXX - Active processors in Physical Processor Pool.
  *                  XXXX  - Processors active on platform.
  */
-static unsigned int h_get_ppp(unsigned long *entitled,
-			      unsigned long *unallocated,
-			      unsigned long *aggregation,
-			      unsigned long *resource)
+static unsigned int h_get_ppp(struct hvcall_ppp_data *ppp_data)
 {
 	unsigned long rc;
 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
 
 	rc = plpar_hcall(H_GET_PPP, retbuf);
 
-	*entitled = retbuf[0];
-	*unallocated = retbuf[1];
-	*aggregation = retbuf[2];
-	*resource = retbuf[3];
+	ppp_data->entitlement = retbuf[0];
+	ppp_data->unallocated_entitlement = retbuf[1];
+
+	ppp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
+	ppp_data->pool_num = retbuf[2] & 0xffff;
+
+	ppp_data->capped = (retbuf[3] >> 6 * 8) & 0x01;
+	ppp_data->weight = (retbuf[3] >> 5 * 8) & 0xff;
+	ppp_data->unallocated_weight = (retbuf[3] >> 4 * 8) & 0xff;
+	ppp_data->active_procs_in_pool = (retbuf[3] >> 2 * 8) & 0xffff;
+	ppp_data->active_system_procs = retbuf[3] & 0xffff;
 
 	return rc;
 }
@@ -216,29 +232,27 @@ static unsigned h_pic(unsigned long *poo
  */
 static void parse_ppp_data(struct seq_file *m)
 {
-	unsigned long h_entitled, h_unallocated;
-	unsigned long h_aggregation, h_resource;
+	struct hvcall_ppp_data ppp_data;
 	int rc;
 
-	rc = h_get_ppp(&h_entitled, &h_unallocated, &h_aggregation,
-		       &h_resource);
+	rc = h_get_ppp(&ppp_data);
 	if (rc)
 		return;
 
-	seq_printf(m, "partition_entitled_capacity=%ld\n", h_entitled);
-	seq_printf(m, "group=%ld\n", (h_aggregation >> 2 * 8) & 0xffff);
-	seq_printf(m, "system_active_processors=%ld\n",
-		   (h_resource >> 0 * 8) & 0xffff);
+	seq_printf(m, "partition_entitled_capacity=%ld\n",
+	           ppp_data.entitlement);
+	seq_printf(m, "group=%d\n", ppp_data.group_num);
+	seq_printf(m, "system_active_processors=%d\n",
+	           ppp_data.active_system_procs);
 
 	/* pool related entries are apropriate for shared configs */
 	if (lppaca[0].shared_proc) {
 		unsigned long pool_idle_time, pool_procs;
 
-		seq_printf(m, "pool=%ld\n", (h_aggregation >> 0 * 8) & 0xffff);
+		seq_printf(m, "pool=%d\n", ppp_data.pool_num);
 
 		/* report pool_capacity in percentage */
-		seq_printf(m, "pool_capacity=%ld\n",
-			   ((h_resource >> 2 * 8) & 0xffff) * 100);
+		seq_printf(m, "pool_capacity=%d\n", ppp_data.group_num * 100);
 
 		rc = h_pic(&pool_idle_time, &pool_procs);
 		if (! rc) {
@@ -247,12 +261,12 @@ static void parse_ppp_data(struct seq_fi
 		}
 	}
 
-	seq_printf(m, "unallocated_capacity_weight=%ld\n",
-		   (h_resource >> 4 * 8) & 0xFF);
-
-	seq_printf(m, "capacity_weight=%ld\n", (h_resource >> 5 * 8) & 0xFF);
-	seq_printf(m, "capped=%ld\n", (h_resource >> 6 * 8) & 0x01);
-	seq_printf(m, "unallocated_capacity=%ld\n", h_unallocated);
+	seq_printf(m, "unallocated_capacity_weight=%d\n",
+		   ppp_data.unallocated_weight);
+	seq_printf(m, "capacity_weight=%d\n", ppp_data.weight);
+	seq_printf(m, "capped=%d\n", ppp_data.capped);
+	seq_printf(m, "unallocated_capacity=%ld\n",
+		   ppp_data.unallocated_entitlement);
 }
 
 /**
@@ -451,31 +465,27 @@ static int pseries_lparcfg_data(struct s
 
 static ssize_t update_ppp(u64 *entitlement, u8 *weight)
 {
-	unsigned long current_entitled;
-	unsigned long dummy;
-	unsigned long resource;
-	u8 current_weight, new_weight;
+	struct hvcall_ppp_data ppp_data;
+	u8 new_weight;
 	u64 new_entitled;
 	ssize_t retval;
 
 	/* Get our current parameters */
-	retval = h_get_ppp(&current_entitled, &dummy, &dummy, &resource);
+	retval = h_get_ppp(&ppp_data);
 	if (retval)
 		return retval;
 
-	current_weight = (resource >> 5 * 8) & 0xFF;
-
 	if (entitlement) {
-		new_weight = current_weight;
+		new_weight = ppp_data.weight;
 		new_entitled = *entitlement;
 	} else if (weight) {
 		new_weight = *weight;
-		new_entitled = current_entitled;
+		new_entitled = ppp_data.entitlement;
 	} else
 		return -EINVAL;
 
 	pr_debug("%s: current_entitled = %lu, current_weight = %u\n",
-		 __FUNCTION__, current_entitled, current_weight);
+	         __FUNCTION__, ppp_data.entitlement, ppp_data.weight);
 
 	pr_debug("%s: new_entitled = %lu, new_weight = %u\n",
		 __FUNCTION__, new_entitled, new_weight);

  parent reply	other threads:[~2008-07-04 12:52 UTC|newest]

Thread overview: 38+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2008-07-04 12:44 [PATCH 00/16 v3] powerpc: pSeries Cooperative Memory Overcommitment support Robert Jennings
2008-07-04 12:51 ` [PATCH 01/16 v3] powerpc: Remove extraneous error reporting for hcall failures in lparcfg Robert Jennings
2008-07-22  3:34   ` Paul Mackerras
2008-07-04 12:51 ` [PATCH 02/16 v3] powerpc: Split processor entitlement retrieval and gathering to helper routines Robert Jennings
2008-07-22 18:53   ` Nathan Fontenot
2008-07-04 12:51 ` [PATCH 03/16 v3] powerpc: Add memory entitlement capabilities to /proc/ppc64/lparcfg Robert Jennings
2008-07-22 18:55   ` Nathan Fontenot
2008-07-04 12:52 ` Robert Jennings [this message]
2008-07-22  5:54   ` [PATCH 04/16 v3] powerpc: Split retrieval of processor entitlement data into a helper routine Paul Mackerras
2008-07-22 18:49     ` Nathan Fontenot
2008-07-22 18:56   ` Nathan Fontenot
2008-07-04 12:52 ` [PATCH 05/16 v3] powerpc: Enable CMO feature during platform setup Robert Jennings
2008-07-04 12:52 ` Robert Jennings
2008-07-04 12:52 ` [PATCH 06/16 v3] powerpc: Utilities to set firmware page state Robert Jennings
2008-07-04 12:53 ` Robert Jennings
2008-07-04 12:53 ` [PATCH 07/16 v3] powerpc: Add collaborative memory manager Robert Jennings
2008-07-22  4:53   ` Paul Mackerras
2008-07-04 12:54 ` [PATCH 08/16 v3] powerpc: Do not probe PCI buses or eBus devices if CMO is enabled Robert Jennings
2008-07-14 21:35   ` Brian King
2008-07-04 12:54 ` [PATCH 09/16 v3] powerpc: Add CMO paging statistics Robert Jennings
2008-07-04 12:54 ` [PATCH 10/16 v3] powerpc: iommu enablement for CMO Robert Jennings
2008-07-05 17:51   ` Olof Johansson
2008-07-08 20:48   ` [PATCH 10/16 v3] [v2] " Robert Jennings
2008-07-22  5:04     ` Paul Mackerras
2008-07-22 13:30       ` Robert Jennings
2008-07-22  4:57   ` [PATCH 10/16 v3] " Paul Mackerras
2008-07-22 13:28     ` Robert Jennings
2008-07-04 12:55 ` [PATCH 11/16 v3] powerpc: vio bus support " Robert Jennings
2008-07-04 12:55 ` [PATCH 12/16 v3] powerpc: Verify CMO memory entitlement updates with virtual I/O Robert Jennings
2008-07-04 12:55 ` [PATCH 13/16 v3] ibmveth: Automatically enable larger rx buffer pools for larger mtu Robert Jennings
2008-07-04 12:56 ` [PATCH 14/16 v3] ibmveth: enable driver for CMO Robert Jennings
2008-07-08 20:38   ` [PATCH 14/16 v3] [v2] " Robert Jennings
2008-07-04 12:56 ` [PATCH 15/16 v3] ibmvscsi: driver enablement " Robert Jennings
2008-07-07 14:34   ` Brian King
2008-07-08 17:41     ` Robert Jennings
2008-07-08 20:35   ` [PATCH 15/16 v3] [v2] " Robert Jennings
2008-07-10 13:43     ` Brian King
2008-07-04 12:57 ` [PATCH 16/16 v3] powerpc: Update arch vector to indicate support " Robert Jennings

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20080704125207.GE1310@linux.vnet.ibm.com \
    --to=rcj@linux.vnet.ibm.com \
    --cc=brking@linux.vnet.ibm.com \
    --cc=ddarring@linux.vnet.ibm.com \
    --cc=linuxppc-dev@ozlabs.org \
    --cc=paulus@samba.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).