* [PATCH] PPC64 iSeries: cleanup viopath
@ 2005-03-15  3:34 Stephen Rothwell
  2005-03-15 14:32 ` Hollis Blanchard
  0 siblings, 1 reply; 6+ messages in thread
From: Stephen Rothwell @ 2005-03-15  3:34 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Linus, ppc64-dev, LKML

Hi Andrew,

Since you brought this file to my attention, I figured I might as well do
some simple cleanups.  This patch does:
	- single bit int bitfields are a bit suspect and Andrew pointed
	  out recently that they are probably slower to access than ints
	  (see the sketch below)
	- get rid of some more studly caps
	- define the semaphore and the atomic in struct alloc_parms
	  rather than pointers to them since we just allocate them on
	  the stack anyway.
	- one small white space cleanup
	- use the HvLpIndexInvalid constant instead of its value
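
A quick sketch of that first point (illustrative only, not part of the
patch; the behaviour noted is what gcc typically does):

/*
 * A signed single-bit bitfield can represent just 0 and -1 (storing 1
 * is implementation-defined and typically yields -1 with gcc), and
 * each access compiles to a load plus mask/shift rather than a plain
 * load or store.
 */
#include <stdio.h>

struct status {
	int flag:1;		/* signed: holds 0 or -1 */
	unsigned int uflag:1;	/* unsigned: holds 0 or 1 */
};

int main(void)
{
	struct status s = { .flag = 1, .uflag = 1 };

	printf("%d %u\n", s.flag, s.uflag);	/* typically prints "-1 1" */
	if (s.flag == 1)
		printf("never reached\n");	/* -1 != 1 */
	return 0;
}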

Built and booted on iSeries (which is the only place it is used).

Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
-- 
Cheers,
Stephen Rothwell                    sfr@canb.auug.org.au
http://www.canb.auug.org.au/~sfr/

diff -ruNp linus/arch/ppc64/kernel/viopath.c linus-cleanup.1/arch/ppc64/kernel/viopath.c
--- linus/arch/ppc64/kernel/viopath.c	2005-03-13 04:07:42.000000000 +1100
+++ linus-cleanup.1/arch/ppc64/kernel/viopath.c	2005-03-15 14:02:48.000000000 +1100
@@ -42,6 +42,7 @@
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
+#include <asm/iSeries/HvTypes.h>
 #include <asm/iSeries/LparData.h>
 #include <asm/iSeries/HvLpEvent.h>
 #include <asm/iSeries/HvLpConfig.h>
@@ -56,8 +57,8 @@
  * But this allows for other support in the future.
  */
 static struct viopathStatus {
-	int isOpen:1;		/* Did we open the path?            */
-	int isActive:1;		/* Do we have a mon msg outstanding */
+	int isOpen;		/* Did we open the path?            */
+	int isActive;		/* Do we have a mon msg outstanding */
 	int users[VIO_MAX_SUBTYPES];
 	HvLpInstanceId mSourceInst;
 	HvLpInstanceId mTargetInst;
@@ -81,10 +82,10 @@ static void handleMonitorEvent(struct Hv
  * blocks on the semaphore and the handler posts the semaphore.  However,
  * if system_state is not SYSTEM_RUNNING, then wait_atomic is used ...
  */
-struct doneAllocParms_t {
-	struct semaphore *sem;
+struct alloc_parms {
+	struct semaphore sem;
 	int number;
-	atomic_t *wait_atomic;
+	atomic_t wait_atomic;
 	int used_wait_atomic;
 };
 
@@ -97,9 +98,9 @@ static u8 viomonseq = 22;
 /* Our hosting logical partition.  We get this at startup
  * time, and different modules access this variable directly.
  */
-HvLpIndex viopath_hostLp = 0xff;	/* HvLpIndexInvalid */
+HvLpIndex viopath_hostLp = HvLpIndexInvalid;
 EXPORT_SYMBOL(viopath_hostLp);
-HvLpIndex viopath_ourLp = 0xff;
+HvLpIndex viopath_ourLp = HvLpIndexInvalid;
 EXPORT_SYMBOL(viopath_ourLp);
 
 /* For each kind of incoming event we set a pointer to a
@@ -200,7 +201,7 @@ EXPORT_SYMBOL(viopath_isactive);
 
 /*
  * We cache the source and target instance ids for each
- * partition.  
+ * partition.
  */
 HvLpInstanceId viopath_sourceinst(HvLpIndex lp)
 {
@@ -450,36 +451,33 @@ static void vio_handleEvent(struct HvLpE
 
 static void viopath_donealloc(void *parm, int number)
 {
-	struct doneAllocParms_t *parmsp = (struct doneAllocParms_t *)parm;
+	struct alloc_parms *parmsp = parm;
 
 	parmsp->number = number;
 	if (parmsp->used_wait_atomic)
-		atomic_set(parmsp->wait_atomic, 0);
+		atomic_set(&parmsp->wait_atomic, 0);
 	else
-		up(parmsp->sem);
+		up(&parmsp->sem);
 }
 
 static int allocateEvents(HvLpIndex remoteLp, int numEvents)
 {
-	struct doneAllocParms_t parms;
-	DECLARE_MUTEX_LOCKED(Semaphore);
-	atomic_t wait_atomic;
+	struct alloc_parms parms;
 
 	if (system_state != SYSTEM_RUNNING) {
 		parms.used_wait_atomic = 1;
-		atomic_set(&wait_atomic, 1);
-		parms.wait_atomic = &wait_atomic;
+		atomic_set(&parms.wait_atomic, 1);
 	} else {
 		parms.used_wait_atomic = 0;
-		parms.sem = &Semaphore;
+		init_MUTEX_LOCKED(&parms.sem);
 	}
 	mf_allocate_lp_events(remoteLp, HvLpEvent_Type_VirtualIo, 250,	/* It would be nice to put a real number here! */
 			    numEvents, &viopath_donealloc, &parms);
 	if (system_state != SYSTEM_RUNNING) {
-		while (atomic_read(&wait_atomic))
+		while (atomic_read(&parms.wait_atomic))
 			mb();
 	} else
-		down(&Semaphore);
+		down(&parms.sem);
 	return parms.number;
 }
 
@@ -558,8 +556,7 @@ int viopath_close(HvLpIndex remoteLp, in
 	unsigned long flags;
 	int i;
 	int numOpen;
-	struct doneAllocParms_t doneAllocParms;
-	DECLARE_MUTEX_LOCKED(Semaphore);
+	struct alloc_parms parms;
 
 	if ((remoteLp >= HvMaxArchitectedLps) || (remoteLp == HvLpIndexInvalid))
 		return -EINVAL;
@@ -580,11 +577,11 @@ int viopath_close(HvLpIndex remoteLp, in
 
 	spin_unlock_irqrestore(&statuslock, flags);
 
-	doneAllocParms.used_wait_atomic = 0;
-	doneAllocParms.sem = &Semaphore;
+	parms.used_wait_atomic = 0;
+	init_MUTEX_LOCKED(&parms.sem);
 	mf_deallocate_lp_events(remoteLp, HvLpEvent_Type_VirtualIo,
-			      numReq, &viopath_donealloc, &doneAllocParms);
-	down(&Semaphore);
+			      numReq, &viopath_donealloc, &parms);
+	down(&parms.sem);
 
 	spin_lock_irqsave(&statuslock, flags);
 	for (i = 0, numOpen = 0; i < VIO_MAX_SUBTYPES; i++)

* Re: [PATCH] PPC64 iSeries: cleanup viopath
  2005-03-15  3:34 [PATCH] PPC64 iSeries: cleanup viopath Stephen Rothwell
@ 2005-03-15 14:32 ` Hollis Blanchard
  2005-03-15 15:53   ` Stephen Rothwell
  0 siblings, 1 reply; 6+ messages in thread
From: Hollis Blanchard @ 2005-03-15 14:32 UTC (permalink / raw)
  To: Stephen Rothwell; +Cc: Linus, Andrew Morton, ppc64-dev, LKML

On Mar 14, 2005, at 9:34 PM, Stephen Rothwell wrote:
>
> Since you brought this file to my attention, I figured I might as well 
> do
> some simple cleanups.  This patch does:
> 	- single bit int bitfields are a bit suspect and Andrew pointed
> 	  out recently that they are probably slower to access than ints

> --- linus/arch/ppc64/kernel/viopath.c	2005-03-13 04:07:42.000000000 
> +1100
> +++ linus-cleanup.1/arch/ppc64/kernel/viopath.c	2005-03-15 
> 14:02:48.000000000 +1100
> @@ -56,8 +57,8 @@
>   * But this allows for other support in the future.
>   */
>  static struct viopathStatus {
> -	int isOpen:1;		/* Did we open the path?            */
> -	int isActive:1;		/* Do we have a mon msg outstanding */
> +	int isOpen;		/* Did we open the path?            */
> +	int isActive;		/* Do we have a mon msg outstanding */
>  	int users[VIO_MAX_SUBTYPES];
>  	HvLpInstanceId mSourceInst;
>  	HvLpInstanceId mTargetInst;

Why not use a byte instead of a full int (reordering the members for 
alignment)?
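
Something like this, say (just a sketch - the member order is
illustrative, and it assumes HvLpInstanceId is a 16-bit type):

struct viopathStatus {
	int users[VIO_MAX_SUBTYPES];
	HvLpInstanceId mSourceInst;	/* assumed 16-bit */
	HvLpInstanceId mTargetInst;
	u8 isOpen;		/* Did we open the path?            */
	u8 isActive;		/* Do we have a mon msg outstanding */
};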

-- 
Hollis Blanchard
IBM Linux Technology Center


* Re: [PATCH] PPC64 iSeries: cleanup viopath
  2005-03-15 14:32 ` Hollis Blanchard
@ 2005-03-15 15:53   ` Stephen Rothwell
  2005-03-15 17:43     ` Linas Vepstas
  2005-03-16 15:12     ` Hollis Blanchard
  0 siblings, 2 replies; 6+ messages in thread
From: Stephen Rothwell @ 2005-03-15 15:53 UTC (permalink / raw)
  To: Hollis Blanchard; +Cc: akpm, linuxppc64-dev, torvalds, linux-kernel

On Tue, 15 Mar 2005 08:32:27 -0600 Hollis Blanchard <hollis@penguinppc.org> wrote:
>
> On Mar 14, 2005, at 9:34 PM, Stephen Rothwell wrote:
> >
> > Since you brought this file to my attention, I figured I might as well 
> > do
> > some simple cleanups.  This patch does:
> > 	- single bit int bitfields are a bit suspect and Andrew pointed
> > 	  out recently that they are probably slower to access than ints
> 
> > --- linus/arch/ppc64/kernel/viopath.c	2005-03-13 04:07:42.000000000 
> > +1100
> > +++ linus-cleanup.1/arch/ppc64/kernel/viopath.c	2005-03-15 
> > 14:02:48.000000000 +1100
> > @@ -56,8 +57,8 @@
> >   * But this allows for other support in the future.
> >   */
> >  static struct viopathStatus {
> > -	int isOpen:1;		/* Did we open the path?            */
> > -	int isActive:1;		/* Do we have a mon msg outstanding */
> > +	int isOpen;		/* Did we open the path?            */
> > +	int isActive;		/* Do we have a mon msg outstanding */
> >  	int users[VIO_MAX_SUBTYPES];
> >  	HvLpInstanceId mSourceInst;
> >  	HvLpInstanceId mTargetInst;
> 
> Why not use a byte instead of a full int (reordering the members for 
> alignment)?

Because "classical" boleans are ints.

Because I don't know the relative speed of accessing single byte variables.

Because it was easy.

Because we only allocate 32 of these structures.  Changing them really
only adds four bytes per structure.  I guess using bytes and rearranging
the structure could actually save 4 bytes per structure.

I originally changed them to unsigned int single bit bitfields, but
changed my mind - would that be better?

It really makes little difference; I was just trying to get rid of the
silly signed single bit bitfields ...

-- 
Cheers,
Stephen Rothwell                    sfr@canb.auug.org.au
http://www.canb.auug.org.au/~sfr/

* Re: [PATCH] PPC64 iSeries: cleanup viopath
  2005-03-15 15:53   ` Stephen Rothwell
@ 2005-03-15 17:43     ` Linas Vepstas
  2005-03-15 17:49       ` Brad Boyer
  2005-03-16 15:12     ` Hollis Blanchard
  1 sibling, 1 reply; 6+ messages in thread
From: Linas Vepstas @ 2005-03-15 17:43 UTC (permalink / raw)
  To: Stephen Rothwell
  Cc: Hollis Blanchard, akpm, linuxppc64-dev, torvalds, linux-kernel

On Wed, Mar 16, 2005 at 02:53:39AM +1100, Stephen Rothwell was heard to remark:
> On Tue, 15 Mar 2005 08:32:27 -0600 Hollis Blanchard <hollis@penguinppc.org> wrote:
> > 
> > Why not use a byte instead of a full int (reordering the members for 
> > alignment)?
> 
> Because "classical" booleans are ints.
> 
> Because I don't know the relative speed of accessing single byte variables.
> 
> Because it was easy.
> 
> Because we only allocate 32 of these structures.  Changing them really
> only adds four bytes per structure.  I guess using bytes and rearranging
> the structure could actually save 4 bytes per structure.

FWIW, keep in mind that a cache miss due to large structures not fitting
in the cache is a zillion times more expensive than any byte-access
overhead in the cpu (even if byte operands had a cpu perf overhead,
which I don't think they do on ppc).

> It really makes little difference;

Yep. So my apologies for making you read this email.

--linas


* Re: [PATCH] PPC64 iSeries: cleanup viopath
  2005-03-15 17:43     ` Linas Vepstas
@ 2005-03-15 17:49       ` Brad Boyer
  0 siblings, 0 replies; 6+ messages in thread
From: Brad Boyer @ 2005-03-15 17:49 UTC (permalink / raw)
  To: Linas Vepstas
  Cc: Stephen Rothwell, akpm, linuxppc64-dev, torvalds, linux-kernel

On Tue, Mar 15, 2005 at 11:43:10AM -0600, Linas Vepstas wrote:
> FWIW, keep in mind that a cache miss due to large structures not fitting
> in the cache is a zillion times more expensive than any byte-access
> overhead in the cpu (even if byte operands had a cpu perf overhead,
> which I don't think they do on ppc).

Actually, there is a small overhead to bytes if you make them signed.
That's why char is unsigned by default on ppc.
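
For example (illustrative only - the instruction names are from the
PowerPC ISA, and the exact code generated is up to the compiler):

int load_unsigned(unsigned char *p)
{
	return *p;	/* lbz: one zero-extending byte load */
}

int load_signed(signed char *p)
{
	return *p;	/* lbz + extsb: there is no sign-extending
			 * byte load, so an extra extsb is needed */
}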

	Brad Boyer
	flar@allandria.com


* Re: [PATCH] PPC64 iSeries: cleanup viopath
  2005-03-15 15:53   ` Stephen Rothwell
  2005-03-15 17:43     ` Linas Vepstas
@ 2005-03-16 15:12     ` Hollis Blanchard
  1 sibling, 0 replies; 6+ messages in thread
From: Hollis Blanchard @ 2005-03-16 15:12 UTC (permalink / raw)
  To: Stephen Rothwell; +Cc: akpm, linuxppc64-dev, linux-kernel, torvalds

On Mar 15, 2005, at 9:53 AM, Stephen Rothwell wrote:

> On Tue, 15 Mar 2005 08:32:27 -0600 Hollis Blanchard 
> <hollis@penguinppc.org> wrote:
>>
>> On Mar 14, 2005, at 9:34 PM, Stephen Rothwell wrote:
>>>
>>> Since you brought this file to my attention, I figured I might as 
>>> well
>>> do
>>> some simple cleanups.  This patch does:
>>> 	- single bit int bitfields are a bit suspect and Andrew pointed
>>> 	  out recently that they are probably slower to access than ints
>>
>>> --- linus/arch/ppc64/kernel/viopath.c	2005-03-13 04:07:42.000000000
>>> +1100
>>> +++ linus-cleanup.1/arch/ppc64/kernel/viopath.c	2005-03-15
>>> 14:02:48.000000000 +1100
>>> @@ -56,8 +57,8 @@
>>>   * But this allows for other support in the future.
>>>   */
>>>  static struct viopathStatus {
>>> -	int isOpen:1;		/* Did we open the path?            */
>>> -	int isActive:1;		/* Do we have a mon msg outstanding */
>>> +	int isOpen;		/* Did we open the path?            */
>>> +	int isActive;		/* Do we have a mon msg outstanding */
>>>  	int users[VIO_MAX_SUBTYPES];
>>>  	HvLpInstanceId mSourceInst;
>>>  	HvLpInstanceId mTargetInst;
>>
>> Why not use a byte instead of a full int (reordering the members for
>> alignment)?
>
> Because "classical" booleans are ints.
>
> Because I don't know the relative speed of accessing single byte 
> variables.

I didn't see the original observation that bitfields are slow.  If the
argument was that loading a bitfield requires a load followed by a mask,
you'll be happy to find that PPC has word, halfword, and byte load
instructions.  So loading a byte (unsigned, as Brad pointed out) should
be just as fast as loading a word.
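
That is (sketch only; exact instruction selection depends on the
compiler):

struct with_bitfield { int open:1; };
struct with_byte { unsigned char open; };

int read_bitfield(struct with_bitfield *s)
{
	return s->open;	/* lwz plus mask/shift (and sign-extension,
			 * since the field is signed) */
}

int read_byte(struct with_byte *s)
{
	return s->open;	/* a single lbz - no slower than lwz */
}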

> It really makes little difference; I was just trying to get rid of the
> silly signed single bit bitfields ...

I understand. I was half being nitpicky, and half wondering if there 
was an actual reason I was missing.

-Hollis

