* [resend][PATCH] mm: Restore zone->all_unreclaimable to independence word
@ 2010-01-14 1:35 KOSAKI Motohiro
2010-01-14 6:33 ` David Rientjes
0 siblings, 1 reply; 7+ messages in thread
From: KOSAKI Motohiro @ 2010-01-14 1:35 UTC (permalink / raw)
To: LKML, linux-mm, Andrew Morton, David Rientjes, KAMEZAWA Hiroyuki,
Minchan Kim, Wu Fengguang, Huang Shijie
Cc: kosaki.motohiro
commit e815af95 (change all_unreclaimable zone member to flags) changed
the all_unreclaimable member to a bit flag, but it has an undesirable side
effect.
free_one_page() is one of the hottest paths in the Linux kernel, and increasing
atomic ops in it can reduce kernel performance a bit.
Thus, this patch partially reverts that commit: at least
all_unreclaimable shouldn't share a memory word with other zone flags.
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
---
include/linux/mmzone.h | 7 +------
mm/page_alloc.c | 6 +++---
mm/vmscan.c | 20 ++++++++------------
mm/vmstat.c | 2 +-
4 files changed, 13 insertions(+), 22 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 30fe668..4f0c6f1 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -341,6 +341,7 @@ struct zone {
unsigned long pages_scanned; /* since last reclaim */
unsigned long flags; /* zone flags, see below */
+ int all_unreclaimable; /* All pages pinned */
/* Zone statistics */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
@@ -425,7 +426,6 @@ struct zone {
} ____cacheline_internodealigned_in_smp;
typedef enum {
- ZONE_ALL_UNRECLAIMABLE, /* all pages pinned */
ZONE_RECLAIM_LOCKED, /* prevents concurrent reclaim */
ZONE_OOM_LOCKED, /* zone is in OOM killer zonelist */
} zone_flags_t;
@@ -445,11 +445,6 @@ static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
clear_bit(flag, &zone->flags);
}
-static inline int zone_is_all_unreclaimable(const struct zone *zone)
-{
- return test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->flags);
-}
-
static inline int zone_is_reclaim_locked(const struct zone *zone)
{
return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4e9f5cc..19a5b0e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -530,7 +530,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
int batch_free = 0;
spin_lock(&zone->lock);
- zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
+ zone->all_unreclaimable = 0;
zone->pages_scanned = 0;
__mod_zone_page_state(zone, NR_FREE_PAGES, count);
@@ -567,7 +567,7 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
int migratetype)
{
spin_lock(&zone->lock);
- zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
+ zone->all_unreclaimable = 0;
zone->pages_scanned = 0;
__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
@@ -2270,7 +2270,7 @@ void show_free_areas(void)
K(zone_page_state(zone, NR_BOUNCE)),
K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
zone->pages_scanned,
- (zone_is_all_unreclaimable(zone) ? "yes" : "no")
+ (zone->all_unreclaimable ? "yes" : "no")
);
printk("lowmem_reserve[]:");
for (i = 0; i < MAX_NR_ZONES; i++)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 885207a..8057d36 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1694,8 +1694,7 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
continue;
note_zone_scanning_priority(zone, priority);
- if (zone_is_all_unreclaimable(zone) &&
- priority != DEF_PRIORITY)
+ if (zone->all_unreclaimable && priority != DEF_PRIORITY)
continue; /* Let kswapd poll it */
sc->all_unreclaimable = 0;
} else {
@@ -2009,8 +2008,7 @@ loop_again:
if (!populated_zone(zone))
continue;
- if (zone_is_all_unreclaimable(zone) &&
- priority != DEF_PRIORITY)
+ if (zone->all_unreclaimable && priority != DEF_PRIORITY)
continue;
/*
@@ -2053,8 +2051,7 @@ loop_again:
if (!populated_zone(zone))
continue;
- if (zone_is_all_unreclaimable(zone) &&
- priority != DEF_PRIORITY)
+ if (zone->all_unreclaimable && priority != DEF_PRIORITY)
continue;
if (!zone_watermark_ok(zone, order,
@@ -2084,12 +2081,11 @@ loop_again:
lru_pages);
sc.nr_reclaimed += reclaim_state->reclaimed_slab;
total_scanned += sc.nr_scanned;
- if (zone_is_all_unreclaimable(zone))
+ if (zone->all_unreclaimable)
continue;
- if (nr_slab == 0 && zone->pages_scanned >=
- (zone_reclaimable_pages(zone) * 6))
- zone_set_flag(zone,
- ZONE_ALL_UNRECLAIMABLE);
+ if (nr_slab == 0 &&
+ zone->pages_scanned >= (zone_reclaimable_pages(zone) * 6))
+ zone->all_unreclaimable = 1;
/*
* If we've done a decent amount of scanning and
* the reclaim ratio is low, start doing writepage
@@ -2612,7 +2608,7 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
return ZONE_RECLAIM_FULL;
- if (zone_is_all_unreclaimable(zone))
+ if (zone->all_unreclaimable)
return ZONE_RECLAIM_FULL;
/*
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 6051fba..8175c64 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -761,7 +761,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
"\n prev_priority: %i"
"\n start_pfn: %lu"
"\n inactive_ratio: %u",
- zone_is_all_unreclaimable(zone),
+ zone->all_unreclaimable,
zone->prev_priority,
zone->zone_start_pfn,
zone->inactive_ratio);
--
1.6.5.2
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply related [flat|nested] 7+ messages in thread
* Re: [resend][PATCH] mm: Restore zone->all_unreclaimable to independence word
2010-01-14 1:35 [resend][PATCH] mm: Restore zone->all_unreclaimable to independence word KOSAKI Motohiro
@ 2010-01-14 6:33 ` David Rientjes
2010-01-14 7:14 ` KOSAKI Motohiro
0 siblings, 1 reply; 7+ messages in thread
From: David Rientjes @ 2010-01-14 6:33 UTC (permalink / raw)
To: KOSAKI Motohiro
Cc: LKML, linux-mm, Andrew Morton, KAMEZAWA Hiroyuki, Minchan Kim,
Wu Fengguang, Huang Shijie
On Thu, 14 Jan 2010, KOSAKI Motohiro wrote:
> commit e815af95 (change all_unreclaimable zone member to flags) chage
> all_unreclaimable member to bit flag. but It have undesireble side
> effect.
> free_one_page() is one of most hot path in linux kernel and increasing
> atomic ops in it can reduce kernel performance a bit.
>
> Thus, this patch revert such commit partially. at least
> all_unreclaimable shouldn't share memory word with other zone flags.
>
I still think you need to quantify this; saying you don't have a large
enough of a machine that will benefit from it isn't really a rationale for
the lack of any data supporting your claim. We should be basing VM
changes on data, not on speculation that there's a measurable impact
here.
Perhaps you could ask a colleague or another hacker to run a benchmark for
you so that the changelog is complete?
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [resend][PATCH] mm: Restore zone->all_unreclaimable to independence word
2010-01-14 6:33 ` David Rientjes
@ 2010-01-14 7:14 ` KOSAKI Motohiro
2010-01-14 8:32 ` Wu Fengguang
0 siblings, 1 reply; 7+ messages in thread
From: KOSAKI Motohiro @ 2010-01-14 7:14 UTC (permalink / raw)
To: David Rientjes
Cc: kosaki.motohiro, LKML, linux-mm, Andrew Morton, KAMEZAWA Hiroyuki,
Minchan Kim, Wu Fengguang, Huang Shijie
> On Thu, 14 Jan 2010, KOSAKI Motohiro wrote:
>
> > commit e815af95 (change all_unreclaimable zone member to flags) chage
> > all_unreclaimable member to bit flag. but It have undesireble side
> > effect.
> > free_one_page() is one of most hot path in linux kernel and increasing
> > atomic ops in it can reduce kernel performance a bit.
> >
> > Thus, this patch revert such commit partially. at least
> > all_unreclaimable shouldn't share memory word with other zone flags.
> >
>
> I still think you need to quantify this; saying you don't have a large
> enough of a machine that will benefit from it isn't really a rationale for
> the lack of any data supporting your claim. We should be basing VM
> changes on data, not on speculation that there's a measurable impact
> here.
>
> Perhaps you could ask a colleague or another hacker to run a benchmark for
> you so that the changelog is complete?
OK, fair — although I dislike the current unnecessary atomic ops,
I'll keep this patch pending until I get good data.
thanks.
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [resend][PATCH] mm: Restore zone->all_unreclaimable to independence word
2010-01-14 7:14 ` KOSAKI Motohiro
@ 2010-01-14 8:32 ` Wu Fengguang
2010-01-14 23:19 ` Andrew Morton
0 siblings, 1 reply; 7+ messages in thread
From: Wu Fengguang @ 2010-01-14 8:32 UTC (permalink / raw)
To: KOSAKI Motohiro
Cc: David Rientjes, LKML, linux-mm, Andrew Morton, KAMEZAWA Hiroyuki,
Minchan Kim, Huang Shijie
On Thu, Jan 14, 2010 at 03:14:10PM +0800, KOSAKI Motohiro wrote:
> > On Thu, 14 Jan 2010, KOSAKI Motohiro wrote:
> >
> > > commit e815af95 (change all_unreclaimable zone member to flags) chage
> > > all_unreclaimable member to bit flag. but It have undesireble side
> > > effect.
> > > free_one_page() is one of most hot path in linux kernel and increasing
> > > atomic ops in it can reduce kernel performance a bit.
> > >
> > > Thus, this patch revert such commit partially. at least
> > > all_unreclaimable shouldn't share memory word with other zone flags.
> > >
> >
> > I still think you need to quantify this; saying you don't have a large
> > enough of a machine that will benefit from it isn't really a rationale for
> > the lack of any data supporting your claim. We should be basing VM
> > changes on data, not on speculation that there's a measurable impact
> > here.
> >
> > Perhaps you could ask a colleague or another hacker to run a benchmark for
> > you so that the changelog is complete?
>
> ok, fair. although I dislike current unnecessary atomic-ops.
> I'll pending this patch until get good data.
I think it's a reasonable expectation that this helps large boxes.
What we can do now is measure whether it hurts mainline SMP
boxes. If not, we are set on doing the patch :)
Thanks,
Fengguang
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [resend][PATCH] mm: Restore zone->all_unreclaimable to independence word
2010-01-14 8:32 ` Wu Fengguang
@ 2010-01-14 23:19 ` Andrew Morton
2010-01-15 2:30 ` KAMEZAWA Hiroyuki
0 siblings, 1 reply; 7+ messages in thread
From: Andrew Morton @ 2010-01-14 23:19 UTC (permalink / raw)
To: Wu Fengguang
Cc: KOSAKI Motohiro, David Rientjes, LKML, linux-mm,
KAMEZAWA Hiroyuki, Minchan Kim, Huang Shijie
On Thu, 14 Jan 2010 16:32:29 +0800
Wu Fengguang <fengguang.wu@intel.com> wrote:
> On Thu, Jan 14, 2010 at 03:14:10PM +0800, KOSAKI Motohiro wrote:
> > > On Thu, 14 Jan 2010, KOSAKI Motohiro wrote:
> > >
> > > > commit e815af95 (change all_unreclaimable zone member to flags) chage
> > > > all_unreclaimable member to bit flag. but It have undesireble side
> > > > effect.
> > > > free_one_page() is one of most hot path in linux kernel and increasing
> > > > atomic ops in it can reduce kernel performance a bit.
> > > >
> > > > Thus, this patch revert such commit partially. at least
> > > > all_unreclaimable shouldn't share memory word with other zone flags.
> > > >
> > >
> > > I still think you need to quantify this; saying you don't have a large
> > > enough of a machine that will benefit from it isn't really a rationale for
> > > the lack of any data supporting your claim. We should be basing VM
> > > changes on data, not on speculation that there's a measurable impact
> > > here.
> > >
> > > Perhaps you could ask a colleague or another hacker to run a benchmark for
> > > you so that the changelog is complete?
> >
> > ok, fair. although I dislike current unnecessary atomic-ops.
> > I'll pending this patch until get good data.
>
> I think it's a reasonable expectation to help large boxes.
>
> What we can do now, is to measure if it hurts mainline SMP
> boxes. If not, we are set on doing the patch :)
yup, the effects of the change might be hard to measure. Not that one
shouldn't try!
But sometimes we just have to do a best-effort change based upon theory
and past experience.
Speaking of which...
: --- a/include/linux/mmzone.h
: +++ b/include/linux/mmzone.h
: @@ -341,6 +341,7 @@ struct zone {
:
: unsigned long pages_scanned; /* since last reclaim */
: unsigned long flags; /* zone flags, see below */
: + int all_unreclaimable; /* All pages pinned */
:
: /* Zone statistics */
: atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
Was that the best place to put the field? It adds four bytes of
padding to the zone, hence is suboptimal from a cache utilisation point
of view.
It might also be that we can place this field closer in memory to other
fields which are being manipulated at the same time as
all_unreclaimable, hm?
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [resend][PATCH] mm: Restore zone->all_unreclaimable to independence word
2010-01-14 23:19 ` Andrew Morton
@ 2010-01-15 2:30 ` KAMEZAWA Hiroyuki
2010-01-15 4:47 ` KOSAKI Motohiro
0 siblings, 1 reply; 7+ messages in thread
From: KAMEZAWA Hiroyuki @ 2010-01-15 2:30 UTC (permalink / raw)
To: Andrew Morton
Cc: Wu Fengguang, KOSAKI Motohiro, David Rientjes, LKML, linux-mm,
Minchan Kim, Huang Shijie
On Thu, 14 Jan 2010 15:19:59 -0800
Andrew Morton <akpm@linux-foundation.org> wrote:
> On Thu, 14 Jan 2010 16:32:29 +0800
> Wu Fengguang <fengguang.wu@intel.com> wrote:
>
> > On Thu, Jan 14, 2010 at 03:14:10PM +0800, KOSAKI Motohiro wrote:
> > > > On Thu, 14 Jan 2010, KOSAKI Motohiro wrote:
> > > >
> > > > > commit e815af95 (change all_unreclaimable zone member to flags) chage
> > > > > all_unreclaimable member to bit flag. but It have undesireble side
> > > > > effect.
> > > > > free_one_page() is one of most hot path in linux kernel and increasing
> > > > > atomic ops in it can reduce kernel performance a bit.
> > > > >
> > > > > Thus, this patch revert such commit partially. at least
> > > > > all_unreclaimable shouldn't share memory word with other zone flags.
> > > > >
> > > >
> > > > I still think you need to quantify this; saying you don't have a large
> > > > enough of a machine that will benefit from it isn't really a rationale for
> > > > the lack of any data supporting your claim. We should be basing VM
> > > > changes on data, not on speculation that there's a measurable impact
> > > > here.
> > > >
> > > > Perhaps you could ask a colleague or another hacker to run a benchmark for
> > > > you so that the changelog is complete?
> > >
> > > ok, fair. although I dislike current unnecessary atomic-ops.
> > > I'll pending this patch until get good data.
> >
> > I think it's a reasonable expectation to help large boxes.
> >
> > What we can do now, is to measure if it hurts mainline SMP
> > boxes. If not, we are set on doing the patch :)
>
> yup, the effects of the change might be hard to measure. Not that one
> shouldn't try!
>
> But sometimes we just have to do a best-effort change based upon theory
> and past experience.
>
> Speaking of which...
>
> : --- a/include/linux/mmzone.h
> : +++ b/include/linux/mmzone.h
> : @@ -341,6 +341,7 @@ struct zone {
> :
> : unsigned long pages_scanned; /* since last reclaim */
> : unsigned long flags; /* zone flags, see below */
> : + int all_unreclaimable; /* All pages pinned */
> :
> : /* Zone statistics */
> : atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
>
> Was that the best place to put the field? It adds four bytes of
> padding to the zone, hence is suboptimal from a cache utilisation point
> of view.
>
> It might also be that we can place this field closed in memory to other
> fields which are being manipulated at the same time as
> all_unreclaimable, hm?
>
How about the same line where zone->lock is ?
Thanks,
-Kame
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [resend][PATCH] mm: Restore zone->all_unreclaimable to independence word
2010-01-15 2:30 ` KAMEZAWA Hiroyuki
@ 2010-01-15 4:47 ` KOSAKI Motohiro
0 siblings, 0 replies; 7+ messages in thread
From: KOSAKI Motohiro @ 2010-01-15 4:47 UTC (permalink / raw)
To: KAMEZAWA Hiroyuki
Cc: kosaki.motohiro, Andrew Morton, Wu Fengguang, David Rientjes,
LKML, linux-mm, Minchan Kim, Huang Shijie
> On Thu, 14 Jan 2010 15:19:59 -0800
> Andrew Morton <akpm@linux-foundation.org> wrote:
>
> > On Thu, 14 Jan 2010 16:32:29 +0800
> > Wu Fengguang <fengguang.wu@intel.com> wrote:
> >
> > > On Thu, Jan 14, 2010 at 03:14:10PM +0800, KOSAKI Motohiro wrote:
> > > > > On Thu, 14 Jan 2010, KOSAKI Motohiro wrote:
> > > > >
> > > > > > commit e815af95 (change all_unreclaimable zone member to flags) chage
> > > > > > all_unreclaimable member to bit flag. but It have undesireble side
> > > > > > effect.
> > > > > > free_one_page() is one of most hot path in linux kernel and increasing
> > > > > > atomic ops in it can reduce kernel performance a bit.
> > > > > >
> > > > > > Thus, this patch revert such commit partially. at least
> > > > > > all_unreclaimable shouldn't share memory word with other zone flags.
> > > > > >
> > > > >
> > > > > I still think you need to quantify this; saying you don't have a large
> > > > > enough of a machine that will benefit from it isn't really a rationale for
> > > > > the lack of any data supporting your claim. We should be basing VM
> > > > > changes on data, not on speculation that there's a measurable impact
> > > > > here.
> > > > >
> > > > > Perhaps you could ask a colleague or another hacker to run a benchmark for
> > > > > you so that the changelog is complete?
> > > >
> > > > ok, fair. although I dislike current unnecessary atomic-ops.
> > > > I'll pending this patch until get good data.
> > >
> > > I think it's a reasonable expectation to help large boxes.
> > >
> > > What we can do now, is to measure if it hurts mainline SMP
> > > boxes. If not, we are set on doing the patch :)
> >
> > yup, the effects of the change might be hard to measure. Not that one
> > shouldn't try!
> >
> > But sometimes we just have to do a best-effort change based upon theory
> > and past experience.
> >
> > Speaking of which...
> >
> > : --- a/include/linux/mmzone.h
> > : +++ b/include/linux/mmzone.h
> > : @@ -341,6 +341,7 @@ struct zone {
> > :
> > : unsigned long pages_scanned; /* since last reclaim */
> > : unsigned long flags; /* zone flags, see below */
> > : + int all_unreclaimable; /* All pages pinned */
> > :
> > : /* Zone statistics */
> > : atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
> >
> > Was that the best place to put the field? It adds four bytes of
> > padding to the zone, hence is suboptimal from a cache utilisation point
> > of view.
> >
> > It might also be that we can place this field closed in memory to other
> > fields which are being manipulated at the same time as
> > all_unreclaimable, hm?
> >
> How about the same line where zone->lock is ?
Sure. The page allocator obviously touches zone->lock first.
The incremental patch is below.
---
include/linux/mmzone.h | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 4f0c6f1..0df3749 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -314,6 +314,7 @@ struct zone {
* free areas of different sizes
*/
spinlock_t lock;
+ int all_unreclaimable; /* All pages pinned */
#ifdef CONFIG_MEMORY_HOTPLUG
/* see spanned/present_pages for more description */
seqlock_t span_seqlock;
@@ -341,7 +342,6 @@ struct zone {
unsigned long pages_scanned; /* since last reclaim */
unsigned long flags; /* zone flags, see below */
- int all_unreclaimable; /* All pages pinned */
/* Zone statistics */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
--
1.6.5.2
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply related [flat|nested] 7+ messages in thread
end of thread, other threads:[~2010-01-15 4:47 UTC | newest]
Thread overview: 7+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2010-01-14 1:35 [resend][PATCH] mm: Restore zone->all_unreclaimable to independence word KOSAKI Motohiro
2010-01-14 6:33 ` David Rientjes
2010-01-14 7:14 ` KOSAKI Motohiro
2010-01-14 8:32 ` Wu Fengguang
2010-01-14 23:19 ` Andrew Morton
2010-01-15 2:30 ` KAMEZAWA Hiroyuki
2010-01-15 4:47 ` KOSAKI Motohiro
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).