linux-mm.kvack.org archive mirror
* [PATCH 2/3] hugetlb-move-reservation-region-support-earlier
  2008-05-07 20:24 [PATCH 0/3] MAP_NORESERVE for hugetlb mappings V1 Andy Whitcroft
@ 2008-05-07 20:24 ` Andy Whitcroft
  0 siblings, 0 replies; 9+ messages in thread
From: Andy Whitcroft @ 2008-05-07 20:24 UTC (permalink / raw)
  To: linux-mm; +Cc: linux-kernel, agl, wli, kenchen, dwg, andi, Mel Gorman, dean

The following patch will require use of the reservation regions support.
Move this earlier in the file.  No changes have been made to this code.
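
For reference, these functions maintain a sorted list of disjoint
[from, to) page-offset intervals recording which parts of a file are
reserved.  region_chg() reports how many pages in [f, t) are not yet
covered (preparing a zero-sized region if needed), and region_add()
later commits the change.  A worked example with hypothetical offsets:

	/* reserve map currently holds the single region [0, 5) */
	chg = region_chg(&head, 3, 8);	/* returns 3: pages 5, 6, 7 are new */
	/* ... charge quota for chg pages ... */
	region_add(&head, 3, 8);	/* map is now the single region [0, 8) */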

Signed-off-by: Andy Whitcroft <apw@shadowen.org>
---
 mm/hugetlb.c |  242 +++++++++++++++++++++++++++++-----------------------------
 1 file changed, 121 insertions(+), 121 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 17c5069..81b13dc 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -556,6 +556,127 @@ static void return_unused_surplus_pages(unsigned long unused_resv_pages)
 	}
 }
 
+struct file_region {
+	struct list_head link;
+	long from;
+	long to;
+};
+
+static long region_add(struct list_head *head, long f, long t)
+{
+	struct file_region *rg, *nrg, *trg;
+
+	/* Locate the region we are either in or before. */
+	list_for_each_entry(rg, head, link)
+		if (f <= rg->to)
+			break;
+
+	/* Round our left edge to the current segment if it encloses us. */
+	if (f > rg->from)
+		f = rg->from;
+
+	/* Check for and consume any regions we now overlap with. */
+	nrg = rg;
+	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
+		if (&rg->link == head)
+			break;
+		if (rg->from > t)
+			break;
+
+		/* If this area reaches higher then extend our area to
+		 * include it completely.  If this is not the first area
+		 * which we intend to reuse, free it. */
+		if (rg->to > t)
+			t = rg->to;
+		if (rg != nrg) {
+			list_del(&rg->link);
+			kfree(rg);
+		}
+	}
+	nrg->from = f;
+	nrg->to = t;
+	return 0;
+}
+
+static long region_chg(struct list_head *head, long f, long t)
+{
+	struct file_region *rg, *nrg;
+	long chg = 0;
+
+	/* Locate the region we are before or in. */
+	list_for_each_entry(rg, head, link)
+		if (f <= rg->to)
+			break;
+
+	/* If we are below the current region then a new region is required.
+	 * Subtle, allocate a new region at the position but make it zero
+	 * size such that we can guarantee to record the reservation. */
+	if (&rg->link == head || t < rg->from) {
+		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
+		if (!nrg)
+			return -ENOMEM;
+		nrg->from = f;
+		nrg->to   = f;
+		INIT_LIST_HEAD(&nrg->link);
+		list_add(&nrg->link, rg->link.prev);
+
+		return t - f;
+	}
+
+	/* Round our left edge to the current segment if it encloses us. */
+	if (f > rg->from)
+		f = rg->from;
+	chg = t - f;
+
+	/* Check for and consume any regions we now overlap with. */
+	list_for_each_entry(rg, rg->link.prev, link) {
+		if (&rg->link == head)
+			break;
+		if (rg->from > t)
+			return chg;
+
+		/* We overlap with this area; if it extends further than
+		 * us then we must extend ourselves.  Account for its
+		 * existing reservation. */
+		if (rg->to > t) {
+			chg += rg->to - t;
+			t = rg->to;
+		}
+		chg -= rg->to - rg->from;
+	}
+	return chg;
+}
+
+static long region_truncate(struct list_head *head, long end)
+{
+	struct file_region *rg, *trg;
+	long chg = 0;
+
+	/* Locate the region we are either in or before. */
+	list_for_each_entry(rg, head, link)
+		if (end <= rg->to)
+			break;
+	if (&rg->link == head)
+		return 0;
+
+	/* If we are in the middle of a region then adjust it. */
+	if (end > rg->from) {
+		chg = rg->to - end;
+		rg->to = end;
+		rg = list_entry(rg->link.next, typeof(*rg), link);
+	}
+
+	/* Drop any remaining regions. */
+	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
+		if (&rg->link == head)
+			break;
+		chg += rg->to - rg->from;
+		list_del(&rg->link);
+		kfree(rg);
+	}
+	return chg;
+}
+
 static struct page *alloc_huge_page(struct vm_area_struct *vma,
 				    unsigned long addr, int avoid_reserve)
 {
@@ -1367,127 +1488,6 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
 	flush_tlb_range(vma, start, end);
 }
 
-struct file_region {
-	struct list_head link;
-	long from;
-	long to;
-};
-
-static long region_add(struct list_head *head, long f, long t)
-{
-	struct file_region *rg, *nrg, *trg;
-
-	/* Locate the region we are either in or before. */
-	list_for_each_entry(rg, head, link)
-		if (f <= rg->to)
-			break;
-
-	/* Round our left edge to the current segment if it encloses us. */
-	if (f > rg->from)
-		f = rg->from;
-
-	/* Check for and consume any regions we now overlap with. */
-	nrg = rg;
-	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
-		if (&rg->link == head)
-			break;
-		if (rg->from > t)
-			break;
-
-		/* If this area reaches higher then extend our area to
-		 * include it completely.  If this is not the first area
-		 * which we intend to reuse, free it. */
-		if (rg->to > t)
-			t = rg->to;
-		if (rg != nrg) {
-			list_del(&rg->link);
-			kfree(rg);
-		}
-	}
-	nrg->from = f;
-	nrg->to = t;
-	return 0;
-}
-
-static long region_chg(struct list_head *head, long f, long t)
-{
-	struct file_region *rg, *nrg;
-	long chg = 0;
-
-	/* Locate the region we are before or in. */
-	list_for_each_entry(rg, head, link)
-		if (f <= rg->to)
-			break;
-
-	/* If we are below the current region then a new region is required.
-	 * Subtle, allocate a new region at the position but make it zero
-	 * size such that we can guarantee to record the reservation. */
-	if (&rg->link == head || t < rg->from) {
-		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
-		if (!nrg)
-			return -ENOMEM;
-		nrg->from = f;
-		nrg->to   = f;
-		INIT_LIST_HEAD(&nrg->link);
-		list_add(&nrg->link, rg->link.prev);
-
-		return t - f;
-	}
-
-	/* Round our left edge to the current segment if it encloses us. */
-	if (f > rg->from)
-		f = rg->from;
-	chg = t - f;
-
-	/* Check for and consume any regions we now overlap with. */
-	list_for_each_entry(rg, rg->link.prev, link) {
-		if (&rg->link == head)
-			break;
-		if (rg->from > t)
-			return chg;
-
-		/* We overlap with this area; if it extends further than
-		 * us then we must extend ourselves.  Account for its
-		 * existing reservation. */
-		if (rg->to > t) {
-			chg += rg->to - t;
-			t = rg->to;
-		}
-		chg -= rg->to - rg->from;
-	}
-	return chg;
-}
-
-static long region_truncate(struct list_head *head, long end)
-{
-	struct file_region *rg, *trg;
-	long chg = 0;
-
-	/* Locate the region we are either in or before. */
-	list_for_each_entry(rg, head, link)
-		if (end <= rg->to)
-			break;
-	if (&rg->link == head)
-		return 0;
-
-	/* If we are in the middle of a region then adjust it. */
-	if (end > rg->from) {
-		chg = rg->to - end;
-		rg->to = end;
-		rg = list_entry(rg->link.next, typeof(*rg), link);
-	}
-
-	/* Drop any remaining regions. */
-	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
-		if (&rg->link == head)
-			break;
-		chg += rg->to - rg->from;
-		list_del(&rg->link);
-		kfree(rg);
-	}
-	return chg;
-}
-
 int hugetlb_reserve_pages(struct inode *inode,
 					long from, long to,
 					struct vm_area_struct *vma)


* [PATCH 2/3] hugetlb-move-reservation-region-support-earlier
  2008-05-20 16:54 [PATCH 0/3] MAP_NORESERVE for hugetlb mappings V2 Andy Whitcroft
@ 2008-05-20 16:55 ` Andy Whitcroft
  0 siblings, 0 replies; 9+ messages in thread
From: Andy Whitcroft @ 2008-05-20 16:55 UTC (permalink / raw)
  To: linux-mm
  Cc: linux-kernel, agl, wli, kenchen, dwg, andi, Mel Gorman, dean, abh,
	Andy Whitcroft

The following patch will require use of the reservation regions support.
Move this earlier in the file.  No changes have been made to this code.

Signed-off-by: Andy Whitcroft <apw@shadowen.org>
---
 mm/hugetlb.c |  242 +++++++++++++++++++++++++++++-----------------------------
 1 file changed, 121 insertions(+), 121 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3cae97d..9f060f1 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -561,6 +561,127 @@ static void return_unused_surplus_pages(unsigned long unused_resv_pages)
 	}
 }
 
+struct file_region {
+	struct list_head link;
+	long from;
+	long to;
+};
+
+static long region_add(struct list_head *head, long f, long t)
+{
+	struct file_region *rg, *nrg, *trg;
+
+	/* Locate the region we are either in or before. */
+	list_for_each_entry(rg, head, link)
+		if (f <= rg->to)
+			break;
+
+	/* Round our left edge to the current segment if it encloses us. */
+	if (f > rg->from)
+		f = rg->from;
+
+	/* Check for and consume any regions we now overlap with. */
+	nrg = rg;
+	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
+		if (&rg->link == head)
+			break;
+		if (rg->from > t)
+			break;
+
+		/* If this area reaches higher then extend our area to
+		 * include it completely.  If this is not the first area
+		 * which we intend to reuse, free it. */
+		if (rg->to > t)
+			t = rg->to;
+		if (rg != nrg) {
+			list_del(&rg->link);
+			kfree(rg);
+		}
+	}
+	nrg->from = f;
+	nrg->to = t;
+	return 0;
+}
+
+static long region_chg(struct list_head *head, long f, long t)
+{
+	struct file_region *rg, *nrg;
+	long chg = 0;
+
+	/* Locate the region we are before or in. */
+	list_for_each_entry(rg, head, link)
+		if (f <= rg->to)
+			break;
+
+	/* If we are below the current region then a new region is required.
+	 * Subtle, allocate a new region at the position but make it zero
+	 * size such that we can guarantee to record the reservation. */
+	if (&rg->link == head || t < rg->from) {
+		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
+		if (!nrg)
+			return -ENOMEM;
+		nrg->from = f;
+		nrg->to   = f;
+		INIT_LIST_HEAD(&nrg->link);
+		list_add(&nrg->link, rg->link.prev);
+
+		return t - f;
+	}
+
+	/* Round our left edge to the current segment if it encloses us. */
+	if (f > rg->from)
+		f = rg->from;
+	chg = t - f;
+
+	/* Check for and consume any regions we now overlap with. */
+	list_for_each_entry(rg, rg->link.prev, link) {
+		if (&rg->link == head)
+			break;
+		if (rg->from > t)
+			return chg;
+
+		/* We overlap with this area; if it extends further than
+		 * us then we must extend ourselves.  Account for its
+		 * existing reservation. */
+		if (rg->to > t) {
+			chg += rg->to - t;
+			t = rg->to;
+		}
+		chg -= rg->to - rg->from;
+	}
+	return chg;
+}
+
+static long region_truncate(struct list_head *head, long end)
+{
+	struct file_region *rg, *trg;
+	long chg = 0;
+
+	/* Locate the region we are either in or before. */
+	list_for_each_entry(rg, head, link)
+		if (end <= rg->to)
+			break;
+	if (&rg->link == head)
+		return 0;
+
+	/* If we are in the middle of a region then adjust it. */
+	if (end > rg->from) {
+		chg = rg->to - end;
+		rg->to = end;
+		rg = list_entry(rg->link.next, typeof(*rg), link);
+	}
+
+	/* Drop any remaining regions. */
+	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
+		if (&rg->link == head)
+			break;
+		chg += rg->to - rg->from;
+		list_del(&rg->link);
+		kfree(rg);
+	}
+	return chg;
+}
+
 static struct page *alloc_huge_page(struct vm_area_struct *vma,
 				    unsigned long addr, int avoid_reserve)
 {
@@ -1370,127 +1491,6 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
 	flush_tlb_range(vma, start, end);
 }
 
-struct file_region {
-	struct list_head link;
-	long from;
-	long to;
-};
-
-static long region_add(struct list_head *head, long f, long t)
-{
-	struct file_region *rg, *nrg, *trg;
-
-	/* Locate the region we are either in or before. */
-	list_for_each_entry(rg, head, link)
-		if (f <= rg->to)
-			break;
-
-	/* Round our left edge to the current segment if it encloses us. */
-	if (f > rg->from)
-		f = rg->from;
-
-	/* Check for and consume any regions we now overlap with. */
-	nrg = rg;
-	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
-		if (&rg->link == head)
-			break;
-		if (rg->from > t)
-			break;
-
-		/* If this area reaches higher then extend our area to
-		 * include it completely.  If this is not the first area
-		 * which we intend to reuse, free it. */
-		if (rg->to > t)
-			t = rg->to;
-		if (rg != nrg) {
-			list_del(&rg->link);
-			kfree(rg);
-		}
-	}
-	nrg->from = f;
-	nrg->to = t;
-	return 0;
-}
-
-static long region_chg(struct list_head *head, long f, long t)
-{
-	struct file_region *rg, *nrg;
-	long chg = 0;
-
-	/* Locate the region we are before or in. */
-	list_for_each_entry(rg, head, link)
-		if (f <= rg->to)
-			break;
-
-	/* If we are below the current region then a new region is required.
-	 * Subtle, allocate a new region at the position but make it zero
-	 * size such that we can guarantee to record the reservation. */
-	if (&rg->link == head || t < rg->from) {
-		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
-		if (!nrg)
-			return -ENOMEM;
-		nrg->from = f;
-		nrg->to   = f;
-		INIT_LIST_HEAD(&nrg->link);
-		list_add(&nrg->link, rg->link.prev);
-
-		return t - f;
-	}
-
-	/* Round our left edge to the current segment if it encloses us. */
-	if (f > rg->from)
-		f = rg->from;
-	chg = t - f;
-
-	/* Check for and consume any regions we now overlap with. */
-	list_for_each_entry(rg, rg->link.prev, link) {
-		if (&rg->link == head)
-			break;
-		if (rg->from > t)
-			return chg;
-
-		/* We overlap with this area; if it extends further than
-		 * us then we must extend ourselves.  Account for its
-		 * existing reservation. */
-		if (rg->to > t) {
-			chg += rg->to - t;
-			t = rg->to;
-		}
-		chg -= rg->to - rg->from;
-	}
-	return chg;
-}
-
-static long region_truncate(struct list_head *head, long end)
-{
-	struct file_region *rg, *trg;
-	long chg = 0;
-
-	/* Locate the region we are either in or before. */
-	list_for_each_entry(rg, head, link)
-		if (end <= rg->to)
-			break;
-	if (&rg->link == head)
-		return 0;
-
-	/* If we are in the middle of a region then adjust it. */
-	if (end > rg->from) {
-		chg = rg->to - end;
-		rg->to = end;
-		rg = list_entry(rg->link.next, typeof(*rg), link);
-	}
-
-	/* Drop any remaining regions. */
-	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
-		if (&rg->link == head)
-			break;
-		chg += rg->to - rg->from;
-		list_del(&rg->link);
-		kfree(rg);
-	}
-	return chg;
-}
-
 int hugetlb_reserve_pages(struct inode *inode,
 					long from, long to,
 					struct vm_area_struct *vma)


* [PATCH 0/3] MAP_NORESERVE for hugetlb mappings V3
@ 2008-05-27 23:09 Andy Whitcroft
  2008-05-27 23:09 ` [PATCH 1/3] record MAP_NORESERVE status on vmas and fix small page mprotect reservations Andy Whitcroft
                   ` (2 more replies)
  0 siblings, 3 replies; 9+ messages in thread
From: Andy Whitcroft @ 2008-05-27 23:09 UTC (permalink / raw)
  To: Andrew Morton
  Cc: linux-mm, linux-kernel, agl, wli, kenchen, dwg, andi, Mel Gorman,
	dean, abh, Andy Whitcroft

This stack is a rebase of the V2 stack onto 2.6.26-rc2-mm1 with
Mel's "Guarantee faults for processes that call mmap(MAP_PRIVATE) on
hugetlbfs v4" applied.  This stack allows map users to opt-out of the
new stricter over-commit handling should those semantics be unsuitable,
using the standard MAP_NORESERVE mmap flag.

This stack should be seen as complementary to Mel's stack, on which it
depends.

Please consider for -mm.

-apw
===
With Mel's hugetlb private reservation support patches applied, strict
overcommit semantics are applied to both shared and private huge
page mappings.  This can be a problem if an application relies on
unlimited overcommit semantics for private mappings.  An example of this
would be an application which maps a huge area with the intention of
using it very sparsely.  Such applications would benefit from being able
to opt out of the strict overcommit.  It should be noted that, prior to
hugetlb supporting demand faulting, all mappings were fully populated,
and so applications of this type should be rare.

This patch stack implements the MAP_NORESERVE mmap() flag for huge page
mappings.  This flag has the same meaning as for small page mappings,
suppressing reservations for that mapping.
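
As an illustration, an application would opt out along these lines (a
minimal sketch; the mount point, file name and helper are made up):

	#include <fcntl.h>
	#include <sys/mman.h>

	/* Map a hugetlbfs file sparsely, without reserving huge pages. */
	static void *map_sparse(const char *path, size_t size)
	{
		int fd = open(path, O_CREAT | O_RDWR, 0600);

		if (fd < 0)
			return MAP_FAILED;
		return mmap(NULL, size, PROT_READ | PROT_WRITE,
			    MAP_SHARED | MAP_NORESERVE, fd, 0);
	}

	/* e.g. map_sparse("/mnt/huge/data", 512UL << 21) for a 1GB area */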

The stack is made up of three patches:

record-MAP_NORESERVE-status-on-vmas-and-fix-small-page-mprotect-reservations --
  currently when we mprotect a private MAP_NORESERVE mapping read-write
  we have no choice but to create a reservation for it.  Fix that by
  introducing a VM_NORESERVE vma flag and checking it before allocating
  reserve.

hugetlb-move-reservation-region-support-earlier -- simply moves the
  reservation region support so it can be used earlier.

hugetlb-allow-huge-page-mappings-to-be-created-without-reservations --
  use the new VM_NORESERVE flag to control the application of hugetlb
  reservations to new mappings.

This has been functionally tested with a hugetlb reservation test suite.

All against 2.6.26-rc2-mm1 with Mel's private reservation patches:

	Subject: Guarantee faults for processes that call mmap(MAP_PRIVATE)
	  on hugetlbfs v4

Thanks to Mel Gorman for reviewing a number of early versions of these
patches.

-apw


* [PATCH 1/3] record MAP_NORESERVE status on vmas and fix small page mprotect reservations
  2008-05-27 23:09 [PATCH 0/3] MAP_NORESERVE for hugetlb mappings V3 Andy Whitcroft
@ 2008-05-27 23:09 ` Andy Whitcroft
  2008-05-27 23:09 ` [PATCH 2/3] hugetlb-move-reservation-region-support-earlier Andy Whitcroft
  2008-05-27 23:10 ` [PATCH 3/3] hugetlb-allow-huge-page-mappings-to-be-created-without-reservations Andy Whitcroft
  2 siblings, 0 replies; 9+ messages in thread
From: Andy Whitcroft @ 2008-05-27 23:09 UTC (permalink / raw)
  To: Andrew Morton
  Cc: linux-mm, linux-kernel, agl, wli, kenchen, dwg, andi, Mel Gorman,
	dean, abh, Andy Whitcroft

When a small page mapping is created with mmap(), reservations are created
by default for any memory pages required.  When the region is read/write
the reservation is increased for every page; no reservation is needed for
read-only regions (as they implicitly share the zero page).  Reservations
are tracked via the VM_ACCOUNT vma flag, which is present when the region
has a reservation backing it.  When we convert a region from read-only to
read-write, new reservations are acquired and VM_ACCOUNT is set.  However,
when a read-only map is created with MAP_NORESERVE it is indistinguishable
from a normal mapping.  When we then convert that to read/write we are
forced to incorrectly create reservations for it, as we have no record of
the original MAP_NORESERVE.

This patch introduces a new vma flag VM_NORESERVE which records the
presence of the original MAP_NORESERVE flag.  This allows us to distinguish
these two circumstances and correctly account the reserve.
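
The problem case is visible from userspace with small pages (a sketch;
the sizes are arbitrary):

	#include <stdlib.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t len = 1UL << 30;
		char *p = mmap(NULL, len, PROT_READ,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
			       -1, 0);

		if (p == MAP_FAILED)
			exit(1);
		/* With no record of MAP_NORESERVE on the vma, the kernel
		 * must (incorrectly) reserve the whole range here. */
		if (mprotect(p, len, PROT_READ | PROT_WRITE))
			exit(2);
		return 0;
	}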

As well as fixing this FIXME in the code, this makes it much easier to
introduce MAP_NORESERVE support for huge pages, as this flag is available
consistently for the life of the mapping.  VM_ACCOUNT, on the other hand,
is heavily used at the generic level in association with small pages.

Signed-off-by: Andy Whitcroft <apw@shadowen.org>
---
 include/linux/mm.h |    1 +
 mm/mmap.c          |    3 +++
 mm/mprotect.c      |    6 ++----
 3 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0ffed95..c2be4c3 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -100,6 +100,7 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
 #define VM_RESERVED	0x00080000	/* Count as reserved_vm like IO */
 #define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
+#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
 #define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
 #define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
 #define VM_MAPPED_COPY	0x01000000	/* T if mapped copy of data (nommu mmap) */
diff --git a/mm/mmap.c b/mm/mmap.c
index fac6633..98ab014 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1101,6 +1101,9 @@ munmap_back:
 	if (!may_expand_vm(mm, len >> PAGE_SHIFT))
 		return -ENOMEM;
 
+	if (flags & MAP_NORESERVE)
+		vm_flags |= VM_NORESERVE;
+
 	if (accountable && (!(flags & MAP_NORESERVE) ||
 			    sysctl_overcommit_memory == OVERCOMMIT_NEVER)) {
 		if (vm_flags & VM_SHARED) {
diff --git a/mm/mprotect.c b/mm/mprotect.c
index a5bf31c..e0d0a6d 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -155,12 +155,10 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
 	 * If we make a private mapping writable we increase our commit;
 	 * but (without finer accounting) cannot reduce our commit if we
 	 * make it unwritable again.
-	 *
-	 * FIXME? We haven't defined a VM_NORESERVE flag, so mprotecting
-	 * a MAP_NORESERVE private mapping to writable will now reserve.
 	 */
 	if (newflags & VM_WRITE) {
-		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_SHARED))) {
+		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|
+						VM_SHARED|VM_NORESERVE))) {
 			charged = nrpages;
 			if (security_vm_enough_memory(charged))
 				return -ENOMEM;


* [PATCH 2/3] hugetlb-move-reservation-region-support-earlier
  2008-05-27 23:09 [PATCH 0/3] MAP_NORESERVE for hugetlb mappings V3 Andy Whitcroft
  2008-05-27 23:09 ` [PATCH 1/3] record MAP_NORESERVE status on vmas and fix small page mprotect reservations Andy Whitcroft
@ 2008-05-27 23:09 ` Andy Whitcroft
  2008-05-28 20:38   ` Adam Litke
  2008-05-27 23:10 ` [PATCH 3/3] hugetlb-allow-huge-page-mappings-to-be-created-without-reservations Andy Whitcroft
  2 siblings, 1 reply; 9+ messages in thread
From: Andy Whitcroft @ 2008-05-27 23:09 UTC (permalink / raw)
  To: Andrew Morton
  Cc: linux-mm, linux-kernel, agl, wli, kenchen, dwg, andi, Mel Gorman,
	dean, abh, Andy Whitcroft

The following patch will require use of the reservation regions support.
Move this earlier in the file.  No changes have been made to this code.

Signed-off-by: Andy Whitcroft <apw@shadowen.org>
---
 mm/hugetlb.c |  242 +++++++++++++++++++++++++++++-----------------------------
 1 file changed, 121 insertions(+), 121 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cd18d11..90a7f5f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -561,6 +561,127 @@ static void return_unused_surplus_pages(unsigned long unused_resv_pages)
 	}
 }
 
+struct file_region {
+	struct list_head link;
+	long from;
+	long to;
+};
+
+static long region_add(struct list_head *head, long f, long t)
+{
+	struct file_region *rg, *nrg, *trg;
+
+	/* Locate the region we are either in or before. */
+	list_for_each_entry(rg, head, link)
+		if (f <= rg->to)
+			break;
+
+	/* Round our left edge to the current segment if it encloses us. */
+	if (f > rg->from)
+		f = rg->from;
+
+	/* Check for and consume any regions we now overlap with. */
+	nrg = rg;
+	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
+		if (&rg->link == head)
+			break;
+		if (rg->from > t)
+			break;
+
+		/* If this area reaches higher then extend our area to
+		 * include it completely.  If this is not the first area
+		 * which we intend to reuse, free it. */
+		if (rg->to > t)
+			t = rg->to;
+		if (rg != nrg) {
+			list_del(&rg->link);
+			kfree(rg);
+		}
+	}
+	nrg->from = f;
+	nrg->to = t;
+	return 0;
+}
+
+static long region_chg(struct list_head *head, long f, long t)
+{
+	struct file_region *rg, *nrg;
+	long chg = 0;
+
+	/* Locate the region we are before or in. */
+	list_for_each_entry(rg, head, link)
+		if (f <= rg->to)
+			break;
+
+	/* If we are below the current region then a new region is required.
+	 * Subtle, allocate a new region at the position but make it zero
+	 * size such that we can guarantee to record the reservation. */
+	if (&rg->link == head || t < rg->from) {
+		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
+		if (!nrg)
+			return -ENOMEM;
+		nrg->from = f;
+		nrg->to   = f;
+		INIT_LIST_HEAD(&nrg->link);
+		list_add(&nrg->link, rg->link.prev);
+
+		return t - f;
+	}
+
+	/* Round our left edge to the current segment if it encloses us. */
+	if (f > rg->from)
+		f = rg->from;
+	chg = t - f;
+
+	/* Check for and consume any regions we now overlap with. */
+	list_for_each_entry(rg, rg->link.prev, link) {
+		if (&rg->link == head)
+			break;
+		if (rg->from > t)
+			return chg;
+
+		/* We overlap with this area; if it extends further than
+		 * us then we must extend ourselves.  Account for its
+		 * existing reservation. */
+		if (rg->to > t) {
+			chg += rg->to - t;
+			t = rg->to;
+		}
+		chg -= rg->to - rg->from;
+	}
+	return chg;
+}
+
+static long region_truncate(struct list_head *head, long end)
+{
+	struct file_region *rg, *trg;
+	long chg = 0;
+
+	/* Locate the region we are either in or before. */
+	list_for_each_entry(rg, head, link)
+		if (end <= rg->to)
+			break;
+	if (&rg->link == head)
+		return 0;
+
+	/* If we are in the middle of a region then adjust it. */
+	if (end > rg->from) {
+		chg = rg->to - end;
+		rg->to = end;
+		rg = list_entry(rg->link.next, typeof(*rg), link);
+	}
+
+	/* Drop any remaining regions. */
+	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
+		if (&rg->link == head)
+			break;
+		chg += rg->to - rg->from;
+		list_del(&rg->link);
+		kfree(rg);
+	}
+	return chg;
+}
+
 static struct page *alloc_huge_page(struct vm_area_struct *vma,
 				    unsigned long addr, int avoid_reserve)
 {
@@ -1392,127 +1513,6 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
 	flush_tlb_range(vma, start, end);
 }
 
-struct file_region {
-	struct list_head link;
-	long from;
-	long to;
-};
-
-static long region_add(struct list_head *head, long f, long t)
-{
-	struct file_region *rg, *nrg, *trg;
-
-	/* Locate the region we are either in or before. */
-	list_for_each_entry(rg, head, link)
-		if (f <= rg->to)
-			break;
-
-	/* Round our left edge to the current segment if it encloses us. */
-	if (f > rg->from)
-		f = rg->from;
-
-	/* Check for and consume any regions we now overlap with. */
-	nrg = rg;
-	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
-		if (&rg->link == head)
-			break;
-		if (rg->from > t)
-			break;
-
-		/* If this area reaches higher then extend our area to
-		 * include it completely.  If this is not the first area
-		 * which we intend to reuse, free it. */
-		if (rg->to > t)
-			t = rg->to;
-		if (rg != nrg) {
-			list_del(&rg->link);
-			kfree(rg);
-		}
-	}
-	nrg->from = f;
-	nrg->to = t;
-	return 0;
-}
-
-static long region_chg(struct list_head *head, long f, long t)
-{
-	struct file_region *rg, *nrg;
-	long chg = 0;
-
-	/* Locate the region we are before or in. */
-	list_for_each_entry(rg, head, link)
-		if (f <= rg->to)
-			break;
-
-	/* If we are below the current region then a new region is required.
-	 * Subtle, allocate a new region at the position but make it zero
-	 * size such that we can guarantee to record the reservation. */
-	if (&rg->link == head || t < rg->from) {
-		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
-		if (!nrg)
-			return -ENOMEM;
-		nrg->from = f;
-		nrg->to   = f;
-		INIT_LIST_HEAD(&nrg->link);
-		list_add(&nrg->link, rg->link.prev);
-
-		return t - f;
-	}
-
-	/* Round our left edge to the current segment if it encloses us. */
-	if (f > rg->from)
-		f = rg->from;
-	chg = t - f;
-
-	/* Check for and consume any regions we now overlap with. */
-	list_for_each_entry(rg, rg->link.prev, link) {
-		if (&rg->link == head)
-			break;
-		if (rg->from > t)
-			return chg;
-
-		/* We overlap with this area; if it extends further than
-		 * us then we must extend ourselves.  Account for its
-		 * existing reservation. */
-		if (rg->to > t) {
-			chg += rg->to - t;
-			t = rg->to;
-		}
-		chg -= rg->to - rg->from;
-	}
-	return chg;
-}
-
-static long region_truncate(struct list_head *head, long end)
-{
-	struct file_region *rg, *trg;
-	long chg = 0;
-
-	/* Locate the region we are either in or before. */
-	list_for_each_entry(rg, head, link)
-		if (end <= rg->to)
-			break;
-	if (&rg->link == head)
-		return 0;
-
-	/* If we are in the middle of a region then adjust it. */
-	if (end > rg->from) {
-		chg = rg->to - end;
-		rg->to = end;
-		rg = list_entry(rg->link.next, typeof(*rg), link);
-	}
-
-	/* Drop any remaining regions. */
-	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
-		if (&rg->link == head)
-			break;
-		chg += rg->to - rg->from;
-		list_del(&rg->link);
-		kfree(rg);
-	}
-	return chg;
-}
-
 int hugetlb_reserve_pages(struct inode *inode,
 					long from, long to,
 					struct vm_area_struct *vma)


* [PATCH 3/3] hugetlb-allow-huge-page-mappings-to-be-created-without-reservations
  2008-05-27 23:09 [PATCH 0/3] MAP_NORESERVE for hugetlb mappings V3 Andy Whitcroft
  2008-05-27 23:09 ` [PATCH 1/3] record MAP_NORESERVE status on vmas and fix small page mprotect reservations Andy Whitcroft
  2008-05-27 23:09 ` [PATCH 2/3] hugetlb-move-reservation-region-support-earlier Andy Whitcroft
@ 2008-05-27 23:10 ` Andy Whitcroft
  2008-05-28 20:53   ` Adam Litke
  2008-05-29  1:51   ` Andrew Morton
  2 siblings, 2 replies; 9+ messages in thread
From: Andy Whitcroft @ 2008-05-27 23:10 UTC (permalink / raw)
  To: Andrew Morton
  Cc: linux-mm, linux-kernel, agl, wli, kenchen, dwg, andi, Mel Gorman,
	dean, abh, Andy Whitcroft

By default all shared mappings and most private mappings now
have reservations associated with them.  This improves semantics by
providing allocation guarantees to the mapper.  However, a small number of
applications may attempt to make very large sparse mappings; with these
strict reservations the system will never be able to honour the mapping.

This patch set brings MAP_NORESERVE support to hugetlb files.
This allows new mappings to be made to hugetlbfs files without an
associated reservation, for both shared and private mappings.  Applications
which want to create very sparse mappings can thereby opt out of the
reservation system.  Obviously, as there is no reservation, they are liable
to fault at runtime if the huge page pool becomes exhausted; buyer beware.
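
To make the buyer-beware point concrete (a sketch; the 2MB huge page
size and mount point are illustrative):

	#include <fcntl.h>
	#include <sys/mman.h>

	#define HPAGE_2M	(2UL * 1024 * 1024)

	int main(void)
	{
		size_t len = 1000 * HPAGE_2M;	/* far larger than the pool */
		int fd = open("/mnt/huge/sparse", O_CREAT | O_RDWR, 0600);
		char *p;

		if (fd < 0)
			return 1;
		/* Succeeds even though the pool cannot back every page. */
		p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_SHARED | MAP_NORESERVE, fd, 0);
		if (p == MAP_FAILED)
			return 2;
		p[0] = 1;	/* fine while free huge pages remain */
		/* Touching more pages than the pool can supply raises
		 * SIGBUS at fault time -- there is no reservation. */
		return 0;
	}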

Signed-off-by: Andy Whitcroft <apw@shadowen.org>
---
 mm/hugetlb.c |   60 +++++++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 55 insertions(+), 5 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 90a7f5f..118dc54 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -88,6 +88,9 @@ static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
 /* Decrement the reserved pages in the hugepage pool by one */
 static void decrement_hugepage_resv_vma(struct vm_area_struct *vma)
 {
+	if (vma->vm_flags & VM_NORESERVE)
+		return;
+
 	if (vma->vm_flags & VM_SHARED) {
 		/* Shared mappings always use reserves */
 		resv_huge_pages--;
@@ -682,25 +685,67 @@ static long region_truncate(struct list_head *head, long end)
 	return chg;
 }
 
+/*
+ * Determine if the huge page at addr within the vma has an associated
+ * reservation.  Where it does not we will need to logically increase
+ * reservation and actually increase quota before an allocation can occur.
+ * Where any new reservation would be required the reservation change is
+ * prepared, but not committed.  Once the page has been quota'd, allocated,
+ * and instantiated, the change should be committed via vma_commit_reservation().
+ * No action is required on failure.
+ */
+static long vma_needs_reservation(struct vm_area_struct *vma, unsigned long addr)
+{
+	struct address_space *mapping = vma->vm_file->f_mapping;
+	struct inode *inode = mapping->host;
+
+	if (vma->vm_flags & VM_SHARED) {
+		unsigned long idx = ((addr - vma->vm_start) >> HPAGE_SHIFT) +
+				(vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
+		return region_chg(&inode->i_mapping->private_list,
+							idx, idx + 1);
+
+	} else {
+		if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER))
+			return 1;
+	}
+
+	return 0;
+}
+static void vma_commit_reservation(struct vm_area_struct *vma,
+							unsigned long addr)
+{
+	struct address_space *mapping = vma->vm_file->f_mapping;
+	struct inode *inode = mapping->host;
+
+	if (vma->vm_flags & VM_SHARED) {
+		unsigned long idx = ((addr - vma->vm_start) >> HPAGE_SHIFT) +
+				(vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
+		region_add(&inode->i_mapping->private_list, idx, idx + 1);
+	}
+}
+
 static struct page *alloc_huge_page(struct vm_area_struct *vma,
 				    unsigned long addr, int avoid_reserve)
 {
 	struct page *page;
 	struct address_space *mapping = vma->vm_file->f_mapping;
 	struct inode *inode = mapping->host;
-	unsigned int chg = 0;
+	long chg;
 
 	/*
 	 * Processes that did not create the mapping will have no reserves and
 	 * will not have accounted against quota. Check that the quota can be
 	 * made before satisfying the allocation
+	 * MAP_NORESERVE mappings may also need pages and quota allocated
+	 * if no reserve mapping overlaps.
 	 */
-	if (!(vma->vm_flags & VM_SHARED) &&
-			!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
-		chg = 1;
+	chg = vma_needs_reservation(vma, addr);
+	if (chg < 0)
+		return ERR_PTR(chg);
+	if (chg)
 		if (hugetlb_get_quota(inode->i_mapping, chg))
 			return ERR_PTR(-ENOSPC);
-	}
 
 	spin_lock(&hugetlb_lock);
 	page = dequeue_huge_page_vma(vma, addr, avoid_reserve);
@@ -717,6 +762,8 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 	set_page_refcounted(page);
 	set_page_private(page, (unsigned long) mapping);
 
+	vma_commit_reservation(vma, addr);
+
 	return page;
 }
 
@@ -1519,6 +1566,9 @@ int hugetlb_reserve_pages(struct inode *inode,
 {
 	long ret, chg;
 
+	if (vma && vma->vm_flags & VM_NORESERVE)
+		return 0;
+
 	/*
 	 * Shared mappings base their reservation on the number of pages that
 	 * are already allocated on behalf of the file. Private mappings need


* Re: [PATCH 2/3] hugetlb-move-reservation-region-support-earlier
  2008-05-27 23:09 ` [PATCH 2/3] hugetlb-move-reservation-region-support-earlier Andy Whitcroft
@ 2008-05-28 20:38   ` Adam Litke
  0 siblings, 0 replies; 9+ messages in thread
From: Adam Litke @ 2008-05-28 20:38 UTC (permalink / raw)
  To: Andy Whitcroft
  Cc: Andrew Morton, linux-mm, linux-kernel, wli, kenchen, dwg, andi,
	Mel Gorman, dean, abh

On Wed, 2008-05-28 at 00:09 +0100, Andy Whitcroft wrote:
> The following patch will require use of the reservation regions support.
> Move this earlier in the file.  No changes have been made to this code.
> 
> Signed-off-by: Andy Whitcroft <apw@shadowen.org>

Acked-by: Adam Litke <agl@us.ibm.com>

-- 
Adam Litke - (agl at us.ibm.com)
IBM Linux Technology Center


* Re: [PATCH 3/3] hugetlb-allow-huge-page-mappings-to-be-created-without-reservations
  2008-05-27 23:10 ` [PATCH 3/3] hugetlb-allow-huge-page-mappings-to-be-created-without-reservations Andy Whitcroft
@ 2008-05-28 20:53   ` Adam Litke
  2008-05-29  1:51   ` Andrew Morton
  1 sibling, 0 replies; 9+ messages in thread
From: Adam Litke @ 2008-05-28 20:53 UTC (permalink / raw)
  To: Andy Whitcroft
  Cc: Andrew Morton, linux-mm, linux-kernel, wli, kenchen, dwg, andi,
	Mel Gorman, dean, abh

On Wed, 2008-05-28 at 00:10 +0100, Andy Whitcroft wrote:
> By default all shared mappings and most private mappings now
> have reservations associated with them.  This improves semantics by
> providing allocation guarantees to the mapper.  However, a small number of
> applications may attempt to make very large sparse mappings; with these
> strict reservations the system will never be able to honour the mapping.
> 
> This patch set brings MAP_NORESERVE support to hugetlb files.
> This allows new mappings to be made to hugetlbfs files without an
> associated reservation, for both shared and private mappings.  Applications
> which want to create very sparse mappings can thereby opt out of the
> reservation system.  Obviously, as there is no reservation, they are liable
> to fault at runtime if the huge page pool becomes exhausted; buyer beware.
> 
> Signed-off-by: Andy Whitcroft <apw@shadowen.org>
> ---
>  mm/hugetlb.c |   60 +++++++++++++++++++++++++++++++++++++++++++++++++++++----
>  1 files changed, 55 insertions(+), 5 deletions(-)
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 90a7f5f..118dc54 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -88,6 +88,9 @@ static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
>  /* Decrement the reserved pages in the hugepage pool by one */
>  static void decrement_hugepage_resv_vma(struct vm_area_struct *vma)
>  {
> +	if (vma->vm_flags & VM_NORESERVE)
> +		return;
> +
>  	if (vma->vm_flags & VM_SHARED) {
>  		/* Shared mappings always use reserves */
>  		resv_huge_pages--;
> @@ -682,25 +685,67 @@ static long region_truncate(struct list_head *head, long end)
>  	return chg;
>  }
> 
> +/*
> + * Determine if the huge page at addr within the vma has an associated
> + * reservation.  Where it does not we will need to logically increase
> + * reservation and actually increase quota before an allocation can occur.
> + * Where any new reservation would be required the reservation change is
> + * prepared, but not committed.  Once the page has been quota'd, allocated,
> + * and instantiated, the change should be committed via vma_commit_reservation().
> + * No action is required on failure.
> + */
> +static long vma_needs_reservation(struct vm_area_struct *vma, unsigned long addr)

To me, this function has an odd name, which led to some confusion when I
read the patch.  The naming suggests that the function determines
_whether_or_not_ a particular page requires a reservation, when in fact
it determines the number of pages required and then (to use your
wording in the comments) prepares said reservation.  Could we rename it
to vma_prepare_reservation() or something?  I feel that would also align
it with vma_commit_reservation() a bit more.
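
For the record, the prepare/commit pattern as I read it from the patch
(just a condensed sketch of the code above, not a new API):

	chg = vma_needs_reservation(vma, addr);		/* "prepare" */
	if (chg < 0)
		return ERR_PTR(chg);
	if (chg && hugetlb_get_quota(inode->i_mapping, chg))
		return ERR_PTR(-ENOSPC);
	page = dequeue_huge_page_vma(vma, addr, avoid_reserve);
	/* ... */
	vma_commit_reservation(vma, addr);		/* "commit" */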

-- 
Adam Litke - (agl at us.ibm.com)
IBM Linux Technology Center


* Re: [PATCH 3/3] hugetlb-allow-huge-page-mappings-to-be-created-without-reservations
  2008-05-27 23:10 ` [PATCH 3/3] hugetlb-allow-huge-page-mappings-to-be-created-without-reservations Andy Whitcroft
  2008-05-28 20:53   ` Adam Litke
@ 2008-05-29  1:51   ` Andrew Morton
  1 sibling, 0 replies; 9+ messages in thread
From: Andrew Morton @ 2008-05-29  1:51 UTC (permalink / raw)
  To: Andy Whitcroft
  Cc: linux-mm, linux-kernel, agl, wli, kenchen, dwg, andi, Mel Gorman,
	dean, abh, Michael Kerrisk

On Wed, 28 May 2008 00:10:06 +0100 Andy Whitcroft <apw@shadowen.org> wrote:

> +		unsigned long idx = ((addr - vma->vm_start) >> HPAGE_SHIFT) +
> +				(vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
> +		return region_chg(&inode->i_mapping->private_list,
> +							idx, idx + 1);
> +
> +	} else {
> +		if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER))
> +			return 1;
> +	}
> +
> +	return 0;
> +}
> +static void vma_commit_reservation(struct vm_area_struct *vma,
> +							unsigned long addr)
> +{
> +	struct address_space *mapping = vma->vm_file->f_mapping;
> +	struct inode *inode = mapping->host;
> +
> +	if (vma->vm_flags & VM_SHARED) {
> +		unsigned long idx = ((addr - vma->vm_start) >> HPAGE_SHIFT) +
> +				(vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
> +		region_add(&inode->i_mapping->private_list, idx, idx + 1);

These look like a couple more users of the little helper function
which I suggested that Mel add.

They both use ulong too - I do think that pgoff_t has a little
documentary value.
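
Something like the below, perhaps (untested, and the name is only a
suggestion):

	static pgoff_t vma_hugecache_offset(struct vm_area_struct *vma,
						unsigned long address)
	{
		return ((address - vma->vm_start) >> HPAGE_SHIFT) +
			(vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
	}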

I guess these changes impact the manpages, but the mmap manpage doesn't
seem to know about huge pages at all.

