* [PATCH for-next] rdma_rxe: address an issue with hardened user copy
@ 2020-08-25 16:58 Bob Pearson
2020-08-27 1:21 ` Zhu Yanjun
` (2 more replies)
0 siblings, 3 replies; 6+ messages in thread
From: Bob Pearson @ 2020-08-25 16:58 UTC (permalink / raw)
To: leon, zyjzyj2000, linux-rdma; +Cc: Bob Pearson
Change rxe pools to use kzalloc instead of kmem_cache to allocate
memory for rxe objects.
Signed-off-by: Bob Pearson <rpearson@hpe.com>
---
drivers/infiniband/sw/rxe/rxe.c | 8 ----
drivers/infiniband/sw/rxe/rxe_pool.c | 60 +---------------------------
drivers/infiniband/sw/rxe/rxe_pool.h | 7 ----
3 files changed, 2 insertions(+), 73 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index cc395da13eff..a1ff70e0b1f8 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -277,13 +277,6 @@ static int __init rxe_module_init(void)
{
int err;
- /* initialize slab caches for managed objects */
- err = rxe_cache_init();
- if (err) {
- pr_err("unable to init object pools\n");
- return err;
- }
-
err = rxe_net_init();
if (err)
return err;
@@ -298,7 +291,6 @@ static void __exit rxe_module_exit(void)
rdma_link_unregister(&rxe_link_ops);
ib_unregister_driver(RDMA_DRIVER_RXE);
rxe_net_exit();
- rxe_cache_exit();
pr_info("unloaded\n");
}
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index c0fab4a65f9e..70fc9f7a25b6 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -84,62 +84,6 @@ static inline const char *pool_name(struct rxe_pool *pool)
return rxe_type_info[pool->type].name;
}
-static inline struct kmem_cache *pool_cache(struct rxe_pool *pool)
-{
- return rxe_type_info[pool->type].cache;
-}
-
-static void rxe_cache_clean(size_t cnt)
-{
- int i;
- struct rxe_type_info *type;
-
- for (i = 0; i < cnt; i++) {
- type = &rxe_type_info[i];
- if (!(type->flags & RXE_POOL_NO_ALLOC)) {
- kmem_cache_destroy(type->cache);
- type->cache = NULL;
- }
- }
-}
-
-int rxe_cache_init(void)
-{
- int err;
- int i;
- size_t size;
- struct rxe_type_info *type;
-
- for (i = 0; i < RXE_NUM_TYPES; i++) {
- type = &rxe_type_info[i];
- size = ALIGN(type->size, RXE_POOL_ALIGN);
- if (!(type->flags & RXE_POOL_NO_ALLOC)) {
- type->cache =
- kmem_cache_create(type->name, size,
- RXE_POOL_ALIGN,
- RXE_POOL_CACHE_FLAGS, NULL);
- if (!type->cache) {
- pr_err("Unable to init kmem cache for %s\n",
- type->name);
- err = -ENOMEM;
- goto err1;
- }
- }
- }
-
- return 0;
-
-err1:
- rxe_cache_clean(i);
-
- return err;
-}
-
-void rxe_cache_exit(void)
-{
- rxe_cache_clean(RXE_NUM_TYPES);
-}
-
static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
{
int err = 0;
@@ -381,7 +325,7 @@ void *rxe_alloc(struct rxe_pool *pool)
if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
goto out_cnt;
- elem = kmem_cache_zalloc(pool_cache(pool),
+ elem = kzalloc(rxe_type_info[pool->type].size,
(pool->flags & RXE_POOL_ATOMIC) ?
GFP_ATOMIC : GFP_KERNEL);
if (!elem)
@@ -443,7 +387,7 @@ void rxe_elem_release(struct kref *kref)
pool->cleanup(elem);
if (!(pool->flags & RXE_POOL_NO_ALLOC))
- kmem_cache_free(pool_cache(pool), elem);
+ kfree(elem);
atomic_dec(&pool->num_elem);
ib_device_put(&pool->rxe->ib_dev);
rxe_pool_put(pool);
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
index 64d92be3f060..3d722aae5f15 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.h
+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
@@ -42,7 +42,6 @@ struct rxe_type_info {
u32 min_index;
size_t key_offset;
size_t key_size;
- struct kmem_cache *cache;
};
extern struct rxe_type_info rxe_type_info[];
@@ -96,12 +95,6 @@ struct rxe_pool {
} key;
};
-/* initialize slab caches for managed objects */
-int rxe_cache_init(void);
-
-/* cleanup slab caches for managed objects */
-void rxe_cache_exit(void);
-
/* initialize a pool of objects with given limit on
* number of elements. gets parameters from rxe_type_info
* pool elements will be allocated out of a slab cache
--
2.25.1
^ permalink raw reply related [flat|nested] 6+ messages in thread
* Re: [PATCH for-next] rdma_rxe: address an issue with hardened user copy
2020-08-25 16:58 [PATCH for-next] rdma_rxe: address an issue with hardened user copy Bob Pearson
@ 2020-08-27 1:21 ` Zhu Yanjun
2020-08-27 1:30 ` Bob Pearson
2020-08-27 7:02 ` Leon Romanovsky
2020-08-27 13:58 ` Jason Gunthorpe
2 siblings, 1 reply; 6+ messages in thread
From: Zhu Yanjun @ 2020-08-27 1:21 UTC (permalink / raw)
To: Bob Pearson, leon, linux-rdma; +Cc: Bob Pearson
On 8/26/2020 12:58 AM, Bob Pearson wrote:
> Change rxe pools to use kzalloc instead of kmem_cache to allocate
Why do you use kzalloc instead of kmem_cache? For performance or some bugs?
Zhu Yanjun
> memory for rxe objects.
>
> Signed-off-by: Bob Pearson <rpearson@hpe.com>
> ---
> drivers/infiniband/sw/rxe/rxe.c | 8 ----
> drivers/infiniband/sw/rxe/rxe_pool.c | 60 +---------------------------
> drivers/infiniband/sw/rxe/rxe_pool.h | 7 ----
> 3 files changed, 2 insertions(+), 73 deletions(-)
>
> diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
> index cc395da13eff..a1ff70e0b1f8 100644
> --- a/drivers/infiniband/sw/rxe/rxe.c
> +++ b/drivers/infiniband/sw/rxe/rxe.c
> @@ -277,13 +277,6 @@ static int __init rxe_module_init(void)
> {
> int err;
>
> - /* initialize slab caches for managed objects */
> - err = rxe_cache_init();
> - if (err) {
> - pr_err("unable to init object pools\n");
> - return err;
> - }
> -
> err = rxe_net_init();
> if (err)
> return err;
> @@ -298,7 +291,6 @@ static void __exit rxe_module_exit(void)
> rdma_link_unregister(&rxe_link_ops);
> ib_unregister_driver(RDMA_DRIVER_RXE);
> rxe_net_exit();
> - rxe_cache_exit();
>
> pr_info("unloaded\n");
> }
> diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
> index c0fab4a65f9e..70fc9f7a25b6 100644
> --- a/drivers/infiniband/sw/rxe/rxe_pool.c
> +++ b/drivers/infiniband/sw/rxe/rxe_pool.c
> @@ -84,62 +84,6 @@ static inline const char *pool_name(struct rxe_pool *pool)
> return rxe_type_info[pool->type].name;
> }
>
> -static inline struct kmem_cache *pool_cache(struct rxe_pool *pool)
> -{
> - return rxe_type_info[pool->type].cache;
> -}
> -
> -static void rxe_cache_clean(size_t cnt)
> -{
> - int i;
> - struct rxe_type_info *type;
> -
> - for (i = 0; i < cnt; i++) {
> - type = &rxe_type_info[i];
> - if (!(type->flags & RXE_POOL_NO_ALLOC)) {
> - kmem_cache_destroy(type->cache);
> - type->cache = NULL;
> - }
> - }
> -}
> -
> -int rxe_cache_init(void)
> -{
> - int err;
> - int i;
> - size_t size;
> - struct rxe_type_info *type;
> -
> - for (i = 0; i < RXE_NUM_TYPES; i++) {
> - type = &rxe_type_info[i];
> - size = ALIGN(type->size, RXE_POOL_ALIGN);
> - if (!(type->flags & RXE_POOL_NO_ALLOC)) {
> - type->cache =
> - kmem_cache_create(type->name, size,
> - RXE_POOL_ALIGN,
> - RXE_POOL_CACHE_FLAGS, NULL);
> - if (!type->cache) {
> - pr_err("Unable to init kmem cache for %s\n",
> - type->name);
> - err = -ENOMEM;
> - goto err1;
> - }
> - }
> - }
> -
> - return 0;
> -
> -err1:
> - rxe_cache_clean(i);
> -
> - return err;
> -}
> -
> -void rxe_cache_exit(void)
> -{
> - rxe_cache_clean(RXE_NUM_TYPES);
> -}
> -
> static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
> {
> int err = 0;
> @@ -381,7 +325,7 @@ void *rxe_alloc(struct rxe_pool *pool)
> if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
> goto out_cnt;
>
> - elem = kmem_cache_zalloc(pool_cache(pool),
> + elem = kzalloc(rxe_type_info[pool->type].size,
> (pool->flags & RXE_POOL_ATOMIC) ?
> GFP_ATOMIC : GFP_KERNEL);
> if (!elem)
> @@ -443,7 +387,7 @@ void rxe_elem_release(struct kref *kref)
> pool->cleanup(elem);
>
> if (!(pool->flags & RXE_POOL_NO_ALLOC))
> - kmem_cache_free(pool_cache(pool), elem);
> + kfree(elem);
> atomic_dec(&pool->num_elem);
> ib_device_put(&pool->rxe->ib_dev);
> rxe_pool_put(pool);
> diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
> index 64d92be3f060..3d722aae5f15 100644
> --- a/drivers/infiniband/sw/rxe/rxe_pool.h
> +++ b/drivers/infiniband/sw/rxe/rxe_pool.h
> @@ -42,7 +42,6 @@ struct rxe_type_info {
> u32 min_index;
> size_t key_offset;
> size_t key_size;
> - struct kmem_cache *cache;
> };
>
> extern struct rxe_type_info rxe_type_info[];
> @@ -96,12 +95,6 @@ struct rxe_pool {
> } key;
> };
>
> -/* initialize slab caches for managed objects */
> -int rxe_cache_init(void);
> -
> -/* cleanup slab caches for managed objects */
> -void rxe_cache_exit(void);
> -
> /* initialize a pool of objects with given limit on
> * number of elements. gets parameters from rxe_type_info
> * pool elements will be allocated out of a slab cache
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH for-next] rdma_rxe: address an issue with hardened user copy
2020-08-27 1:21 ` Zhu Yanjun
@ 2020-08-27 1:30 ` Bob Pearson
0 siblings, 0 replies; 6+ messages in thread
From: Bob Pearson @ 2020-08-27 1:30 UTC (permalink / raw)
To: Zhu Yanjun, leon, linux-rdma; +Cc: Bob Pearson
On 8/26/20 8:21 PM, Zhu Yanjun wrote:
> On 8/26/2020 12:58 AM, Bob Pearson wrote:
>> Change rxe pools to use kzalloc instead of kmem_cache to allocate
>
> Why do you use kzalloc instead of kmem_cache? For performance or some bugs?
>
> Zhu Yanjun
>
>> memory for rxe objects.
>>
>> Signed-off-by: Bob Pearson <rpearson@hpe.com>
>> ---
>> drivers/infiniband/sw/rxe/rxe.c | 8 ----
>> drivers/infiniband/sw/rxe/rxe_pool.c | 60 +---------------------------
>> drivers/infiniband/sw/rxe/rxe_pool.h | 7 ----
>> 3 files changed, 2 insertions(+), 73 deletions(-)
>>
>> diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
>> index cc395da13eff..a1ff70e0b1f8 100644
>> --- a/drivers/infiniband/sw/rxe/rxe.c
>> +++ b/drivers/infiniband/sw/rxe/rxe.c
>> @@ -277,13 +277,6 @@ static int __init rxe_module_init(void)
>> {
>> int err;
>> - /* initialize slab caches for managed objects */
>> - err = rxe_cache_init();
>> - if (err) {
>> - pr_err("unable to init object pools\n");
>> - return err;
>> - }
>> -
>> err = rxe_net_init();
>> if (err)
>> return err;
>> @@ -298,7 +291,6 @@ static void __exit rxe_module_exit(void)
>> rdma_link_unregister(&rxe_link_ops);
>> ib_unregister_driver(RDMA_DRIVER_RXE);
>> rxe_net_exit();
>> - rxe_cache_exit();
>> pr_info("unloaded\n");
>> }
>> diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
>> index c0fab4a65f9e..70fc9f7a25b6 100644
>> --- a/drivers/infiniband/sw/rxe/rxe_pool.c
>> +++ b/drivers/infiniband/sw/rxe/rxe_pool.c
>> @@ -84,62 +84,6 @@ static inline const char *pool_name(struct rxe_pool *pool)
>> return rxe_type_info[pool->type].name;
>> }
>> -static inline struct kmem_cache *pool_cache(struct rxe_pool *pool)
>> -{
>> - return rxe_type_info[pool->type].cache;
>> -}
>> -
>> -static void rxe_cache_clean(size_t cnt)
>> -{
>> - int i;
>> - struct rxe_type_info *type;
>> -
>> - for (i = 0; i < cnt; i++) {
>> - type = &rxe_type_info[i];
>> - if (!(type->flags & RXE_POOL_NO_ALLOC)) {
>> - kmem_cache_destroy(type->cache);
>> - type->cache = NULL;
>> - }
>> - }
>> -}
>> -
>> -int rxe_cache_init(void)
>> -{
>> - int err;
>> - int i;
>> - size_t size;
>> - struct rxe_type_info *type;
>> -
>> - for (i = 0; i < RXE_NUM_TYPES; i++) {
>> - type = &rxe_type_info[i];
>> - size = ALIGN(type->size, RXE_POOL_ALIGN);
>> - if (!(type->flags & RXE_POOL_NO_ALLOC)) {
>> - type->cache =
>> - kmem_cache_create(type->name, size,
>> - RXE_POOL_ALIGN,
>> - RXE_POOL_CACHE_FLAGS, NULL);
>> - if (!type->cache) {
>> - pr_err("Unable to init kmem cache for %s\n",
>> - type->name);
>> - err = -ENOMEM;
>> - goto err1;
>> - }
>> - }
>> - }
>> -
>> - return 0;
>> -
>> -err1:
>> - rxe_cache_clean(i);
>> -
>> - return err;
>> -}
>> -
>> -void rxe_cache_exit(void)
>> -{
>> - rxe_cache_clean(RXE_NUM_TYPES);
>> -}
>> -
>> static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
>> {
>> int err = 0;
>> @@ -381,7 +325,7 @@ void *rxe_alloc(struct rxe_pool *pool)
>> if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
>> goto out_cnt;
>> - elem = kmem_cache_zalloc(pool_cache(pool),
>> + elem = kzalloc(rxe_type_info[pool->type].size,
>> (pool->flags & RXE_POOL_ATOMIC) ?
>> GFP_ATOMIC : GFP_KERNEL);
>> if (!elem)
>> @@ -443,7 +387,7 @@ void rxe_elem_release(struct kref *kref)
>> pool->cleanup(elem);
>> if (!(pool->flags & RXE_POOL_NO_ALLOC))
>> - kmem_cache_free(pool_cache(pool), elem);
>> + kfree(elem);
>> atomic_dec(&pool->num_elem);
>> ib_device_put(&pool->rxe->ib_dev);
>> rxe_pool_put(pool);
>> diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
>> index 64d92be3f060..3d722aae5f15 100644
>> --- a/drivers/infiniband/sw/rxe/rxe_pool.h
>> +++ b/drivers/infiniband/sw/rxe/rxe_pool.h
>> @@ -42,7 +42,6 @@ struct rxe_type_info {
>> u32 min_index;
>> size_t key_offset;
>> size_t key_size;
>> - struct kmem_cache *cache;
>> };
>> extern struct rxe_type_info rxe_type_info[];
>> @@ -96,12 +95,6 @@ struct rxe_pool {
>> } key;
>> };
>> -/* initialize slab caches for managed objects */
>> -int rxe_cache_init(void);
>> -
>> -/* cleanup slab caches for managed objects */
>> -void rxe_cache_exit(void);
>> -
>> /* initialize a pool of objects with given limit on
>> * number of elements. gets parameters from rxe_type_info
>> * pool elements will be allocated out of a slab cache
>
>
There is a regression in rxe caused by the hardened usercopy patches. It leads to a kernel warning the first time a QP is created each boot. The origin has been discussed in several emails between Leon, myself, and the list. There are a lot of ways to eliminate the warning, but so far there has been resistance to all of these fixes. As far as I can tell there is no performance hit from moving from kmem_cache to kzalloc. (kzalloc and kmalloc just use predefined caches with objects that are powers of 2 in size.) I am waiting for Leon to express an opinion on this solution. He also has a proposal to allocate QP objects in the core (like PD, etc).
Bob
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH for-next] rdma_rxe: address an issue with hardened user copy
2020-08-25 16:58 [PATCH for-next] rdma_rxe: address an issue with hardened user copy Bob Pearson
2020-08-27 1:21 ` Zhu Yanjun
@ 2020-08-27 7:02 ` Leon Romanovsky
2020-08-27 13:58 ` Jason Gunthorpe
2 siblings, 0 replies; 6+ messages in thread
From: Leon Romanovsky @ 2020-08-27 7:02 UTC (permalink / raw)
To: Bob Pearson; +Cc: zyjzyj2000, linux-rdma, Bob Pearson
On Tue, Aug 25, 2020 at 11:58:37AM -0500, Bob Pearson wrote:
> Change rxe pools to use kzalloc instead of kmem_cache to allocate
> memory for rxe objects.
>
> Signed-off-by: Bob Pearson <rpearson@hpe.com>
> ---
> drivers/infiniband/sw/rxe/rxe.c | 8 ----
> drivers/infiniband/sw/rxe/rxe_pool.c | 60 +---------------------------
> drivers/infiniband/sw/rxe/rxe_pool.h | 7 ----
> 3 files changed, 2 insertions(+), 73 deletions(-)
>
I liked this solution.
Thanks,
Acked-by: Leon Romanovsky <leonro@nvidia.com>
BTW, didn't test/"review deeply" the patch so take my Acked-by with grain of salt.
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH for-next] rdma_rxe: address an issue with hardened user copy
2020-08-25 16:58 [PATCH for-next] rdma_rxe: address an issue with hardened user copy Bob Pearson
2020-08-27 1:21 ` Zhu Yanjun
2020-08-27 7:02 ` Leon Romanovsky
@ 2020-08-27 13:58 ` Jason Gunthorpe
2020-08-27 16:43 ` Bob Pearson
2 siblings, 1 reply; 6+ messages in thread
From: Jason Gunthorpe @ 2020-08-27 13:58 UTC (permalink / raw)
To: Bob Pearson; +Cc: leon, zyjzyj2000, linux-rdma, Bob Pearson
On Tue, Aug 25, 2020 at 11:58:37AM -0500, Bob Pearson wrote:
> Change rxe pools to use kzalloc instead of kmem_cache to allocate
> memory for rxe objects.
>
> Signed-off-by: Bob Pearson <rpearson@hpe.com>
> drivers/infiniband/sw/rxe/rxe.c | 8 ----
> drivers/infiniband/sw/rxe/rxe_pool.c | 60 +---------------------------
> drivers/infiniband/sw/rxe/rxe_pool.h | 7 ----
> 3 files changed, 2 insertions(+), 73 deletions(-)
It doesn't apply:
Applying: rdma_rxe: address an issue with hardened user copy
error: sha1 information is lacking or useless (drivers/infiniband/sw/rxe/rxe.c).
error: could not build fake ancestor
Patch failed at 0001 rdma_rxe: address an issue with hardened user copy
hint: Use 'git am --show-current-patch=diff' to see the failed patch
When you have resolved this problem, run "git am --continue".
If you prefer to skip this patch, run "git am --skip" instead.
To restore the original branch and stop patching, run "git am --abort".
Please generate patches against a v5.x tag or rdma for-next
Jason
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH for-next] rdma_rxe: address an issue with hardened user copy
2020-08-27 13:58 ` Jason Gunthorpe
@ 2020-08-27 16:43 ` Bob Pearson
0 siblings, 0 replies; 6+ messages in thread
From: Bob Pearson @ 2020-08-27 16:43 UTC (permalink / raw)
To: Jason Gunthorpe; +Cc: leon, zyjzyj2000, linux-rdma, Bob Pearson
On 8/27/20 8:58 AM, Jason Gunthorpe wrote:
> On Tue, Aug 25, 2020 at 11:58:37AM -0500, Bob Pearson wrote:
>> Change rxe pools to use kzalloc instead of kmem_cache to allocate
>> memory for rxe objects.
>>
>> Signed-off-by: Bob Pearson <rpearson@hpe.com>
>> drivers/infiniband/sw/rxe/rxe.c | 8 ----
>> drivers/infiniband/sw/rxe/rxe_pool.c | 60 +---------------------------
>> drivers/infiniband/sw/rxe/rxe_pool.h | 7 ----
>> 3 files changed, 2 insertions(+), 73 deletions(-)
>
> It doesn't apply:
>
> Applying: rdma_rxe: address an issue with hardened user copy
> error: sha1 information is lacking or useless (drivers/infiniband/sw/rxe/rxe.c).
> error: could not build fake ancestor
> Patch failed at 0001 rdma_rxe: address an issue with hardened user copy
> hint: Use 'git am --show-current-patch=diff' to see the failed patch
> When you have resolved this problem, run "git am --continue".
> If you prefer to skip this patch, run "git am --skip" instead.
> To restore the original branch and stop patching, run "git am --abort".
>
> Pleae generate patches against a v5.x tag or rdma for-next
>
> Jason
>
I fixed it and sent it again.
Applies to today's for-next branch. It is independent from the SPDX patch but the line numbers will move a bunch. Hope that doesn't break it.
^ permalink raw reply [flat|nested] 6+ messages in thread
end of thread, other threads:[~2020-08-27 16:43 UTC | newest]
Thread overview: 6+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2020-08-25 16:58 [PATCH for-next] rdma_rxe: address an issue with hardened user copy Bob Pearson
2020-08-27 1:21 ` Zhu Yanjun
2020-08-27 1:30 ` Bob Pearson
2020-08-27 7:02 ` Leon Romanovsky
2020-08-27 13:58 ` Jason Gunthorpe
2020-08-27 16:43 ` Bob Pearson
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox