From: Dave Chinner <david@fromorbit.com>
To: linux-fsdevel@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Subject: [PATCH 09/18] fs: rework icount to be a locked variable
Date: Fri, 8 Oct 2010 16:21:23 +1100
Message-ID: <1286515292-15882-10-git-send-email-david@fromorbit.com>
In-Reply-To: <1286515292-15882-1-git-send-email-david@fromorbit.com>
From: Dave Chinner <dchinner@redhat.com>
The inode reference count is currently an atomic variable so that it can be
sampled/modified outside the inode_lock. However, the inode_lock is still
needed to synchronise dropping the final reference with checks against the
inode state.
To avoid needing the protection of the inode_lock, protect the inode
reference count with the per-inode i_lock and convert it to a normal
variable. To prevent existing out-of-tree code from accidentally compiling
against the new method, rename the i_count field to i_ref. This is
relatively straightforward as there are only a limited number of external
references to the i_count field remaining.
Based on work originally from Nick Piggin.
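For illustration only (this just restates the pattern from the hunks below,
it is not additional code): taking a reference now becomes a plain increment
under the per-inode lock:

	/* inode_lock already held, as in igrab() or the ifind() paths */
	spin_lock(&inode->i_lock);
	iref_locked(inode);		/* inode->i_ref++ */
	spin_unlock(&inode->i_lock);

and dropping a reference in iput() becomes a locked decrement that only
calls iput_final() once the count reaches zero:

	spin_lock(&inode_lock);
	spin_lock(&inode->i_lock);
	inode->i_ref--;
	if (inode->i_ref == 0) {
		spin_unlock(&inode->i_lock);
		iput_final(inode);	/* still entered with inode_lock held */
		return;
	}
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_lock);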
Signed-off-by: Dave Chinner <dchinner@redhat.com>
---
fs/btrfs/inode.c | 8 ++++-
fs/inode.c | 83 ++++++++++++++++++++++++++++++++++++-----------
fs/nfs/nfs4state.c | 2 +-
fs/nilfs2/mdt.c | 2 +-
fs/notify/inode_mark.c | 16 ++++++---
include/linux/fs.h | 2 +-
6 files changed, 84 insertions(+), 29 deletions(-)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 2953e9f..9f04478 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1964,8 +1964,14 @@ void btrfs_add_delayed_iput(struct inode *inode)
struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
struct delayed_iput *delayed;
- if (atomic_add_unless(&inode->i_count, -1, 1))
+ /* XXX: filesystems should not play refcount games like this */
+ spin_lock(&inode->i_lock);
+ if (inode->i_ref > 1) {
+ inode->i_ref--;
+ spin_unlock(&inode->i_lock);
return;
+ }
+ spin_unlock(&inode->i_lock);
delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
delayed->inode = inode;
diff --git a/fs/inode.c b/fs/inode.c
index b1dc6dc..5c8a3ea 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -26,6 +26,13 @@
#include <linux/posix_acl.h>
/*
+ * Locking rules.
+ *
+ * inode->i_lock protects:
+ * i_ref
+ */
+
+/*
* This is needed for the following functions:
* - inode_has_buffers
* - invalidate_inode_buffers
@@ -64,9 +71,9 @@ static unsigned int i_hash_shift __read_mostly;
* Each inode can be on two separate lists. One is
* the hash list of the inode, used for lookups. The
* other linked list is the "type" list:
- * "in_use" - valid inode, i_count > 0, i_nlink > 0
+ * "in_use" - valid inode, i_ref > 0, i_nlink > 0
* "dirty" - as "in_use" but also dirty
- * "unused" - valid inode, i_count = 0
+ * "unused" - valid inode, i_ref = 0
*
* A "dirty" list is maintained for each super block,
* allowing for low-overhead inode sync() operations.
@@ -164,7 +171,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
inode->i_sb = sb;
inode->i_blkbits = sb->s_blocksize_bits;
inode->i_flags = 0;
- atomic_set(&inode->i_count, 1);
+ inode->i_ref = 1;
inode->i_op = &empty_iops;
inode->i_fop = &empty_fops;
inode->i_nlink = 1;
@@ -313,31 +320,38 @@ static void init_once(void *foo)
inode_init_once(inode);
}
+
+/*
+ * inode_lock must be held
+ */
+void iref_locked(struct inode *inode)
+{
+ inode->i_ref++;
+}
EXPORT_SYMBOL_GPL(iref_locked);
void iref(struct inode *inode)
{
spin_lock(&inode_lock);
+ spin_lock(&inode->i_lock);
iref_locked(inode);
+ spin_unlock(&inode->i_lock);
spin_unlock(&inode_lock);
}
EXPORT_SYMBOL_GPL(iref);
/*
- * inode_lock must be held
- */
-void iref_locked(struct inode *inode)
-{
- atomic_inc(&inode->i_count);
-}
-
-/*
* Nobody outside of core code should really be looking at the inode reference
* count. Please don't add new users of this function.
*/
int iref_read(struct inode *inode)
{
- return atomic_read(&inode->i_count);
+ int ref;
+
+ spin_lock(&inode->i_lock);
+ ref = inode->i_ref;
+ spin_unlock(&inode->i_lock);
+ return ref;
}
EXPORT_SYMBOL_GPL(iref_read);
@@ -425,7 +439,9 @@ static int invalidate_list(struct list_head *head, struct list_head *dispose)
if (inode->i_state & I_NEW)
continue;
invalidate_inode_buffers(inode);
- if (!atomic_read(&inode->i_count)) {
+ spin_lock(&inode->i_lock);
+ if (!inode->i_ref) {
+ spin_unlock(&inode->i_lock);
list_move(&inode->i_lru, dispose);
list_del_init(&inode->i_io);
WARN_ON(inode->i_state & I_NEW);
@@ -433,6 +449,7 @@ static int invalidate_list(struct list_head *head, struct list_head *dispose)
percpu_counter_dec(&nr_inodes_unused);
continue;
}
+ spin_unlock(&inode->i_lock);
busy = 1;
}
return busy;
@@ -470,7 +487,7 @@ static int can_unuse(struct inode *inode)
return 0;
if (inode_has_buffers(inode))
return 0;
- if (atomic_read(&inode->i_count))
+ if (iref_read(inode))
return 0;
if (inode->i_data.nrpages)
return 0;
@@ -506,19 +523,22 @@ static void prune_icache(int nr_to_scan)
inode = list_entry(inode_unused.prev, struct inode, i_lru);
- if (atomic_read(&inode->i_count) ||
- (inode->i_state & ~I_REFERENCED)) {
+ spin_lock(&inode->i_lock);
+ if (inode->i_ref || (inode->i_state & ~I_REFERENCED)) {
+ spin_unlock(&inode->i_lock);
list_del_init(&inode->i_lru);
percpu_counter_dec(&nr_inodes_unused);
continue;
}
if (inode->i_state & I_REFERENCED) {
+ spin_unlock(&inode->i_lock);
list_move(&inode->i_lru, &inode_unused);
inode->i_state &= ~I_REFERENCED;
continue;
}
if (inode_has_buffers(inode) || inode->i_data.nrpages) {
iref_locked(inode);
+ spin_unlock(&inode->i_lock);
spin_unlock(&inode_lock);
if (remove_inode_buffers(inode))
reap += invalidate_mapping_pages(&inode->i_data,
@@ -535,7 +555,8 @@ static void prune_icache(int nr_to_scan)
list_move(&inode->i_lru, &inode_unused);
continue;
}
- }
+ } else
+ spin_unlock(&inode->i_lock);
list_move(&inode->i_lru, &freeable);
list_del_init(&inode->i_io);
WARN_ON(inode->i_state & I_NEW);
@@ -788,7 +809,9 @@ static struct inode *get_new_inode(struct super_block *sb,
* us. Use the old inode instead of the one we just
* allocated.
*/
+ spin_lock(&old->i_lock);
iref_locked(old);
+ spin_unlock(&old->i_lock);
spin_unlock(&inode_lock);
destroy_inode(inode);
inode = old;
@@ -835,7 +858,9 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
* us. Use the old inode instead of the one we just
* allocated.
*/
+ spin_lock(&old->i_lock);
iref_locked(old);
+ spin_unlock(&old->i_lock);
spin_unlock(&inode_lock);
destroy_inode(inode);
inode = old;
@@ -887,9 +912,11 @@ EXPORT_SYMBOL(iunique);
struct inode *igrab(struct inode *inode)
{
spin_lock(&inode_lock);
- if (!(inode->i_state & (I_FREEING|I_WILL_FREE)))
+ if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
+ spin_lock(&inode->i_lock);
iref_locked(inode);
- else
+ spin_unlock(&inode->i_lock);
+ } else
/*
* Handle the case where s_op->clear_inode is not been
* called yet, and somebody is calling igrab
@@ -929,7 +956,9 @@ static struct inode *ifind(struct super_block *sb,
spin_lock(&inode_lock);
inode = find_inode(sb, head, test, data);
if (inode) {
+ spin_lock(&inode->i_lock);
iref_locked(inode);
+ spin_unlock(&inode->i_lock);
spin_unlock(&inode_lock);
if (likely(wait))
wait_on_inode(inode);
@@ -962,7 +991,9 @@ static struct inode *ifind_fast(struct super_block *sb,
spin_lock(&inode_lock);
inode = find_inode_fast(sb, head, ino);
if (inode) {
+ spin_lock(&inode->i_lock);
iref_locked(inode);
+ spin_unlock(&inode->i_lock);
spin_unlock(&inode_lock);
wait_on_inode(inode);
return inode;
@@ -1145,7 +1176,9 @@ int insert_inode_locked(struct inode *inode)
spin_unlock(&inode_lock);
return 0;
}
+ spin_lock(&old->i_lock);
iref_locked(old);
+ spin_unlock(&old->i_lock);
spin_unlock(&inode_lock);
wait_on_inode(old);
if (unlikely(!hlist_unhashed(&old->i_hash))) {
@@ -1184,7 +1217,9 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
spin_unlock(&inode_lock);
return 0;
}
+ spin_lock(&old->i_lock);
iref_locked(old);
+ spin_unlock(&old->i_lock);
spin_unlock(&inode_lock);
wait_on_inode(old);
if (unlikely(!hlist_unhashed(&old->i_hash))) {
@@ -1324,8 +1359,16 @@ void iput(struct inode *inode)
if (inode) {
BUG_ON(inode->i_state & I_CLEAR);
- if (atomic_dec_and_lock(&inode->i_count, &inode_lock))
+ spin_lock(&inode_lock);
+ spin_lock(&inode->i_lock);
+ inode->i_ref--;
+ if (inode->i_ref == 0) {
+ spin_unlock(&inode->i_lock);
iput_final(inode);
+ return;
+ }
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_lock);
}
}
EXPORT_SYMBOL(iput);
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 3e2f19b..d7fc5d0 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -506,8 +506,8 @@ nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
state->owner = owner;
atomic_inc(&owner->so_count);
list_add(&state->inode_states, &nfsi->open_states);
- state->inode = igrab(inode);
spin_unlock(&inode->i_lock);
+ state->inode = igrab(inode);
/* Note: The reclaim code dictates that we add stateless
* and read-only stateids to the end of the list */
list_add_tail(&state->open_states, &owner->so_states);
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 2ee524f..435ba11 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -480,7 +480,7 @@ nilfs_mdt_new_common(struct the_nilfs *nilfs, struct super_block *sb,
inode->i_sb = sb; /* sb may be NULL for some meta data files */
inode->i_blkbits = nilfs->ns_blocksize_bits;
inode->i_flags = 0;
- atomic_set(&inode->i_count, 1);
+ inode->i_ref = 1;
inode->i_nlink = 1;
inode->i_ino = ino;
inode->i_mode = S_IFREG;
diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
index 6c54e02..2fe319b 100644
--- a/fs/notify/inode_mark.c
+++ b/fs/notify/inode_mark.c
@@ -257,7 +257,8 @@ void fsnotify_unmount_inodes(struct list_head *list)
* actually evict all unreferenced inodes from icache which is
* unnecessarily violent and may in fact be illegal to do.
*/
- if (!iref_read(inode))
+ spin_lock(&inode->i_lock);
+ if (!inode->i_ref) {
+ spin_unlock(&inode->i_lock);
continue;
+ }
need_iput_tmp = need_iput;
@@ -268,12 +269,17 @@ void fsnotify_unmount_inodes(struct list_head *list)
iref_locked(inode);
else
need_iput_tmp = NULL;
+ spin_unlock(&inode->i_lock);
/* In case the dropping of a reference would nuke next_i. */
- if ((&next_i->i_sb_list != list) && iref_read(inode) &&
- !(next_i->i_state & (I_FREEING | I_WILL_FREE))) {
- iref_locked(next_i);
- need_iput = next_i;
+ if (&next_i->i_sb_list != list) {
+ spin_lock(&next_i->i_lock);
+ if (inode->i_ref &&
+ !(next_i->i_state & (I_FREEING | I_WILL_FREE))) {
+ iref_locked(next_i);
+ need_iput = next_i;
+ }
+ spin_unlock(&next_i->i_lock);
}
/*
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 6f0df2a..1162c10 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -730,7 +730,7 @@ struct inode {
struct list_head i_sb_list;
struct list_head i_dentry;
unsigned long i_ino;
- atomic_t i_count;
+ unsigned int i_ref;
unsigned int i_nlink;
uid_t i_uid;
gid_t i_gid;
--
1.7.1
Thread overview: 162+ messages
2010-10-08 5:21 fs: Inode cache scalability V2 Dave Chinner
2010-10-08 5:21 ` [PATCH 01/18] kernel: add bl_list Dave Chinner
2010-10-08 8:18 ` Andi Kleen
2010-10-08 10:33 ` Dave Chinner
2010-10-08 5:21 ` [PATCH 02/18] fs: Convert nr_inodes and nr_unused to per-cpu counters Dave Chinner
2010-10-08 7:01 ` Christoph Hellwig
2010-10-08 5:21 ` [PATCH 03/18] fs: keep inode with backing-dev Dave Chinner
2010-10-08 7:01 ` Christoph Hellwig
2010-10-08 7:27 ` Dave Chinner
2010-10-08 5:21 ` [PATCH 04/18] fs: Implement lazy LRU updates for inodes Dave Chinner
2010-10-08 7:08 ` Christoph Hellwig
2010-10-08 7:31 ` Dave Chinner
2010-10-08 9:08 ` Al Viro
2010-10-08 9:51 ` Dave Chinner
2010-10-08 5:21 ` [PATCH 05/18] fs: inode split IO and LRU lists Dave Chinner
2010-10-08 7:14 ` Christoph Hellwig
2010-10-08 7:38 ` Dave Chinner
2010-10-08 9:16 ` Al Viro
2010-10-08 9:58 ` Dave Chinner
2010-10-08 5:21 ` [PATCH 06/18] fs: Clean up inode reference counting Dave Chinner
2010-10-08 7:20 ` Christoph Hellwig
2010-10-08 7:46 ` Dave Chinner
2010-10-08 8:15 ` Christoph Hellwig
2010-10-08 5:21 ` [PATCH 07/18] exofs: use iput() for inode reference count decrements Dave Chinner
2010-10-08 7:21 ` Christoph Hellwig
2010-10-16 7:56 ` Nick Piggin
2010-10-16 16:29 ` Christoph Hellwig
2010-10-17 15:41 ` Boaz Harrosh
2010-10-08 5:21 ` [PATCH 08/18] fs: add inode reference coutn read accessor Dave Chinner
2010-10-08 7:24 ` Christoph Hellwig
2010-10-08 5:21 ` Dave Chinner [this message]
2010-10-08 7:27 ` [PATCH 09/18] fs: rework icount to be a locked variable Christoph Hellwig
2010-10-08 7:50 ` Dave Chinner
2010-10-08 8:17 ` Christoph Hellwig
2010-10-08 13:16 ` Chris Mason
2010-10-08 9:32 ` Al Viro
2010-10-08 10:15 ` Dave Chinner
2010-10-08 13:14 ` Chris Mason
2010-10-08 13:53 ` Christoph Hellwig
2010-10-08 14:09 ` Dave Chinner
2010-10-08 5:21 ` [PATCH 10/18] fs: Factor inode hash operations into functions Dave Chinner
2010-10-08 7:29 ` Christoph Hellwig
2010-10-08 9:41 ` Al Viro
2010-10-08 5:21 ` [PATCH 11/18] fs: Introduce per-bucket inode hash locks Dave Chinner
2010-10-08 7:33 ` Christoph Hellwig
2010-10-08 7:51 ` Dave Chinner
2010-10-08 9:49 ` Al Viro
2010-10-08 9:51 ` Christoph Hellwig
2010-10-08 13:43 ` Christoph Hellwig
2010-10-08 14:17 ` Dave Chinner
2010-10-08 18:54 ` Christoph Hellwig
2010-10-16 7:57 ` Nick Piggin
2010-10-16 16:16 ` Christoph Hellwig
2010-10-16 17:12 ` Nick Piggin
2010-10-17 0:45 ` Christoph Hellwig
2010-10-17 2:06 ` Nick Piggin
2010-10-17 0:46 ` Dave Chinner
2010-10-17 2:25 ` Nick Piggin
2010-10-18 16:16 ` Andi Kleen
2010-10-18 16:21 ` Christoph Hellwig
2010-10-19 7:00 ` Nick Piggin
2010-10-19 16:50 ` Christoph Hellwig
2010-10-20 3:11 ` Nick Piggin
2010-10-24 15:44 ` Thomas Gleixner
2010-10-24 21:17 ` Nick Piggin
2010-10-25 4:41 ` Thomas Gleixner
2010-10-25 7:04 ` Thomas Gleixner
2010-10-26 0:12 ` Nick Piggin
2010-10-26 0:06 ` Nick Piggin
2010-10-08 5:21 ` [PATCH 12/18] fs: add a per-superblock lock for the inode list Dave Chinner
2010-10-08 7:35 ` Christoph Hellwig
2010-10-08 5:21 ` [PATCH 13/18] fs: split locking of inode writeback and LRU lists Dave Chinner
2010-10-08 7:42 ` Christoph Hellwig
2010-10-08 8:00 ` Dave Chinner
2010-10-08 8:18 ` Christoph Hellwig
2010-10-16 7:57 ` Nick Piggin
2010-10-16 16:20 ` Christoph Hellwig
2010-10-16 17:19 ` Nick Piggin
2010-10-17 1:00 ` Dave Chinner
2010-10-17 2:20 ` Nick Piggin
2010-10-08 5:21 ` [PATCH 14/18] fs: Protect inode->i_state with th einode->i_lock Dave Chinner
2010-10-08 7:49 ` Christoph Hellwig
2010-10-08 8:04 ` Dave Chinner
2010-10-08 8:18 ` Christoph Hellwig
2010-10-16 7:57 ` Nick Piggin
2010-10-16 16:19 ` Christoph Hellwig
2010-10-09 8:05 ` Christoph Hellwig
2010-10-09 14:52 ` Matthew Wilcox
2010-10-10 2:01 ` Dave Chinner
2010-10-08 5:21 ` [PATCH 15/18] fs: introduce a per-cpu last_ino allocator Dave Chinner
2010-10-08 7:53 ` Christoph Hellwig
2010-10-08 8:05 ` Dave Chinner
2010-10-08 8:22 ` Andi Kleen
2010-10-08 8:44 ` Christoph Hellwig
2010-10-08 9:58 ` Al Viro
2010-10-08 10:09 ` Andi Kleen
2010-10-08 10:19 ` Al Viro
2010-10-08 10:20 ` Eric Dumazet
2010-10-08 9:56 ` Al Viro
2010-10-08 10:03 ` Christoph Hellwig
2010-10-08 10:20 ` Eric Dumazet
2010-10-08 13:48 ` Christoph Hellwig
2010-10-08 14:06 ` Eric Dumazet
2010-10-08 19:10 ` Christoph Hellwig
2010-10-09 17:14 ` Matthew Wilcox
2010-10-16 7:57 ` Nick Piggin
2010-10-16 16:22 ` Christoph Hellwig
2010-10-16 17:21 ` Nick Piggin
2010-10-08 5:21 ` [PATCH 16/18] fs: Make iunique independent of inode_lock Dave Chinner
2010-10-08 7:55 ` Christoph Hellwig
2010-10-08 8:06 ` Dave Chinner
2010-10-08 8:19 ` Christoph Hellwig
2010-10-08 5:21 ` [PATCH 17/18] fs: icache remove inode_lock Dave Chinner
2010-10-08 8:03 ` Christoph Hellwig
2010-10-08 8:09 ` Dave Chinner
2010-10-13 7:20 ` Nick Piggin
2010-10-13 7:27 ` Nick Piggin
2010-10-13 11:28 ` Christoph Hellwig
2010-10-13 12:03 ` Nick Piggin
2010-10-13 12:20 ` Christoph Hellwig
2010-10-13 12:25 ` Nick Piggin
2010-10-13 10:42 ` Eric Dumazet
2010-10-13 12:07 ` Nick Piggin
2010-10-13 11:25 ` Christoph Hellwig
2010-10-13 12:30 ` Nick Piggin
2010-10-13 23:23 ` Dave Chinner
2010-10-14 9:06 ` Nick Piggin
2010-10-14 9:13 ` Nick Piggin
2010-10-14 14:41 ` Christoph Hellwig
2010-10-15 0:14 ` Nick Piggin
2010-10-15 3:13 ` Dave Chinner
2010-10-15 3:30 ` Nick Piggin
2010-10-15 3:44 ` Nick Piggin
2010-10-15 6:41 ` Nick Piggin
2010-10-15 10:59 ` Dave Chinner
2010-10-15 13:03 ` Nick Piggin
2010-10-15 13:29 ` Nick Piggin
2010-10-15 17:33 ` Nick Piggin
2010-10-15 17:52 ` Christoph Hellwig
2010-10-15 18:02 ` Nick Piggin
2010-10-15 18:14 ` Nick Piggin
2010-10-16 2:09 ` Nick Piggin
2010-10-15 14:11 ` Nick Piggin
2010-10-15 20:50 ` Nick Piggin
2010-10-15 20:56 ` Nick Piggin
2010-10-15 4:04 ` Nick Piggin
2010-10-15 11:33 ` Dave Chinner
2010-10-15 13:14 ` Nick Piggin
2010-10-15 15:38 ` Nick Piggin
2010-10-16 7:57 ` Nick Piggin
2010-10-08 5:21 ` [PATCH 18/18] fs: Reduce inode I_FREEING and factor inode disposal Dave Chinner
2010-10-08 8:11 ` Christoph Hellwig
2010-10-08 10:18 ` Al Viro
2010-10-08 10:52 ` Dave Chinner
2010-10-08 12:10 ` Al Viro
2010-10-08 13:55 ` Dave Chinner
2010-10-09 17:22 ` Matthew Wilcox
2010-10-09 8:08 ` [PATCH 19/18] fs: split __inode_add_to_list Christoph Hellwig
2010-10-12 10:47 ` Dave Chinner
2010-10-12 11:31 ` Christoph Hellwig
2010-10-12 12:05 ` Dave Chinner
2010-10-09 11:18 ` [PATCH 20/18] fs: do not assign default i_ino in new_inode Christoph Hellwig