From: Sarthak Kukreti <sarthakkukreti@chromium.org>
To: dm-devel@redhat.com, linux-block@vger.kernel.org,
linux-ext4@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-fsdevel@vger.kernel.org
Cc: Jens Axboe <axboe@kernel.dk>,
"Michael S. Tsirkin" <mst@redhat.com>,
Jason Wang <jasowang@redhat.com>,
Stefan Hajnoczi <stefanha@redhat.com>,
Alasdair Kergon <agk@redhat.com>,
Mike Snitzer <snitzer@kernel.org>,
Christoph Hellwig <hch@infradead.org>,
Brian Foster <bfoster@redhat.com>, Theodore Ts'o <tytso@mit.edu>,
Andreas Dilger <adilger.kernel@dilger.ca>,
Bart Van Assche <bvanassche@google.com>,
Daniil Lunev <dlunev@google.com>,
"Darrick J. Wong" <djwong@kernel.org>
Subject: [PATCH v4 1/4] block: Introduce provisioning primitives
Date: Tue, 18 Apr 2023 15:12:04 -0700
Message-ID: <20230418221207.244685-2-sarthakkukreti@chromium.org>
In-Reply-To: <20230418221207.244685-1-sarthakkukreti@chromium.org>

Introduce the block request REQ_OP_PROVISION. The intent of this request
is to ask the underlying storage to preallocate disk space for the given
block range. Block devices that support this capability export a provision
limit (max_provision_sectors) within their request queues.
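
As a rough illustration (not part of this patch), a driver that supports
provisioning would advertise its per-command limit while configuring its
request queue, using the blk_queue_max_provision_sectors() helper added
here; the hook name and the limit value below are assumptions:

#include <linux/blkdev.h>

/* Hypothetical driver setup hook: advertise REQ_OP_PROVISION support.
 * A limit of 0 (the default) means provisioning is not supported. */
static void example_driver_set_limits(struct request_queue *q)
{
	/* Accept up to 32 MiB (65536 512-byte sectors) per provision bio. */
	blk_queue_max_provision_sectors(q, 65536);
}

The advertised limit is also visible to userspace via
/sys/block/<dev>/queue/max_provision_sectors (see the blk-sysfs.c hunk
below).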

This patch also adds the capability to call fallocate() with mode 0 on
block devices, which sends REQ_OP_PROVISION to the block device for the
specified range.
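
For completeness, a minimal userspace sketch (device path and sizes are
illustrative, not part of this patch) that exercises this path looks like
an ordinary fallocate() call:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Placeholder device node; replace with a real block device. */
	int fd = open("/dev/sdX", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* mode 0: provision 1 MiB starting at offset 0. With this patch,
	 * the range is forwarded to the device as REQ_OP_PROVISION. */
	if (fallocate(fd, 0, 0, 1 << 20) < 0)
		perror("fallocate");

	close(fd);
	return 0;
}

If the device reports no provision support (max_provision_sectors == 0),
the call above fails with EOPNOTSUPP.
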
Signed-off-by: Sarthak Kukreti <sarthakkukreti@chromium.org>
---
block/blk-core.c | 5 ++++
block/blk-lib.c | 53 +++++++++++++++++++++++++++++++++++++++
block/blk-merge.c | 18 +++++++++++++
block/blk-settings.c | 19 ++++++++++++++
block/blk-sysfs.c | 8 ++++++
block/bounce.c | 1 +
block/fops.c | 25 +++++++++++++-----
include/linux/bio.h | 6 +++--
include/linux/blk_types.h | 5 +++-
include/linux/blkdev.h | 16 ++++++++++++
10 files changed, 147 insertions(+), 9 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 42926e6cb83c..4a2342ba3a8b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -123,6 +123,7 @@ static const char *const blk_op_name[] = {
REQ_OP_NAME(WRITE_ZEROES),
REQ_OP_NAME(DRV_IN),
REQ_OP_NAME(DRV_OUT),
+ REQ_OP_NAME(PROVISION),
};
#undef REQ_OP_NAME
@@ -798,6 +799,10 @@ void submit_bio_noacct(struct bio *bio)
if (!q->limits.max_write_zeroes_sectors)
goto not_supported;
break;
+ case REQ_OP_PROVISION:
+ if (!q->limits.max_provision_sectors)
+ goto not_supported;
+ break;
default:
break;
}
diff --git a/block/blk-lib.c b/block/blk-lib.c
index e59c3069e835..647b6451660b 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -343,3 +343,56 @@ int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
return ret;
}
EXPORT_SYMBOL(blkdev_issue_secure_erase);
+
+/**
+ * blkdev_issue_provision - provision a block range
+ * @bdev: blockdev to write
+ * @sector: start sector
+ * @nr_sects: number of sectors to provision
+ * @gfp_mask: memory allocation flags (for bio_alloc)
+ *
+ * Description:
+ * Issues a provision request to the block device for the range of sectors.
+ * For thinly provisioned block devices, this acts as a signal for the
+ * underlying storage pool to allocate space for this block range.
+ */
+int blkdev_issue_provision(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp)
+{
+ sector_t bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
+ unsigned int max_sectors = bdev_max_provision_sectors(bdev);
+ struct bio *bio = NULL;
+ struct blk_plug plug;
+ int ret = 0;
+
+ if (max_sectors == 0)
+ return -EOPNOTSUPP;
+ if ((sector | nr_sects) & bs_mask)
+ return -EINVAL;
+ if (bdev_read_only(bdev))
+ return -EPERM;
+
+ blk_start_plug(&plug);
+ for (;;) {
+ unsigned int req_sects = min_t(sector_t, nr_sects, max_sectors);
+
+ bio = blk_next_bio(bio, bdev, 0, REQ_OP_PROVISION, gfp);
+ bio->bi_iter.bi_sector = sector;
+ bio->bi_iter.bi_size = req_sects << SECTOR_SHIFT;
+
+ sector += req_sects;
+ nr_sects -= req_sects;
+ if (!nr_sects) {
+ ret = submit_bio_wait(bio);
+ if (ret == -EOPNOTSUPP)
+ ret = 0;
+ bio_put(bio);
+ break;
+ }
+ cond_resched();
+ }
+ blk_finish_plug(&plug);
+
+ return ret;
+}
+EXPORT_SYMBOL(blkdev_issue_provision);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 6460abdb2426..a3ffebb97a1d 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -158,6 +158,21 @@ static struct bio *bio_split_write_zeroes(struct bio *bio,
return bio_split(bio, lim->max_write_zeroes_sectors, GFP_NOIO, bs);
}
+static struct bio *bio_split_provision(struct bio *bio,
+ const struct queue_limits *lim,
+ unsigned int *nsegs, struct bio_set *bs)
+{
+ *nsegs = 0;
+
+ if (!lim->max_provision_sectors)
+ return NULL;
+
+ if (bio_sectors(bio) <= lim->max_provision_sectors)
+ return NULL;
+
+ return bio_split(bio, lim->max_provision_sectors, GFP_NOIO, bs);
+}
+
/*
* Return the maximum number of sectors from the start of a bio that may be
* submitted as a single request to a block device. If enough sectors remain,
@@ -366,6 +381,9 @@ struct bio *__bio_split_to_limits(struct bio *bio,
case REQ_OP_WRITE_ZEROES:
split = bio_split_write_zeroes(bio, lim, nr_segs, bs);
break;
+ case REQ_OP_PROVISION:
+ split = bio_split_provision(bio, lim, nr_segs, bs);
+ break;
default:
split = bio_split_rw(bio, lim, nr_segs, bs,
get_max_io_size(bio, lim) << SECTOR_SHIFT);
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 896b4654ab00..d303e6614c36 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -59,6 +59,7 @@ void blk_set_default_limits(struct queue_limits *lim)
lim->zoned = BLK_ZONED_NONE;
lim->zone_write_granularity = 0;
lim->dma_alignment = 511;
+ lim->max_provision_sectors = 0;
}
/**
@@ -82,6 +83,7 @@ void blk_set_stacking_limits(struct queue_limits *lim)
lim->max_dev_sectors = UINT_MAX;
lim->max_write_zeroes_sectors = UINT_MAX;
lim->max_zone_append_sectors = UINT_MAX;
+ lim->max_provision_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
@@ -208,6 +210,20 @@ void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
}
EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);
+/**
+ * blk_queue_max_provision_sectors - set max sectors for a single provision
+ *
+ * @q: the request queue for the device
+ * @max_provision_sectors: maximum number of sectors to provision per command
+ **/
+
+void blk_queue_max_provision_sectors(struct request_queue *q,
+ unsigned int max_provision_sectors)
+{
+ q->limits.max_provision_sectors = max_provision_sectors;
+}
+EXPORT_SYMBOL(blk_queue_max_provision_sectors);
+
/**
* blk_queue_max_zone_append_sectors - set max sectors for a single zone append
* @q: the request queue for the device
@@ -578,6 +594,9 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
t->max_segment_size = min_not_zero(t->max_segment_size,
b->max_segment_size);
+ t->max_provision_sectors = min_not_zero(t->max_provision_sectors,
+ b->max_provision_sectors);
+
t->misaligned |= b->misaligned;
alignment = queue_limit_alignment_offset(b, start);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index f1fce1c7fa44..202aa78f933e 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -132,6 +132,12 @@ static ssize_t queue_max_discard_segments_show(struct request_queue *q,
return queue_var_show(queue_max_discard_segments(q), page);
}
+static ssize_t queue_max_provision_sectors_show(struct request_queue *q,
+ char *page)
+{
+ return queue_var_show(queue_max_provision_sectors(q), page);
+}
+
static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
return queue_var_show(q->limits.max_integrity_segments, page);
@@ -599,6 +605,7 @@ QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");
QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
+QUEUE_RO_ENTRY(queue_max_provision_sectors, "max_provision_sectors");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
@@ -648,6 +655,7 @@ static struct attribute *queue_attrs[] = {
&queue_max_sectors_entry.attr,
&queue_max_segments_entry.attr,
&queue_max_discard_segments_entry.attr,
+ &queue_max_provision_sectors_entry.attr,
&queue_max_integrity_segments_entry.attr,
&queue_max_segment_size_entry.attr,
&elv_iosched_entry.attr,
diff --git a/block/bounce.c b/block/bounce.c
index 7cfcb242f9a1..ab9d8723ae64 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -176,6 +176,7 @@ static struct bio *bounce_clone_bio(struct bio *bio_src)
case REQ_OP_DISCARD:
case REQ_OP_SECURE_ERASE:
case REQ_OP_WRITE_ZEROES:
+ case REQ_OP_PROVISION:
break;
default:
bio_for_each_segment(bv, bio_src, iter)
diff --git a/block/fops.c b/block/fops.c
index d2e6be4e3d1c..e1775269654a 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -611,9 +611,13 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
return ret;
}
+#define BLKDEV_FALLOC_FL_TRUNCATE \
+ (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE | \
+ FALLOC_FL_NO_HIDE_STALE)
+
#define BLKDEV_FALLOC_FL_SUPPORTED \
- (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | \
- FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)
+ (BLKDEV_FALLOC_FL_TRUNCATE | FALLOC_FL_KEEP_SIZE | \
+ FALLOC_FL_UNSHARE_RANGE)
static long blkdev_fallocate(struct file *file, int mode, loff_t start,
loff_t len)
@@ -625,7 +629,7 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
int error;
/* Fail if we don't recognize the flags. */
- if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
+ if (mode != 0 && mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
return -EOPNOTSUPP;
/* Don't go off the end of the device. */
@@ -649,11 +653,20 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
filemap_invalidate_lock(inode->i_mapping);
/* Invalidate the page cache, including dirty pages. */
- error = truncate_bdev_range(bdev, file->f_mode, start, end);
- if (error)
- goto fail;
+ if (mode & BLKDEV_FALLOC_FL_TRUNCATE) {
+ error = truncate_bdev_range(bdev, file->f_mode, start, end);
+ if (error)
+ goto fail;
+ }
switch (mode) {
+ case 0:
+ case FALLOC_FL_UNSHARE_RANGE:
+ case FALLOC_FL_KEEP_SIZE:
+ case FALLOC_FL_UNSHARE_RANGE | FALLOC_FL_KEEP_SIZE:
+ error = blkdev_issue_provision(bdev, start >> SECTOR_SHIFT,
+ len >> SECTOR_SHIFT, GFP_KERNEL);
+ break;
case FALLOC_FL_ZERO_RANGE:
case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
diff --git a/include/linux/bio.h b/include/linux/bio.h
index d766be7152e1..9820b3b039f2 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -57,7 +57,8 @@ static inline bool bio_has_data(struct bio *bio)
bio->bi_iter.bi_size &&
bio_op(bio) != REQ_OP_DISCARD &&
bio_op(bio) != REQ_OP_SECURE_ERASE &&
- bio_op(bio) != REQ_OP_WRITE_ZEROES)
+ bio_op(bio) != REQ_OP_WRITE_ZEROES &&
+ bio_op(bio) != REQ_OP_PROVISION)
return true;
return false;
@@ -67,7 +68,8 @@ static inline bool bio_no_advance_iter(const struct bio *bio)
{
return bio_op(bio) == REQ_OP_DISCARD ||
bio_op(bio) == REQ_OP_SECURE_ERASE ||
- bio_op(bio) == REQ_OP_WRITE_ZEROES;
+ bio_op(bio) == REQ_OP_WRITE_ZEROES ||
+ bio_op(bio) == REQ_OP_PROVISION;
}
static inline void *bio_data(struct bio *bio)
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 99be590f952f..27bdf88f541c 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -385,7 +385,10 @@ enum req_op {
REQ_OP_DRV_IN = (__force blk_opf_t)34,
REQ_OP_DRV_OUT = (__force blk_opf_t)35,
- REQ_OP_LAST = (__force blk_opf_t)36,
+ /* request device to provision block */
+ REQ_OP_PROVISION = (__force blk_opf_t)37,
+
+ REQ_OP_LAST = (__force blk_opf_t)38,
};
enum req_flag_bits {
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 941304f17492..239e2f418b6e 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -303,6 +303,7 @@ struct queue_limits {
unsigned int discard_granularity;
unsigned int discard_alignment;
unsigned int zone_write_granularity;
+ unsigned int max_provision_sectors;
unsigned short max_segments;
unsigned short max_integrity_segments;
@@ -921,6 +922,8 @@ extern void blk_queue_max_discard_sectors(struct request_queue *q,
unsigned int max_discard_sectors);
extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
unsigned int max_write_same_sectors);
+extern void blk_queue_max_provision_sectors(struct request_queue *q,
+ unsigned int max_provision_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
unsigned int max_zone_append_sectors);
@@ -1060,6 +1063,9 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp);
+extern int blkdev_issue_provision(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask);
+
#define BLKDEV_ZERO_NOUNMAP (1 << 0) /* do not free blocks */
#define BLKDEV_ZERO_NOFALLBACK (1 << 1) /* don't write explicit zeroes */
@@ -1139,6 +1145,11 @@ static inline unsigned short queue_max_discard_segments(const struct request_que
return q->limits.max_discard_segments;
}
+static inline unsigned short queue_max_provision_sectors(const struct request_queue *q)
+{
+ return q->limits.max_provision_sectors;
+}
+
static inline unsigned int queue_max_segment_size(const struct request_queue *q)
{
return q->limits.max_segment_size;
@@ -1281,6 +1292,11 @@ static inline bool bdev_nowait(struct block_device *bdev)
return test_bit(QUEUE_FLAG_NOWAIT, &bdev_get_queue(bdev)->queue_flags);
}
+static inline unsigned int bdev_max_provision_sectors(struct block_device *bdev)
+{
+ return bdev_get_queue(bdev)->limits.max_provision_sectors;
+}
+
static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
{
return blk_queue_zoned_model(bdev_get_queue(bdev));
--
2.40.0.634.g4ca3ef3211-goog
Thread overview: 58+ messages
[not found] <20221229071647.437095-1-sarthakkukreti@chromium.org>
2023-04-14 0:02 ` [PATCH v3 0/3] Introduce provisioning primitives for thinly provisioned storage Sarthak Kukreti
2023-04-14 0:02 ` [PATCH v3 1/3] block: Introduce provisioning primitives Sarthak Kukreti
2023-04-17 17:35 ` Brian Foster
2023-04-18 22:13 ` Sarthak Kukreti
2023-04-14 0:02 ` [PATCH v3 2/3] dm: Add support for block provisioning Sarthak Kukreti
[not found] ` <CAJ0trDbyqoKEDN4kzcdn+vWhx+hk6pTM4ndf-E02f3uT9YZ3Uw@mail.gmail.com>
2023-04-14 18:14 ` Mike Snitzer
2023-04-14 21:58 ` Mike Snitzer
2023-04-18 22:13 ` Sarthak Kukreti
2023-04-14 0:02 ` [PATCH v3 3/3] loop: Add support for provision requests Sarthak Kukreti
2023-04-18 22:12 ` [PATCH v4 0/4] Introduce provisioning primitives for thinly provisioned storage Sarthak Kukreti
2023-04-18 22:12 ` Sarthak Kukreti [this message]
2023-04-18 22:43 ` [PATCH v4 1/4] block: Introduce provisioning primitives Bart Van Assche
2023-04-20 17:41 ` Sarthak Kukreti
2023-04-19 15:36 ` [dm-devel] " Darrick J. Wong
2023-04-19 16:17 ` Mike Snitzer
2023-04-19 17:26 ` Darrick J. Wong
2023-04-19 23:21 ` Dave Chinner
2023-04-20 0:53 ` Sarthak Kukreti
2023-04-18 22:12 ` [PATCH v4 2/4] dm: Add block provisioning support Sarthak Kukreti
2023-04-18 22:12 ` [PATCH v4 3/4] dm-thin: Add REQ_OP_PROVISION support Sarthak Kukreti
2023-04-18 22:12 ` [PATCH v4 4/4] loop: Add support for provision requests Sarthak Kukreti
2023-04-20 0:48 ` [PATCH v5 0/5] Introduce block provisioning primitives Sarthak Kukreti
2023-04-20 0:48 ` [PATCH v5 1/5] block: Don't invalidate pagecache for invalid falloc modes Sarthak Kukreti
2023-04-20 1:22 ` Darrick J. Wong
2023-04-20 1:48 ` Sarthak Kukreti
2023-04-20 1:47 ` [PATCH v5-fix " Sarthak Kukreti
2023-04-20 16:20 ` Mike Snitzer
2023-04-20 17:28 ` Sarthak Kukreti
2023-04-20 18:17 ` Sarthak Kukreti
2023-04-20 18:15 ` Sarthak Kukreti
2023-04-24 15:54 ` [PATCH v5 " kernel test robot
2023-05-04 8:50 ` kernel test robot
2023-04-20 0:48 ` [PATCH v5 2/5] block: Introduce provisioning primitives Sarthak Kukreti
2023-04-20 0:48 ` [PATCH v5 3/5] dm: Add block provisioning support Sarthak Kukreti
2023-04-20 0:48 ` [PATCH v5 4/5] dm-thin: Add REQ_OP_PROVISION support Sarthak Kukreti
2023-05-01 19:15 ` Mike Snitzer
2023-05-06 6:32 ` Sarthak Kukreti
2023-04-20 0:48 ` [PATCH v5 5/5] loop: Add support for provision requests Sarthak Kukreti
2023-05-06 6:29 ` [PATCH v6 0/5] Introduce block provisioning primitives Sarthak Kukreti
2023-05-06 6:29 ` [PATCH v6 1/5] block: Don't invalidate pagecache for invalid falloc modes Sarthak Kukreti
2023-05-09 16:51 ` Mike Snitzer
2023-05-12 18:31 ` Darrick J. Wong
2023-05-06 6:29 ` [PATCH v6 2/5] block: Introduce provisioning primitives Sarthak Kukreti
2023-05-09 16:52 ` Mike Snitzer
2023-05-12 18:37 ` Darrick J. Wong
2023-05-15 21:55 ` Sarthak Kukreti
2023-05-06 6:29 ` [PATCH v6 3/5] dm: Add block provisioning support Sarthak Kukreti
2023-05-09 16:52 ` Mike Snitzer
2023-05-06 6:29 ` [PATCH v6 4/5] dm-thin: Add REQ_OP_PROVISION support Sarthak Kukreti
2023-05-09 16:58 ` Mike Snitzer
2023-05-11 20:03 ` Sarthak Kukreti
2023-05-12 14:34 ` Mike Snitzer
2023-05-12 17:32 ` Mike Snitzer
2023-05-15 21:19 ` Sarthak Kukreti
2023-05-06 6:29 ` [PATCH v6 5/5] loop: Add support for provision requests Sarthak Kukreti
2023-05-15 12:40 ` Brian Foster
2023-05-15 21:31 ` Sarthak Kukreti
2023-05-12 18:28 ` [PATCH v6 0/5] Introduce block provisioning primitives Darrick J. Wong