From: Mike Christie <mchristi@redhat.com>
To: Ross Zwisler <zwisler@gmail.com>,
Ross Zwisler <ross.zwisler@linux.intel.com>,
Dave Chinner <david@fromorbit.com>,
axboe@fb.com
Cc: linux-bcache@vger.kernel.org, linux-block@vger.kernel.org,
XFS Developers <xfs@oss.sgi.com>,
ocfs2-devel@oss.oracle.com, linux-scsi@vger.kernel.org,
konrad.wilk@oracle.com, LKML <linux-kernel@vger.kernel.org>,
philipp.reisner@linbit.com,
linux-f2fs-devel@lists.sourceforge.net,
linux-raid@vger.kernel.org, dm-devel@redhat.com,
target-devel@vger.kernel.org, linux-mtd@lists.infradead.org,
osd-dev@open-osd.org,
linux-fsdevel <linux-fsdevel@vger.kernel.org>,
lars.ellenberg@linbit.com,
linux-ext4 <linux-ext4@vger.kernel.org>,
linux-btrfs@vger.kernel.org, drbd-dev@lists.linbit.com
Subject: Re: [PATCH 37/45] drivers: use req op accessor
Date: Wed, 3 Aug 2016 18:47:47 -0500
Message-ID: <87fbe31f-27b3-ff7a-e381-34fc1aad337e@redhat.com>
In-Reply-To: <CAOxpaSUMsgfTYsL0bL6VCnwxz7bpgpKO-ZfC5J8dFG9PN-hzvg@mail.gmail.com>
[-- Attachment #1: Type: text/plain, Size: 1470 bytes --]
On 08/03/2016 05:33 PM, Ross Zwisler wrote:
> On Sun, Jun 5, 2016 at 1:32 PM, <mchristi@redhat.com> wrote:
>> From: Mike Christie <mchristi@redhat.com>
>>
>> The req operation REQ_OP is separated from the rq_flag_bits
>> definition. This converts the block layer drivers to
>> use req_op to get the op from the request struct.
>>
>> Signed-off-by: Mike Christie <mchristi@redhat.com>
>> ---
>> drivers/block/loop.c | 6 +++---
>> drivers/block/mtip32xx/mtip32xx.c | 2 +-
>> drivers/block/nbd.c | 2 +-
>> drivers/block/rbd.c | 4 ++--
>> drivers/block/xen-blkfront.c | 8 +++++---
>> drivers/ide/ide-floppy.c | 2 +-
>> drivers/md/dm.c | 2 +-
>> drivers/mmc/card/block.c | 7 +++----
>> drivers/mmc/card/queue.c | 6 ++----
>
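For background, req_op() just pulls the operation bits back out of the
combined flags word, so drivers stop poking at the encoding directly.
Here is a rough user-space sketch of that accessor pattern (the shift
value, field name, and op values below are simplified stand-ins, not
the exact kernel definitions):

#include <assert.h>

/* Illustrative encoding only: the op lives in the high bits of a
 * combined flags word; the shift value here is hypothetical. */
#define REQ_OP_SHIFT	29
#define REQ_OP_READ	0
#define REQ_OP_WRITE	1

struct request {
	unsigned int cmd_flags;	/* op in high bits, rq_flag_bits below */
};

/* The accessor recovers the op without exposing the encoding. */
static inline unsigned int req_op(const struct request *req)
{
	return req->cmd_flags >> REQ_OP_SHIFT;
}

int main(void)
{
	struct request rq = { .cmd_flags = REQ_OP_WRITE << REQ_OP_SHIFT };
	assert(req_op(&rq) == REQ_OP_WRITE);
	return 0;
}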
> Dave Chinner reported a deadlock with XFS + DAX, which I reproduced
> and bisected to this commit:
>
> commit c2df40dfb8c015211ec55f4b1dd0587f875c7b34
> Author: Mike Christie <mchristi@redhat.com>
> Date: Sun Jun 5 14:32:17 2016 -0500
> drivers: use req op accessor
>
> Here are the steps to reproduce the deadlock with a BRD ramdisk:
>
> mkfs.xfs -f /dev/ram0
> mount -o dax /dev/ram0 /mnt/scratch
When using ramdisks, we need the attached patch, as in your other bug
report. I think it will fix some of the hangs people are seeing.

I do not think it should cause the failure-to-run issue you saw when
running generic/008 on ext2.
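The conversion itself is mechanical: drop the cached rw/READ checks and
test the op through the accessors instead. A minimal user-space sketch
of the direction test (assuming the patchset's encoding, where write
ops have the low bit set; the names mirror the kernel helpers but the
definitions here are simplified):

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's REQ_OP_* values. */
enum req_op {
	REQ_OP_READ  = 0,
	REQ_OP_WRITE = 1,
};

/* Mirrors the kernel helper: an op is a write if its low bit is set. */
static inline int op_is_write(unsigned int op)
{
	return op & 1;
}

static void do_bvec(unsigned int op)
{
	/* This is the test the attached patch switches brd, zram, btt,
	 * and pmem over to, replacing "if (rw == READ)" checks. */
	if (op_is_write(op))
		printf("write path\n");
	else
		printf("read path\n");
}

int main(void)
{
	do_bvec(REQ_OP_READ);	/* read path */
	do_bvec(REQ_OP_WRITE);	/* write path */
	return 0;
}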
[-- Attachment #2: convert-rw_page.patch --]
[-- Type: text/x-patch, Size: 12108 bytes --]
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 3022dad..9fbbeba 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -300,20 +300,20 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
* Process a single bvec of a bio.
*/
static int brd_do_bvec(struct brd_device *brd, struct page *page,
- unsigned int len, unsigned int off, int rw,
+ unsigned int len, unsigned int off, int op,
sector_t sector)
{
void *mem;
int err = 0;
- if (rw != READ) {
+ if (op_is_write(op)) {
err = copy_to_brd_setup(brd, sector, len);
if (err)
goto out;
}
mem = kmap_atomic(page);
- if (rw == READ) {
+ if (!op_is_write(op)) {
copy_from_brd(mem + off, brd, sector, len);
flush_dcache_page(page);
} else {
@@ -330,7 +330,6 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
{
struct block_device *bdev = bio->bi_bdev;
struct brd_device *brd = bdev->bd_disk->private_data;
- int rw;
struct bio_vec bvec;
sector_t sector;
struct bvec_iter iter;
@@ -347,14 +346,12 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
goto out;
}
- rw = bio_data_dir(bio);
-
bio_for_each_segment(bvec, bio, iter) {
unsigned int len = bvec.bv_len;
int err;
err = brd_do_bvec(brd, bvec.bv_page, len,
- bvec.bv_offset, rw, sector);
+ bvec.bv_offset, bio_op(bio), sector);
if (err)
goto io_error;
sector += len >> SECTOR_SHIFT;
@@ -369,11 +366,11 @@ io_error:
}
static int brd_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, int rw)
+ struct page *page, int op, int op_flags)
{
struct brd_device *brd = bdev->bd_disk->private_data;
- int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, rw, sector);
- page_endio(page, rw & WRITE, err);
+ int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, op, sector);
+ page_endio(page, op, err);
return err;
}
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 7454cf1..f0e126c 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -843,15 +843,15 @@ static void zram_bio_discard(struct zram *zram, u32 index,
}
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
- int offset, int rw)
+ int offset, int op)
{
unsigned long start_time = jiffies;
int ret;
- generic_start_io_acct(rw, bvec->bv_len >> SECTOR_SHIFT,
+ generic_start_io_acct(op, bvec->bv_len >> SECTOR_SHIFT,
&zram->disk->part0);
- if (rw == READ) {
+ if (!op_is_write(op)) {
atomic64_inc(&zram->stats.num_reads);
ret = zram_bvec_read(zram, bvec, index, offset);
} else {
@@ -859,10 +859,10 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
ret = zram_bvec_write(zram, bvec, index, offset);
}
- generic_end_io_acct(rw, &zram->disk->part0, start_time);
+ generic_end_io_acct(op, &zram->disk->part0, start_time);
if (unlikely(ret)) {
- if (rw == READ)
+ if (!op_is_write(op))
atomic64_inc(&zram->stats.failed_reads);
else
atomic64_inc(&zram->stats.failed_writes);
@@ -873,7 +873,7 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
static void __zram_make_request(struct zram *zram, struct bio *bio)
{
- int offset, rw;
+ int offset;
u32 index;
struct bio_vec bvec;
struct bvec_iter iter;
@@ -888,7 +888,6 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
return;
}
- rw = bio_data_dir(bio);
bio_for_each_segment(bvec, bio, iter) {
int max_transfer_size = PAGE_SIZE - offset;
@@ -903,15 +902,18 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
bv.bv_len = max_transfer_size;
bv.bv_offset = bvec.bv_offset;
- if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0)
+ if (zram_bvec_rw(zram, &bv, index, offset,
+ bio_op(bio)) < 0)
goto out;
bv.bv_len = bvec.bv_len - max_transfer_size;
bv.bv_offset += max_transfer_size;
- if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0)
+ if (zram_bvec_rw(zram, &bv, index + 1, 0,
+ bio_op(bio)) < 0)
goto out;
} else
- if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0)
+ if (zram_bvec_rw(zram, &bvec, index, offset,
+ bio_op(bio)) < 0)
goto out;
update_position(&index, &offset, &bvec);
@@ -968,7 +970,7 @@ static void zram_slot_free_notify(struct block_device *bdev,
}
static int zram_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, int rw)
+ struct page *page, int op, int op_flags)
{
int offset, err = -EIO;
u32 index;
@@ -992,7 +994,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
bv.bv_len = PAGE_SIZE;
bv.bv_offset = 0;
- err = zram_bvec_rw(zram, &bv, index, offset, rw);
+ err = zram_bvec_rw(zram, &bv, index, offset, op);
put_zram:
zram_meta_put(zram);
out:
@@ -1005,7 +1007,7 @@ out:
* (e.g., SetPageError, set_page_dirty and extra works).
*/
if (err == 0)
- page_endio(page, rw, 0);
+ page_endio(page, op, 0);
return err;
}
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 9dce03f..6a6208d 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1133,11 +1133,11 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
struct page *page, unsigned int len, unsigned int off,
- int rw, sector_t sector)
+ int op, sector_t sector)
{
int ret;
- if (rw == READ) {
+ if (!op_is_write(op)) {
ret = btt_read_pg(btt, bip, page, off, sector, len);
flush_dcache_page(page);
} else {
@@ -1155,7 +1155,7 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
struct bvec_iter iter;
unsigned long start;
struct bio_vec bvec;
- int err = 0, rw;
+ int err = 0;
bool do_acct;
/*
@@ -1170,7 +1170,6 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
}
do_acct = nd_iostat_start(bio, &start);
- rw = bio_data_dir(bio);
bio_for_each_segment(bvec, bio, iter) {
unsigned int len = bvec.bv_len;
@@ -1181,11 +1180,12 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
BUG_ON(len % btt->sector_size);
err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
- rw, iter.bi_sector);
+ bio_op(bio), iter.bi_sector);
if (err) {
dev_info(&btt->nd_btt->dev,
"io error in %s sector %lld, len %d,\n",
- (rw == READ) ? "READ" : "WRITE",
+ (op_is_write(bio_op(bio))) ? "WRITE" :
+ "READ",
(unsigned long long) iter.bi_sector, len);
bio->bi_error = err;
break;
@@ -1200,12 +1200,12 @@ out:
}
static int btt_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, int rw)
+ struct page *page, int op, int op_flags)
{
struct btt *btt = bdev->bd_disk->private_data;
- btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, rw, sector);
- page_endio(page, rw & WRITE, 0);
+ btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, op, sector);
+ page_endio(page, op, 0);
return 0;
}
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index b511099..6a7b97d 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -67,7 +67,7 @@ static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
}
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
- unsigned int len, unsigned int off, int rw,
+ unsigned int len, unsigned int off, int op,
sector_t sector)
{
int rc = 0;
@@ -79,7 +79,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
bad_pmem = true;
- if (rw == READ) {
+ if (!op_is_write(op)) {
if (unlikely(bad_pmem))
rc = -EIO;
else {
@@ -134,7 +134,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
do_acct = nd_iostat_start(bio, &start);
bio_for_each_segment(bvec, bio, iter) {
rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
- bvec.bv_offset, bio_data_dir(bio),
+ bvec.bv_offset, bio_op(bio),
iter.bi_sector);
if (rc) {
bio->bi_error = rc;
@@ -152,12 +152,12 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
}
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, int rw)
+ struct page *page, int op, int op_flags)
{
struct pmem_device *pmem = bdev->bd_queue->queuedata;
int rc;
- rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, rw, sector);
+ rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, op, sector);
/*
* The ->rw_page interface is subtle and tricky. The core
@@ -166,7 +166,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
* caused by double completion.
*/
if (rc == 0)
- page_endio(page, rw & WRITE, 0);
+ page_endio(page, op, 0);
return rc;
}
diff --git a/fs/block_dev.c b/fs/block_dev.c
index ada42cf..e790ced 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -416,7 +416,8 @@ int bdev_read_page(struct block_device *bdev, sector_t sector,
result = blk_queue_enter(bdev->bd_queue, false);
if (result)
return result;
- result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, READ);
+ result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
+ REQ_OP_READ, 0);
blk_queue_exit(bdev->bd_queue);
return result;
}
@@ -445,7 +446,7 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
struct page *page, struct writeback_control *wbc)
{
int result;
- int rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE;
+ int op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0;
const struct block_device_operations *ops = bdev->bd_disk->fops;
if (!ops->rw_page || bdev_get_integrity(bdev))
@@ -455,7 +456,8 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
return result;
set_page_writeback(page);
- result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, rw);
+ result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
+ REQ_OP_WRITE, op_flags);
if (result)
end_page_writeback(page);
else
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index adf3307..3652408 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1673,7 +1673,8 @@ struct blk_dax_ctl {
struct block_device_operations {
int (*open) (struct block_device *, fmode_t);
void (*release) (struct gendisk *, fmode_t);
- int (*rw_page)(struct block_device *, sector_t, struct page *, int rw);
+ int (*rw_page)(struct block_device *, sector_t, struct page *,
+ int op, int op_flags);
int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *,
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 81363b8..4578637 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -510,7 +510,7 @@ static inline void wait_on_page_writeback(struct page *page)
extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);
-void page_endio(struct page *page, int rw, int err);
+void page_endio(struct page *page, int op, int err);
/*
* Add an arbitrary waiter to a page's wait queue
diff --git a/mm/filemap.c b/mm/filemap.c
index 3083ded..daef091 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -887,9 +887,9 @@ EXPORT_SYMBOL(end_page_writeback);
* After completing I/O on a page, call this routine to update the page
* flags appropriately
*/
-void page_endio(struct page *page, int rw, int err)
+void page_endio(struct page *page, int op, int err)
{
- if (rw == READ) {
+ if (!op_is_write(op)) {
if (!err) {
SetPageUptodate(page);
} else {
@@ -897,7 +897,7 @@ void page_endio(struct page *page, int rw, int err)
SetPageError(page);
}
unlock_page(page);
- } else { /* rw == WRITE */
+ } else {
if (err) {
SetPageError(page);
if (page->mapping)