[PATCH v4 2/4] target: ensure se_cmd->t_prot_sg is allocated when required
From: Akinobu Mita @ 2015-05-01 6:23 UTC
To: target-devel
Cc: Akinobu Mita, Nicholas Bellinger, Sagi Grimberg,
Martin K. Petersen, Christoph Hellwig, James E.J. Bottomley,
linux-scsi

Even if the device backend is initialized with protection information
enabled, some requests arrive without protection information attached:
a WRITE SAME command issued by the block layer helpers, a WRITE command
submitted with WRPROTECT=0 through the SG_IO ioctl, and so on.

So when the TCM loopback fabric module is used, se_cmd->t_prot_sg is
NULL for these requests, and performing WRITE_INSERT of PI by software
emulation in sbc_dif_generate() causes a kernel crash.

To fix this, introduce SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC in
se_cmd_flags.  It is used to decide whether se_cmd->t_prot_sg needs to
be allocated by the target core, or whether protection information
pre-allocated by the SCSI mid-layer should be used instead.
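
For illustration only (not part of this patch), one way such a request
can be generated from user space is a plain SG_IO pass-through WRITE
with WRPROTECT=0, which carries no protection information.  The device
path, LBA 0 and the 512-byte block size below are placeholders; error
handling is omitted:

  #include <fcntl.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <scsi/sg.h>

  /* Issue a WRITE(10) with WRPROTECT=0, so no PI is attached. */
  int write_without_pi(const char *dev)
  {
  	unsigned char cdb[10] = { 0x2a, 0x00,	/* WRITE(10), WRPROTECT=0 */
  				  0, 0, 0, 0,	/* LBA 0 */
  				  0, 0, 1, 0 };	/* one block */
  	unsigned char buf[512] = { 0 };		/* assumes 512-byte blocks */
  	unsigned char sense[32];
  	struct sg_io_hdr io;
  	int fd = open(dev, O_RDWR);

  	if (fd < 0)
  		return -1;

  	memset(&io, 0, sizeof(io));
  	io.interface_id = 'S';
  	io.dxfer_direction = SG_DXFER_TO_DEV;
  	io.cmd_len = sizeof(cdb);
  	io.cmdp = cdb;
  	io.dxfer_len = sizeof(buf);
  	io.dxferp = buf;
  	io.mx_sb_len = sizeof(sense);
  	io.sbp = sense;
  	io.timeout = 5000;			/* milliseconds */

  	return ioctl(fd, SG_IO, &io);
  }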
Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Nicholas Bellinger <nab@linux-iscsi.org>
Cc: Sagi Grimberg <sagig@mellanox.com>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: target-devel@vger.kernel.org
Cc: linux-scsi@vger.kernel.org
---
* Changes from v3:
- Improve the condition in transport_generic_new_cmd() a bit
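
For reference, the condition now sits at the top of
transport_generic_new_cmd() and skips the allocation when the fabric
has already passed in its own protection SGL (shown in full in the
hunk below):

  /* allocate t_prot_sg only when the fabric did not supply one */
  if (cmd->prot_op != TARGET_PROT_NORMAL &&
      !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
  	ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents,
  			       cmd->prot_length, true);
  	if (ret < 0)
  		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
  }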
drivers/target/target_core_transport.c | 27 +++++++++++++++------------
include/target/target_core_base.h | 1 +
2 files changed, 16 insertions(+), 12 deletions(-)
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 7a9e7e2..1947e67 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1450,6 +1450,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
if (sgl_prot_count) {
se_cmd->t_prot_sg = sgl_prot;
se_cmd->t_prot_nents = sgl_prot_count;
+ se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
}
/*
@@ -2181,6 +2182,12 @@ static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
static inline void transport_free_pages(struct se_cmd *cmd)
{
+ if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
+ transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
+ cmd->t_prot_sg = NULL;
+ cmd->t_prot_nents = 0;
+ }
+
if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
/*
* Release special case READ buffer payload required for
@@ -2204,10 +2211,6 @@ static inline void transport_free_pages(struct se_cmd *cmd)
transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
cmd->t_bidi_data_sg = NULL;
cmd->t_bidi_data_nents = 0;
-
- transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
- cmd->t_prot_sg = NULL;
- cmd->t_prot_nents = 0;
}
/**
@@ -2346,6 +2349,14 @@ transport_generic_new_cmd(struct se_cmd *cmd)
int ret = 0;
bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
+ if (cmd->prot_op != TARGET_PROT_NORMAL &&
+ !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
+ ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents,
+ cmd->prot_length, true);
+ if (ret < 0)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+
/*
* Determine is the TCM fabric module has already allocated physical
* memory, and is directly calling transport_generic_map_mem_to_cmd()
@@ -2371,14 +2382,6 @@ transport_generic_new_cmd(struct se_cmd *cmd)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
- if (cmd->prot_op != TARGET_PROT_NORMAL) {
- ret = target_alloc_sgl(&cmd->t_prot_sg,
- &cmd->t_prot_nents,
- cmd->prot_length, true);
- if (ret < 0)
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- }
-
ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
cmd->data_length, zero_flag);
if (ret < 0)
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 480e9f8..13efcdd 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -167,6 +167,7 @@ enum se_cmd_flags_table {
SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000,
SCF_COMPARE_AND_WRITE = 0x00080000,
SCF_COMPARE_AND_WRITE_POST = 0x00100000,
+ SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000,
};
/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
--
1.9.1

[PATCH v4 4/4] target: handle odd SG mapping for data transfer memory
From: Akinobu Mita @ 2015-05-01 6:23 UTC
To: target-devel
Cc: Akinobu Mita, Tim Chen, Herbert Xu, David S. Miller, linux-crypto,
Nicholas Bellinger, Sagi Grimberg, Martin K. Petersen,
Christoph Hellwig, James E.J. Bottomley, linux-scsi

sbc_dif_generate() and sbc_dif_verify() currently assume that no SG
element of the data transfer memory straddles a block size boundary.

However, when using the SG_IO ioctl, the data transfer memory can be
chosen so that it does not satisfy that alignment requirement.

In order to handle such cases correctly, this change swaps the loops so
that the outer loop iterates over the protection information SG list
and the data transfer memory is walked inside it, which makes it
possible to calculate the CRC for a block that straddles multiple data
SG elements.
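
For illustration only (not part of this patch), such unaligned data
transfer memory can be requested from user space by handing SG_IO an
iovec whose element sizes are not multiples of the block size.  Whether
the odd boundaries actually survive down to the target depends on the
device's DMA alignment constraints (the block layer may bounce the
buffer).  The device path and the 512-byte block size are placeholders;
error handling is omitted:

  #include <fcntl.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <scsi/sg.h>

  /* READ(10) of two 512-byte blocks into buffers split at an odd boundary. */
  int read_with_odd_sg(const char *dev)
  {
  	unsigned char cdb[10] = { 0x28, 0, 0, 0, 0, 0, 0, 0, 2, 0 };
  	unsigned char a[300], b[724];		/* 1024 bytes total, odd split */
  	struct sg_iovec iov[2] = {
  		{ .iov_base = a, .iov_len = sizeof(a) },
  		{ .iov_base = b, .iov_len = sizeof(b) },
  	};
  	unsigned char sense[32];
  	struct sg_io_hdr io;
  	int fd = open(dev, O_RDONLY);

  	if (fd < 0)
  		return -1;

  	memset(&io, 0, sizeof(io));
  	io.interface_id = 'S';
  	io.dxfer_direction = SG_DXFER_FROM_DEV;
  	io.cmd_len = sizeof(cdb);
  	io.cmdp = cdb;
  	io.iovec_count = 2;			/* dxferp points to the iovec array */
  	io.dxfer_len = sizeof(a) + sizeof(b);
  	io.dxferp = iov;
  	io.mx_sb_len = sizeof(sense);
  	io.sbp = sense;
  	io.timeout = 5000;			/* milliseconds */

  	return ioctl(fd, SG_IO, &io);
  }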
Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: linux-crypto@vger.kernel.org
Cc: Nicholas Bellinger <nab@linux-iscsi.org>
Cc: Sagi Grimberg <sagig@mellanox.com>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: target-devel@vger.kernel.org
Cc: linux-scsi@vger.kernel.org
---
* Changes from v3:
- Fix inconsistent address passed to kunmap_atomic, reported by Sagi
- Stop operation in sbc_dif_generate() and sbc_dif_verify() when
reaching the end of data SG elements
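
The per-block guard tag calculation for a block that straddles two data
SG elements now works as follows (condensed from the hunks below; the
unmap of the previous element and the NULL check on sg_next() are
omitted here for brevity):

  unsigned int avail = min(block_size, dsg->length - offset);
  __u16 crc = crc_t10dif(daddr + offset, avail);

  if (avail < block_size) {
  	/* the block continues at the start of the next data SG element */
  	dsg = sg_next(dsg);
  	daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
  	offset = block_size - avail;
  	crc = crc_t10dif_update(crc, daddr, offset);
  } else {
  	offset += block_size;
  }

  sdt->guard_tag = cpu_to_be16(crc);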
drivers/target/target_core_sbc.c | 122 ++++++++++++++++++++++++++-------------
1 file changed, 83 insertions(+), 39 deletions(-)
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index b765cdd..4a2df6d 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -1182,27 +1182,50 @@ sbc_dif_generate(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct se_dif_v1_tuple *sdt;
- struct scatterlist *dsg, *psg = cmd->t_prot_sg;
+ struct scatterlist *dsg = cmd->t_data_sg, *psg;
sector_t sector = cmd->t_task_lba;
void *daddr, *paddr;
int i, j, offset = 0;
+ unsigned int block_size = dev->dev_attrib.block_size;
- for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
- daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+ for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+ daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
- for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
+ for (j = 0; j < psg->length;
+ j += sizeof(struct se_dif_v1_tuple)) {
+ __u16 crc;
+ unsigned int avail;
- if (offset >= psg->length) {
- kunmap_atomic(paddr);
- psg = sg_next(psg);
- paddr = kmap_atomic(sg_page(psg)) + psg->offset;
- offset = 0;
+ if (offset >= dsg->length) {
+ offset -= dsg->length;
+ kunmap_atomic(daddr - dsg->offset);
+ dsg = sg_next(dsg);
+ if (!dsg) {
+ kunmap_atomic(paddr - psg->offset);
+ return;
+ }
+ daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
}
- sdt = paddr + offset;
- sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j,
- dev->dev_attrib.block_size));
+ sdt = paddr + j;
+ avail = min(block_size, dsg->length - offset);
+ crc = crc_t10dif(daddr + offset, avail);
+ if (avail < block_size) {
+ kunmap_atomic(daddr - dsg->offset);
+ dsg = sg_next(dsg);
+ if (!dsg) {
+ kunmap_atomic(paddr - psg->offset);
+ return;
+ }
+ daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+ offset = block_size - avail;
+ crc = crc_t10dif_update(crc, daddr, offset);
+ } else {
+ offset += block_size;
+ }
+
+ sdt->guard_tag = cpu_to_be16(crc);
if (cmd->prot_type == TARGET_DIF_TYPE1_PROT)
sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
sdt->app_tag = 0;
@@ -1215,26 +1238,23 @@ sbc_dif_generate(struct se_cmd *cmd)
be32_to_cpu(sdt->ref_tag));
sector++;
- offset += sizeof(struct se_dif_v1_tuple);
}
- kunmap_atomic(paddr);
- kunmap_atomic(daddr);
+ kunmap_atomic(daddr - dsg->offset);
+ kunmap_atomic(paddr - psg->offset);
}
}
static sense_reason_t
sbc_dif_v1_verify(struct se_cmd *cmd, struct se_dif_v1_tuple *sdt,
- const void *p, sector_t sector, unsigned int ei_lba)
+ __u16 crc, sector_t sector, unsigned int ei_lba)
{
- struct se_device *dev = cmd->se_dev;
- int block_size = dev->dev_attrib.block_size;
__be16 csum;
if (!(cmd->prot_checks & TARGET_DIF_CHECK_GUARD))
goto check_ref;
- csum = cpu_to_be16(crc_t10dif(p, block_size));
+ csum = cpu_to_be16(crc);
if (sdt->guard_tag != csum) {
pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
@@ -1317,26 +1337,36 @@ sbc_dif_verify(struct se_cmd *cmd, sector_t start, unsigned int sectors,
{
struct se_device *dev = cmd->se_dev;
struct se_dif_v1_tuple *sdt;
- struct scatterlist *dsg;
+ struct scatterlist *dsg = cmd->t_data_sg;
sector_t sector = start;
void *daddr, *paddr;
- int i, j;
+ int i;
sense_reason_t rc;
+ int dsg_off = 0;
+ unsigned int block_size = dev->dev_attrib.block_size;
- for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
- daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+ for (; psg && sector < start + sectors; psg = sg_next(psg)) {
paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+ daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
- for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
+ for (i = psg_off; i < psg->length &&
+ sector < start + sectors;
+ i += sizeof(struct se_dif_v1_tuple)) {
+ __u16 crc;
+ unsigned int avail;
- if (psg_off >= psg->length) {
- kunmap_atomic(paddr - psg->offset);
- psg = sg_next(psg);
- paddr = kmap_atomic(sg_page(psg)) + psg->offset;
- psg_off = 0;
+ if (dsg_off >= dsg->length) {
+ dsg_off -= dsg->length;
+ kunmap_atomic(daddr - dsg->offset);
+ dsg = sg_next(dsg);
+ if (!dsg) {
+ kunmap_atomic(paddr - psg->offset);
+ return 0;
+ }
+ daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
}
- sdt = paddr + psg_off;
+ sdt = paddr + i;
pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
" app_tag: 0x%04x ref_tag: %u\n",
@@ -1344,27 +1374,41 @@ sbc_dif_verify(struct se_cmd *cmd, sector_t start, unsigned int sectors,
sdt->app_tag, be32_to_cpu(sdt->ref_tag));
if (sdt->app_tag == cpu_to_be16(0xffff)) {
- sector++;
- psg_off += sizeof(struct se_dif_v1_tuple);
- continue;
+ dsg_off += block_size;
+ goto next;
+ }
+
+ avail = min(block_size, dsg->length - dsg_off);
+ crc = crc_t10dif(daddr + dsg_off, avail);
+ if (avail < block_size) {
+ kunmap_atomic(daddr - dsg->offset);
+ dsg = sg_next(dsg);
+ if (!dsg) {
+ kunmap_atomic(paddr - psg->offset);
+ return 0;
+ }
+ daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+ dsg_off = block_size - avail;
+ crc = crc_t10dif_update(crc, daddr, dsg_off);
+ } else {
+ dsg_off += block_size;
}
- rc = sbc_dif_v1_verify(cmd, sdt, daddr + j, sector,
- ei_lba);
+ rc = sbc_dif_v1_verify(cmd, sdt, crc, sector, ei_lba);
if (rc) {
- kunmap_atomic(paddr - psg->offset);
kunmap_atomic(daddr - dsg->offset);
+ kunmap_atomic(paddr - psg->offset);
cmd->bad_sector = sector;
return rc;
}
-
+next:
sector++;
ei_lba++;
- psg_off += sizeof(struct se_dif_v1_tuple);
}
- kunmap_atomic(paddr - psg->offset);
+ psg_off = 0;
kunmap_atomic(daddr - dsg->offset);
+ kunmap_atomic(paddr - psg->offset);
}
return 0;
--
1.9.1