linux-scsi.vger.kernel.org archive mirror
* [PATCH 0/2] TCM Referrals support
@ 2013-10-16  7:25 Hannes Reinecke
  2013-10-16  7:25 ` [PATCH 1/2] target_core_alua: Referrals infrastructure Hannes Reinecke
  2013-10-16  7:25 ` [PATCH 2/2] target_core_alua: Referrals configfs integration Hannes Reinecke
  0 siblings, 2 replies; 7+ messages in thread
From: Hannes Reinecke @ 2013-10-16  7:25 UTC (permalink / raw)
  To: Nic Bellinger; +Cc: Doug Gilber, target-devel, linux-scsi, Hannes Reinecke

Hi Nic,

I've finally bitten the bullet and implemented referrals
support for TCM. It has been on my to-do list for a long
time, but I've finally gotten around to actually _doing_ something about it.

TCM will now enable referrals support when an appropriate
map is fed into the 'lba_map' device attribute. The format is:
  <segment size> <segment multiplier>
  <start lba> <end lba> <pgX:stateY> ... <pgN:stateM>
  ...
  
<pgX> is the target port group ID,
<stateY> is a character denoting the ALUA access state:
'O' for 'Active/Optimized', 'A' for 'Active/Non-Optimized',
'S' for 'Standby', and 'U' for 'Unavailable'.
For further details see SBC-3, Section 'Referrals'.
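
For illustration, a map for two (made-up) target port groups
1 and 2, with a segment size of 512 and a multiplier of 2,
could look like this:

  512 2
  0 511 1:O 2:A
  512 1023 1:A 2:O

Each line has to carry the same number of port group
descriptors, and the whole map is replaced in a single write.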
  
Yes, I know, it's far from perfect and violates the
'one item per attribute' rule. But breaking this
down into several attributes while ensuring the update
is applied consistently _and_ atomically is beyond my
configfs skills. So there.

Hannes Reinecke (2):
  target_core_alua: Referrals infrastructure
  target_core_alua: Referrals configfs integration

 drivers/target/target_core_alua.c      | 252 +++++++++++++++++++++++++++++++++
 drivers/target/target_core_alua.h      |  12 +-
 drivers/target/target_core_configfs.c  | 183 +++++++++++++++++++++++-
 drivers/target/target_core_device.c    |   3 +
 drivers/target/target_core_sbc.c       |   5 +-
 drivers/target/target_core_spc.c       |  20 +++
 drivers/target/target_core_transport.c |  28 +++-
 include/scsi/scsi.h                    |   1 +
 include/target/target_core_base.h      |  18 +++
 9 files changed, 517 insertions(+), 5 deletions(-)

-- 
1.7.12.4


* [PATCH 1/2] target_core_alua: Referrals infrastructure
  2013-10-16  7:25 [PATCH 0/2] TCM Referrals support Hannes Reinecke
@ 2013-10-16  7:25 ` Hannes Reinecke
  2013-10-16 22:28   ` Nicholas A. Bellinger
  2013-10-16  7:25 ` [PATCH 2/2] target_core_alua: Referrals configfs integration Hannes Reinecke
  1 sibling, 1 reply; 7+ messages in thread
From: Hannes Reinecke @ 2013-10-16  7:25 UTC (permalink / raw)
  To: Nic Bellinger; +Cc: Doug Gilber, target-devel, linux-scsi, Hannes Reinecke

Add infrastructure for referrals.

Signed-off-by: Hannes Reinecke <hare@suse.de>
---
 drivers/target/target_core_alua.c     | 151 ++++++++++++++++++++++++++++++++++
 drivers/target/target_core_alua.h     |   4 +-
 drivers/target/target_core_configfs.c |  12 ++-
 drivers/target/target_core_device.c   |   2 +
 drivers/target/target_core_sbc.c      |   5 +-
 drivers/target/target_core_spc.c      |  20 +++++
 include/scsi/scsi.h                   |   1 +
 include/target/target_core_base.h     |  18 ++++
 8 files changed, 209 insertions(+), 4 deletions(-)

diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 166bee6..8f66146 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -56,6 +56,75 @@ static LIST_HEAD(lu_gps_list);
 struct t10_alua_lu_gp *default_lu_gp;
 
 /*
+ * REPORT REFERRALS
+ *
+ * See sbc3r35 section 5.23
+ */
+sense_reason_t
+target_emulate_report_referrals(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct t10_alua_lba_map *map;
+	struct t10_alua_lba_map_member *map_mem;
+	unsigned char *buf;
+	u32 rd_len = 0, off;
+
+	if (cmd->data_length < 4) {
+		pr_warn("REPORT REFERRALS allocation length %u too"
+			" small\n", cmd->data_length);
+		return TCM_INVALID_CDB_FIELD;
+	}
+
+	buf = transport_kmap_data_sg(cmd);
+	if (!buf)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	off = 4;
+	spin_lock(&dev->t10_alua.lba_map_lock);
+	if (list_empty(&dev->t10_alua.lba_map_list)) {
+		spin_unlock(&dev->t10_alua.lba_map_lock);
+		transport_kunmap_data_sg(cmd);
+
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+	}
+
+	list_for_each_entry(map, &dev->t10_alua.lba_map_list,
+			    lba_map_list) {
+		int desc_num = off + 3;
+		int pg_num;
+
+		off += 4;
+		put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
+		off += 8;
+		put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
+		off += 8;
+		rd_len += 20;
+		pg_num = 0;
+		list_for_each_entry(map_mem, &map->lba_map_mem_list,
+				    lba_map_mem_list) {
+			buf[off++] = map_mem->lba_map_mem_alua_state & 0x0f;
+			off++;
+			buf[off++] = (map_mem->lba_map_mem_alua_pg_id >> 8) & 0xff;
+			buf[off++] = (map_mem->lba_map_mem_alua_pg_id & 0xff);
+			rd_len += 4;
+			pg_num++;
+		}
+		buf[desc_num] = pg_num;
+	}
+	spin_unlock(&dev->t10_alua.lba_map_lock);
+
+	/*
+	 * Set the RETURN DATA LENGTH in the header of the DataIN Payload
+	 */
+	put_unaligned_be16(rd_len, &buf[2]);
+
+	transport_kunmap_data_sg(cmd);
+
+	target_complete_cmd(cmd, GOOD);
+	return 0;
+}
+
+/*
  * REPORT_TARGET_PORT_GROUPS
  *
  * See spc4r17 section 6.27
@@ -389,6 +458,80 @@ static inline int core_alua_state_nonoptimized(
 	return 0;
 }
 
+static inline int core_alua_state_lba_dependent(
+	struct se_cmd *cmd,
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	u8 *alua_ascq)
+{
+	struct se_device *dev = cmd->se_dev;
+	u32 segment_size, segment_mult, sectors;
+	u64 lba;
+
+	/* Only need to check for cdb actually containing LBAs */
+	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB))
+		return 0;
+
+	spin_lock(&dev->t10_alua.lba_map_lock);
+	segment_size = dev->t10_alua.lba_map_segment_size;
+	segment_mult = dev->t10_alua.lba_map_segment_multiplier;
+	sectors = cmd->data_length / dev->dev_attrib.block_size;
+
+	lba = cmd->t_task_lba;
+	while (lba < cmd->t_task_lba + sectors) {
+		struct t10_alua_lba_map *cur_map = NULL, *map;
+		struct t10_alua_lba_map_member *map_mem;
+
+		list_for_each_entry(map, &dev->t10_alua.lba_map_list,
+				    lba_map_list) {
+			u64 start_lba, last_lba;
+			u64 first_lba = map->lba_map_first_lba;
+
+			if (segment_mult) {
+				start_lba = lba % (segment_size * segment_mult);
+				last_lba = first_lba + segment_size - 1;
+				if (start_lba >= first_lba &&
+				    start_lba <= last_lba) {
+					lba += segment_size;
+					cur_map = map;
+					break;
+				}
+			} else {
+				last_lba = map->lba_map_last_lba;
+				if (lba >= first_lba && lba <= last_lba) {
+					lba = last_lba + 1;
+					cur_map = map;
+					break;
+				}
+			}
+		}
+		if (!cur_map) {
+			spin_unlock(&dev->t10_alua.lba_map_lock);
+			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+			return 1;
+		}
+		list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
+				    lba_map_mem_list) {
+			if (map_mem->lba_map_mem_alua_pg_id !=
+			    tg_pt_gp->tg_pt_gp_id)
+				continue;
+			switch(map_mem->lba_map_mem_alua_state) {
+			case ALUA_ACCESS_STATE_STANDBY:
+				spin_unlock(&dev->t10_alua.lba_map_lock);
+				*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+				return 1;
+			case ALUA_ACCESS_STATE_UNAVAILABLE:
+				spin_unlock(&dev->t10_alua.lba_map_lock);
+				*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+				return 1;
+			default:
+				break;
+			}
+		}
+	}
+	spin_unlock(&dev->t10_alua.lba_map_lock);
+	return 0;
+}
+
 static inline int core_alua_state_standby(
 	struct se_cmd *cmd,
 	unsigned char *cdb,
@@ -586,6 +729,9 @@ target_alua_state_check(struct se_cmd *cmd)
 	case ALUA_ACCESS_STATE_TRANSITION:
 		ret = core_alua_state_transition(cmd, cdb, &alua_ascq);
 		break;
+	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
+		ret = core_alua_state_lba_dependent(cmd, tg_pt_gp, &alua_ascq);
+		break;
 	/*
 	 * OFFLINE is a secondary ALUA target port group access state, that is
 	 * handled above with struct se_port->sep_tg_pt_secondary_offline=1
@@ -670,6 +816,11 @@ core_alua_check_transition(int state, int valid, int *primary)
 			goto not_supported;
 		*primary = 1;
 		break;
+	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
+		if (!(valid & ALUA_LBD_SUP))
+			goto not_supported;
+		*primary = 1;
+		break;
 	case ALUA_ACCESS_STATE_OFFLINE:
 		/*
 		 * OFFLINE state is defined as a secondary target port
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
index 1a152cd..47950cd 100644
--- a/drivers/target/target_core_alua.h
+++ b/drivers/target/target_core_alua.h
@@ -13,12 +13,13 @@
 /*
  * ASYMMETRIC ACCESS STATE field
  *
- * from spc4r17 section 6.27 Table 245
+ * from spc4r36j section 6.37 Table 307
  */
 #define ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED	0x0
 #define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED	0x1
 #define ALUA_ACCESS_STATE_STANDBY		0x2
 #define ALUA_ACCESS_STATE_UNAVAILABLE		0x3
+#define ALUA_ACCESS_STATE_LBA_DEPENDENT		0x4
 #define ALUA_ACCESS_STATE_OFFLINE		0xe
 #define ALUA_ACCESS_STATE_TRANSITION		0xf
 
@@ -88,6 +89,7 @@ extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
 
 extern sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *);
 extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *);
+extern sense_reason_t target_emulate_report_referrals(struct se_cmd *);
 extern int core_alua_check_nonop_delay(struct se_cmd *);
 extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
 				struct se_device *, struct se_port *,
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 53e9e00..172a54e 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -2054,7 +2054,13 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
 			" transition while TPGS_IMPLICIT_ALUA is disabled\n");
 		return -EINVAL;
 	}
-
+	if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA &&
+	    new_state == ALUA_ACCESS_STATE_LBA_DEPENDENT) {
+		/* LBA DEPENDENT is only allowed with implicit ALUA */
+		pr_err("Unable to process implicit configfs ALUA"
+		       " transition while TPGS_EXPLICIT_ALUA is enabled\n");
+		return -EINVAL;
+	}
 	ret = core_alua_do_port_transition(tg_pt_gp, dev,
 					NULL, NULL, new_state, 0);
 	return (!ret) ? count : -EINVAL;
@@ -2163,7 +2169,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_supported_states(
 		return -EINVAL;
 	}
 	new_states = (int)tmp;
-	valid_states = ALUA_T_SUP | ALUA_O_SUP | ALUA_LBD_SUP | \
+	valid_states = ALUA_T_SUP | ALUA_O_SUP | \
 	    ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;
 
 
@@ -2172,6 +2178,8 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_supported_states(
 				new_states);
 		return -EINVAL;
 	}
+	/* LBD_SUP is set once lba_map has been populated */
+	new_states |= tg_pt_gp->tg_pt_gp_alua_supported_states & ALUA_LBD_SUP;
 
 	tg_pt_gp->tg_pt_gp_alua_supported_states = new_states;
 	return count;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index d17e10d..f71cc33 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -1440,6 +1440,8 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
 	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
 	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
 	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
+	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
+	spin_lock_init(&dev->t10_alua.lba_map_lock);
 
 	dev->t10_wwn.t10_dev = dev;
 	dev->t10_alua.t10_dev = dev;
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 4714c6f..d25683da 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -33,7 +33,7 @@
 
 #include "target_core_internal.h"
 #include "target_core_ua.h"
-
+#include "target_core_alua.h"
 
 static sense_reason_t
 sbc_emulate_readcapacity(struct se_cmd *cmd)
@@ -716,6 +716,9 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		case SAI_READ_CAPACITY_16:
 			cmd->execute_cmd = sbc_emulate_readcapacity_16;
 			break;
+		case SAI_REPORT_REFERRALS:
+			cmd->execute_cmd = target_emulate_report_referrals;
+			break;
 		default:
 			pr_err("Unsupported SA: 0x%02x\n",
 				cmd->t_task_cdb[1] & 0x1f);
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index e39d442..282b5bb 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -476,6 +476,11 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
 	/* If WriteCache emulation is enabled, set V_SUP */
 	if (spc_check_dev_wce(dev))
 		buf[6] = 0x01;
+	/* If an LBA map is present set R_SUP */
+	spin_lock(&cmd->se_dev->t10_alua.lba_map_lock);
+	if (!list_empty(&dev->t10_alua.lba_map_list))
+		buf[8] = 0x10;
+	spin_unlock(&cmd->se_dev->t10_alua.lba_map_lock);
 	return 0;
 }
 
@@ -627,6 +632,20 @@ spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
 	return 0;
 }
 
+/* Referrals VPD page */
+static sense_reason_t
+spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf)
+{
+	struct se_device *dev = cmd->se_dev;
+
+	buf[0] = dev->transport->get_device_type(dev);
+	buf[3] = 0x0c;
+	put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]);
+	put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[12]);
+
+	return 0;
+}
+
 static sense_reason_t
 spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
 
@@ -641,6 +660,7 @@ static struct {
 	{ .page = 0xb0, .emulate = spc_emulate_evpd_b0 },
 	{ .page = 0xb1, .emulate = spc_emulate_evpd_b1 },
 	{ .page = 0xb2, .emulate = spc_emulate_evpd_b2 },
+	{ .page = 0xb3, .emulate = spc_emulate_evpd_b3 },
 };
 
 /* supported vital product data pages */
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 66d42ed..0a4edfe 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -155,6 +155,7 @@ enum scsi_timeouts {
 /* values for service action in */
 #define	SAI_READ_CAPACITY_16  0x10
 #define SAI_GET_LBA_STATUS    0x12
+#define SAI_REPORT_REFERRALS  0x13
 /* values for VARIABLE_LENGTH_CMD service action codes
  * see spc4r17 Section D.3.5, table D.7 and D.8 */
 #define VLC_SA_RECEIVE_CREDENTIAL 0x1800
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index b02bb61..34181ad 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -246,10 +246,28 @@ typedef enum {
 
 struct se_cmd;
 
+struct t10_alua_lba_map_member {
+	struct list_head lba_map_mem_list;
+	int lba_map_mem_alua_state;
+	int lba_map_mem_alua_pg_id;
+};
+
+struct t10_alua_lba_map {
+	u64 lba_map_first_lba;
+	u64 lba_map_last_lba;
+	struct list_head lba_map_list;
+	struct list_head lba_map_mem_list;
+};
+
 struct t10_alua {
 	/* ALUA Target Port Group ID */
 	u16	alua_tg_pt_gps_counter;
 	u32	alua_tg_pt_gps_count;
+	/* Referrals support */
+	spinlock_t lba_map_lock;
+	u32     lba_map_segment_size;
+	u32     lba_map_segment_multiplier;
+	struct list_head lba_map_list;
 	spinlock_t tg_pt_gps_lock;
 	struct se_device *t10_dev;
 	/* Used for default ALUA Target Port Group */
-- 
1.7.12.4



* [PATCH 2/2] target_core_alua: Referrals configfs integration
  2013-10-16  7:25 [PATCH 0/2] TCM Referrals support Hannes Reinecke
  2013-10-16  7:25 ` [PATCH 1/2] target_core_alua: Referrals infrastructure Hannes Reinecke
@ 2013-10-16  7:25 ` Hannes Reinecke
  2013-10-17  0:36   ` Nicholas A. Bellinger
  1 sibling, 1 reply; 7+ messages in thread
From: Hannes Reinecke @ 2013-10-16  7:25 UTC (permalink / raw)
  To: Nic Bellinger; +Cc: Doug Gilber, target-devel, linux-scsi, Hannes Reinecke

Referrals need an LBA map, which must be kept
consistent across all target port groups. So
instead of tying the map to individual target port groups
I've implemented a single device attribute containing the
entire map.

Signed-off-by: Hannes Reinecke <hare@suse.de>
---
 drivers/target/target_core_alua.c      | 101 +++++++++++++++++++
 drivers/target/target_core_alua.h      |   8 ++
 drivers/target/target_core_configfs.c  | 171 +++++++++++++++++++++++++++++++++
 drivers/target/target_core_device.c    |   1 +
 drivers/target/target_core_transport.c |  28 +++++-
 5 files changed, 308 insertions(+), 1 deletion(-)

diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 8f66146..9dd01ff 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -1340,6 +1340,107 @@ static int core_alua_set_tg_pt_secondary_state(
 	return 0;
 }
 
+struct t10_alua_lba_map *
+core_alua_allocate_lba_map(struct list_head *list,
+			   u64 first_lba, u64 last_lba)
+{
+	struct t10_alua_lba_map *lba_map;
+
+	lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL);
+	if (!lba_map) {
+		pr_err("Unable to allocate struct t10_alua_lba_map\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	INIT_LIST_HEAD(&lba_map->lba_map_mem_list);
+	lba_map->lba_map_first_lba = first_lba;
+	lba_map->lba_map_last_lba = last_lba;
+
+	list_add_tail(&lba_map->lba_map_list, list);
+	return lba_map;
+}
+
+int
+core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map,
+			       int pg_id, int state)
+{
+	struct t10_alua_lba_map_member *lba_map_mem;
+
+	list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list,
+			    lba_map_mem_list) {
+		if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) {
+			pr_err("Duplicate pg_id %d in lba_map\n", pg_id);
+			return -EINVAL;
+		}
+	}
+
+	lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL);
+	if (!lba_map_mem) {
+		pr_err("Unable to allocate struct t10_alua_lba_map_mem\n");
+		return -ENOMEM;
+	}
+	lba_map_mem->lba_map_mem_alua_state = state;
+	lba_map_mem->lba_map_mem_alua_pg_id = pg_id;
+
+	list_add_tail(&lba_map_mem->lba_map_mem_list,
+		      &lba_map->lba_map_mem_list);
+	return 0;
+}
+
+void
+core_alua_free_lba_map(struct list_head *lba_list)
+{
+	struct t10_alua_lba_map *lba_map, *lba_map_tmp;
+	struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp;
+
+	list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list,
+				 lba_map_list) {
+		list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp,
+					 &lba_map->lba_map_mem_list,
+					 lba_map_mem_list) {
+			list_del(&lba_map_mem->lba_map_mem_list);
+			kmem_cache_free(t10_alua_lba_map_mem_cache,
+					lba_map_mem);
+		}
+		list_del(&lba_map->lba_map_list);
+		kmem_cache_free(t10_alua_lba_map_cache, lba_map);
+	}
+}
+
+void
+core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list,
+		      int segment_size, int segment_mult)
+{
+	struct list_head old_lba_map_list;
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	int activate = 0, supported;
+
+	INIT_LIST_HEAD(&old_lba_map_list);
+	spin_lock(&dev->t10_alua.lba_map_lock);
+	dev->t10_alua.lba_map_segment_size = segment_size;
+	dev->t10_alua.lba_map_segment_multiplier = segment_mult;
+	list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list);
+	if (lba_map_list) {
+		list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list);
+		activate = 1;
+	}
+	spin_unlock(&dev->t10_alua.lba_map_lock);
+	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
+			    tg_pt_gp_list) {
+
+		if (!tg_pt_gp->tg_pt_gp_valid_id)
+			continue;
+		supported = tg_pt_gp->tg_pt_gp_alua_supported_states;
+		if (activate)
+			supported |= ALUA_LBD_SUP;
+		else
+			supported &= ~ALUA_LBD_SUP;
+		tg_pt_gp->tg_pt_gp_alua_supported_states = supported;
+	}
+	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+	core_alua_free_lba_map(&old_lba_map_list);
+}
+
 struct t10_alua_lu_gp *
 core_alua_allocate_lu_gp(const char *name, int def_group)
 {
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
index 47950cd..0a7d65e 100644
--- a/drivers/target/target_core_alua.h
+++ b/drivers/target/target_core_alua.h
@@ -86,6 +86,8 @@ extern struct kmem_cache *t10_alua_lu_gp_cache;
 extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
 extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
 extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
+extern struct kmem_cache *t10_alua_lba_map_cache;
+extern struct kmem_cache *t10_alua_lba_map_mem_cache;
 
 extern sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *);
 extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *);
@@ -95,6 +97,12 @@ extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
 				struct se_device *, struct se_port *,
 				struct se_node_acl *, int, int);
 extern char *core_alua_dump_status(int);
+extern struct t10_alua_lba_map *core_alua_allocate_lba_map(
+				struct list_head *, u64, u64);
+extern int core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *, int, int);
+extern void core_alua_free_lba_map(struct list_head *);
+extern void core_alua_set_lba_map(struct se_device *, struct list_head *,
+				int, int);
 extern struct t10_alua_lu_gp *core_alua_allocate_lu_gp(const char *, int);
 extern int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *, u16);
 extern void core_alua_free_lu_gp(struct t10_alua_lu_gp *);
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 172a54e..613cafb 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -1741,6 +1741,176 @@ static struct target_core_configfs_attribute target_core_attr_dev_alua_lu_gp = {
 	.store	= target_core_store_alua_lu_gp,
 };
 
+static ssize_t target_core_show_dev_lba_map(void *p, char *page)
+{
+	struct se_device *dev = p;
+	struct t10_alua_lba_map *map;
+	struct t10_alua_lba_map_member *mem;
+	char *b = page;
+	int bl = 0;
+	char state;
+
+	spin_lock(&dev->t10_alua.lba_map_lock);
+	if (!list_empty(&dev->t10_alua.lba_map_list))
+	    bl += sprintf(b + bl, "%u %u\n",
+			  dev->t10_alua.lba_map_segment_size,
+			  dev->t10_alua.lba_map_segment_multiplier);
+	list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) {
+		bl += sprintf(b + bl, "%llu %llu",
+			      map->lba_map_first_lba, map->lba_map_last_lba);
+		list_for_each_entry(mem, &map->lba_map_mem_list,
+				    lba_map_mem_list) {
+			switch (mem->lba_map_mem_alua_state) {
+			case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
+				state = 'O';
+				break;
+			case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+				state = 'A';
+				break;
+			case ALUA_ACCESS_STATE_STANDBY:
+				state = 'S';
+				break;
+			case ALUA_ACCESS_STATE_UNAVAILABLE:
+				state = 'U';
+				break;
+			default:
+				state = '.';
+				break;
+			}
+			bl += sprintf(b + bl, " %d:%c",
+				      mem->lba_map_mem_alua_pg_id, state);
+		}
+		bl += sprintf(b + bl, "\n");
+	}
+	spin_unlock(&dev->t10_alua.lba_map_lock);
+	return bl;
+}
+
+static ssize_t target_core_store_dev_lba_map(
+	void *p,
+	const char *page,
+	size_t count)
+{
+	struct se_device *dev = p;
+	struct t10_alua_lba_map *lba_map = NULL;
+	struct list_head lba_list;
+	char *map_entries, *ptr;
+	char state;
+	int pg_num = -1, pg;
+	int ret = 0, num = 0, pg_id, alua_state;
+	unsigned long start_lba = -1, end_lba = -1;
+	unsigned long segment_size = -1, segment_mult = -1;
+
+	map_entries = kstrdup(page, GFP_KERNEL);
+	if (!map_entries)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&lba_list);
+	while ((ptr = strsep(&map_entries, "\n")) != NULL) {
+		if (!*ptr)
+			continue;
+
+		if (num == 0) {
+			if (sscanf(ptr, "%lu %lu\n",
+				   &segment_size, &segment_mult) != 2) {
+				pr_err("Invalid line %d\n", num);
+				ret = -EINVAL;
+				break;
+			}
+			num++;
+			continue;
+		}
+		if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) {
+			pr_err("Invalid line %d\n", num);
+			ret = -EINVAL;
+			break;
+		}
+		ptr = strchr(ptr, ' ');
+		if (!ptr) {
+			pr_err("Invalid line %d, missing end lba\n", num);
+			ret = -EINVAL;
+			break;
+		}
+		ptr++;
+		ptr = strchr(ptr, ' ');
+		if (!ptr) {
+			pr_err("Invalid line %d, missing state definitions\n",
+			       num);
+			ret = -EINVAL;
+			break;
+		}
+		ptr++;
+		lba_map = core_alua_allocate_lba_map(&lba_list,
+						     start_lba, end_lba);
+		if (IS_ERR(lba_map)) {
+			ret = PTR_ERR(lba_map);
+			break;
+		}
+		pg = 0;
+		while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) {
+			switch (state) {
+			case 'O':
+				alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
+				break;
+			case 'A':
+				alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED;
+				break;
+			case 'S':
+				alua_state = ALUA_ACCESS_STATE_STANDBY;
+				break;
+			case 'U':
+				alua_state = ALUA_ACCESS_STATE_UNAVAILABLE;
+				break;
+			default:
+				pr_err("Invalid ALUA state '%c'\n", state);
+				ret = -EINVAL;
+				goto out;
+			}
+
+			ret = core_alua_allocate_lba_map_mem(lba_map,
+							     pg_id, alua_state);
+			if (ret) {
+				pr_err("Invalid target descriptor %d:%c "
+				       "at line %d\n",
+				       pg_id, state, num);
+				break;
+			}
+			pg++;
+			ptr = strchr(ptr, ' ');
+			if (ptr)
+				ptr++;
+			else
+				break;
+		}
+		if (pg_num == -1)
+		    pg_num = pg;
+		else if (pg != pg_num) {
+			pr_err("Only %d from %d port groups definitions "
+			       "at line %d\n", pg, pg_num, num);
+			ret = -EINVAL;
+			break;
+		}
+		num++;
+	}
+out:
+	if (ret) {
+		core_alua_free_lba_map(&lba_list);
+		count = ret;
+	} else
+		core_alua_set_lba_map(dev, &lba_list,
+				      segment_size, segment_mult);
+	kfree(map_entries);
+	return count;
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_lba_map = {
+	.attr	= { .ca_owner = THIS_MODULE,
+		    .ca_name = "lba_map",
+		    .ca_mode = S_IRUGO | S_IWUSR },
+	.show	= target_core_show_dev_lba_map,
+	.store	= target_core_store_dev_lba_map,
+};
+
 static struct configfs_attribute *lio_core_dev_attrs[] = {
 	&target_core_attr_dev_info.attr,
 	&target_core_attr_dev_control.attr,
@@ -1748,6 +1918,7 @@ static struct configfs_attribute *lio_core_dev_attrs[] = {
 	&target_core_attr_dev_udev_path.attr,
 	&target_core_attr_dev_enable.attr,
 	&target_core_attr_dev_alua_lu_gp.attr,
+	&target_core_attr_dev_lba_map.attr,
 	NULL,
 };
 
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index f71cc33..6db76af 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -1578,6 +1578,7 @@ void target_free_device(struct se_device *dev)
 	}
 
 	core_alua_free_lu_gp_mem(dev);
+	core_alua_set_lba_map(dev, NULL, 0, 0);
 	core_scsi3_free_all_registrations(dev);
 	se_release_vpd_for_dev(dev);
 
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 98bb7c4..e34d4b4 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -63,6 +63,8 @@ struct kmem_cache *t10_alua_lu_gp_cache;
 struct kmem_cache *t10_alua_lu_gp_mem_cache;
 struct kmem_cache *t10_alua_tg_pt_gp_cache;
 struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
+struct kmem_cache *t10_alua_lba_map_cache;
+struct kmem_cache *t10_alua_lba_map_mem_cache;
 
 static void transport_complete_task_attr(struct se_cmd *cmd);
 static void transport_handle_queue_full(struct se_cmd *cmd,
@@ -129,14 +131,36 @@ int init_se_kmem_caches(void)
 				"mem_t failed\n");
 		goto out_free_tg_pt_gp_cache;
 	}
+	t10_alua_lba_map_cache = kmem_cache_create(
+			"t10_alua_lba_map_cache",
+			sizeof(struct t10_alua_lba_map),
+			__alignof__(struct t10_alua_lba_map), 0, NULL);
+	if (!t10_alua_lba_map_cache) {
+		pr_err("kmem_cache_create() for t10_alua_lba_map_"
+				"cache failed\n");
+		goto out_free_tg_pt_gp_mem_cache;
+	}
+	t10_alua_lba_map_mem_cache = kmem_cache_create(
+			"t10_alua_lba_map_mem_cache",
+			sizeof(struct t10_alua_lba_map_member),
+			__alignof__(struct t10_alua_lba_map_member), 0, NULL);
+	if (!t10_alua_lba_map_mem_cache) {
+		pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
+				"cache failed\n");
+		goto out_free_lba_map_cache;
+	}
 
 	target_completion_wq = alloc_workqueue("target_completion",
 					       WQ_MEM_RECLAIM, 0);
 	if (!target_completion_wq)
-		goto out_free_tg_pt_gp_mem_cache;
+		goto out_free_lba_map_mem_cache;
 
 	return 0;
 
+out_free_lba_map_mem_cache:
+	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
+out_free_lba_map_cache:
+	kmem_cache_destroy(t10_alua_lba_map_cache);
 out_free_tg_pt_gp_mem_cache:
 	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
 out_free_tg_pt_gp_cache:
@@ -165,6 +189,8 @@ void release_se_kmem_caches(void)
 	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
 	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
 	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
+	kmem_cache_destroy(t10_alua_lba_map_cache);
+	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
 }
 
 /* This code ensures unique mib indexes are handed out. */
-- 
1.7.12.4



* Re: [PATCH 1/2] target_core_alua: Referrals infrastructure
  2013-10-16  7:25 ` [PATCH 1/2] target_core_alua: Referrals infrastructure Hannes Reinecke
@ 2013-10-16 22:28   ` Nicholas A. Bellinger
  2013-10-17  7:38     ` Hannes Reinecke
  0 siblings, 1 reply; 7+ messages in thread
From: Nicholas A. Bellinger @ 2013-10-16 22:28 UTC (permalink / raw)
  To: Hannes Reinecke; +Cc: Nic Bellinger, Doug Gilber, target-devel, linux-scsi

On Wed, 2013-10-16 at 09:25 +0200, Hannes Reinecke wrote:
> Add infrastructure for referrals.
> 
> Signed-off-by: Hannes Reinecke <hare@suse.de>
> ---
>  drivers/target/target_core_alua.c     | 151 ++++++++++++++++++++++++++++++++++
>  drivers/target/target_core_alua.h     |   4 +-
>  drivers/target/target_core_configfs.c |  12 ++-
>  drivers/target/target_core_device.c   |   2 +
>  drivers/target/target_core_sbc.c      |   5 +-
>  drivers/target/target_core_spc.c      |  20 +++++
>  include/scsi/scsi.h                   |   1 +
>  include/target/target_core_base.h     |  18 ++++
>  8 files changed, 209 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
> index 166bee6..8f66146 100644
> --- a/drivers/target/target_core_alua.c
> +++ b/drivers/target/target_core_alua.c
> @@ -56,6 +56,75 @@ static LIST_HEAD(lu_gps_list);
>  struct t10_alua_lu_gp *default_lu_gp;
>  
>  /*
> + * REPORT REFERRALS
> + *
> + * See sbc3r35 section 5.23
> + */
> +sense_reason_t
> +target_emulate_report_referrals(struct se_cmd *cmd)
> +{
> +	struct se_device *dev = cmd->se_dev;
> +	struct t10_alua_lba_map *map;
> +	struct t10_alua_lba_map_member *map_mem;
> +	unsigned char *buf;
> +	u32 rd_len = 0, off;
> +
> +	if (cmd->data_length < 4) {
> +		pr_warn("REPORT REFERRALS allocation length %u too"
> +			" small\n", cmd->data_length);
> +		return TCM_INVALID_CDB_FIELD;
> +	}
> +
> +	buf = transport_kmap_data_sg(cmd);
> +	if (!buf)
> +		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
> +
> +	off = 4;
> +	spin_lock(&dev->t10_alua.lba_map_lock);
> +	if (list_empty(&dev->t10_alua.lba_map_list)) {
> +		spin_unlock(&dev->t10_alua.lba_map_lock);
> +		transport_kunmap_data_sg(cmd);
> +
> +		return TCM_UNSUPPORTED_SCSI_OPCODE;
> +	}
> +
> +	list_for_each_entry(map, &dev->t10_alua.lba_map_list,
> +			    lba_map_list) {
> +		int desc_num = off + 3;
> +		int pg_num;
> +
> +		off += 4;
> +		put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
> +		off += 8;
> +		put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
> +		off += 8;
> +		rd_len += 20;
> +		pg_num = 0;
> +		list_for_each_entry(map_mem, &map->lba_map_mem_list,
> +				    lba_map_mem_list) {
> +			buf[off++] = map_mem->lba_map_mem_alua_state & 0x0f;
> +			off++;
> +			buf[off++] = (map_mem->lba_map_mem_alua_pg_id >> 8) & 0xff;
> +			buf[off++] = (map_mem->lba_map_mem_alua_pg_id & 0xff);
> +			rd_len += 4;
> +			pg_num++;
> +		}
> +		buf[desc_num] = pg_num;
> +	}
> +	spin_unlock(&dev->t10_alua.lba_map_lock);
> +

For both of these list walks, there needs to be a check against offset
vs. ->data_length to know when the available payload length has been
exhausted..
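
Something along these lines, perhaps..?  (untested sketch of the
first walk; the writes are guarded against the allocation length,
while rd_len keeps counting so RETURN DATA LENGTH still reflects
the full map)

	off += 4;
	if (off + 8 <= cmd->data_length)
		put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
	off += 8;
	if (off + 8 <= cmd->data_length)
		put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
	off += 8;
	rd_len += 20;
	/* ditto for buf[desc_num] and the per-group bytes below */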

> +	/*
> +	 * Set the RETURN DATA LENGTH in the header of the DataIN Payload
> +	 */
> +	put_unaligned_be16(rd_len, &buf[2]);
> +
> +	transport_kunmap_data_sg(cmd);
> +
> +	target_complete_cmd(cmd, GOOD);
> +	return 0;
> +}
> +
> +/*
>   * REPORT_TARGET_PORT_GROUPS
>   *
>   * See spc4r17 section 6.27
> @@ -389,6 +458,80 @@ static inline int core_alua_state_nonoptimized(
>  	return 0;
>  }
>  
> +static inline int core_alua_state_lba_dependent(
> +	struct se_cmd *cmd,
> +	struct t10_alua_tg_pt_gp *tg_pt_gp,
> +	u8 *alua_ascq)
> +{
> +	struct se_device *dev = cmd->se_dev;
> +	u32 segment_size, segment_mult, sectors;
> +	u64 lba;
> +
> +	/* Only need to check for cdb actually containing LBAs */
> +	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB))
> +		return 0;
> +
> +	spin_lock(&dev->t10_alua.lba_map_lock);
> +	segment_size = dev->t10_alua.lba_map_segment_size;
> +	segment_mult = dev->t10_alua.lba_map_segment_multiplier;
> +	sectors = cmd->data_length / dev->dev_attrib.block_size;
> +
> +	lba = cmd->t_task_lba;
> +	while (lba < cmd->t_task_lba + sectors) {
> +		struct t10_alua_lba_map *cur_map = NULL, *map;
> +		struct t10_alua_lba_map_member *map_mem;
> +
> +		list_for_each_entry(map, &dev->t10_alua.lba_map_list,
> +				    lba_map_list) {
> +			u64 start_lba, last_lba;
> +			u64 first_lba = map->lba_map_first_lba;
> +
> +			if (segment_mult) {
> +				start_lba = lba % (segment_size * segment_mult);
> +				last_lba = first_lba + segment_size - 1;
> +				if (start_lba >= first_lba &&
> +				    start_lba <= last_lba) {
> +					lba += segment_size;
> +					cur_map = map;
> +					break;
> +				}
> +			} else {
> +				last_lba = map->lba_map_last_lba;
> +				if (lba >= first_lba && lba <= last_lba) {
> +					lba = last_lba + 1;
> +					cur_map = map;
> +					break;
> +				}
> +			}
> +		}
> +		if (!cur_map) {
> +			spin_unlock(&dev->t10_alua.lba_map_lock);
> +			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
> +			return 1;
> +		}
> +		list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
> +				    lba_map_mem_list) {
> +			if (map_mem->lba_map_mem_alua_pg_id !=
> +			    tg_pt_gp->tg_pt_gp_id)
> +				continue;
> +			switch(map_mem->lba_map_mem_alua_state) {
> +			case ALUA_ACCESS_STATE_STANDBY:
> +				spin_unlock(&dev->t10_alua.lba_map_lock);
> +				*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
> +				return 1;
> +			case ALUA_ACCESS_STATE_UNAVAILABLE:
> +				spin_unlock(&dev->t10_alua.lba_map_lock);
> +				*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
> +				return 1;
> +			default:
> +				break;
> +			}
> +		}
> +	}
> +	spin_unlock(&dev->t10_alua.lba_map_lock);
> +	return 0;
> +}
> +
>  static inline int core_alua_state_standby(
>  	struct se_cmd *cmd,
>  	unsigned char *cdb,
> @@ -586,6 +729,9 @@ target_alua_state_check(struct se_cmd *cmd)
>  	case ALUA_ACCESS_STATE_TRANSITION:
>  		ret = core_alua_state_transition(cmd, cdb, &alua_ascq);
>  		break;
> +	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
> +		ret = core_alua_state_lba_dependent(cmd, tg_pt_gp, &alua_ascq);
> +		break;
>  	/*
>  	 * OFFLINE is a secondary ALUA target port group access state, that is
>  	 * handled above with struct se_port->sep_tg_pt_secondary_offline=1
> @@ -670,6 +816,11 @@ core_alua_check_transition(int state, int valid, int *primary)
>  			goto not_supported;
>  		*primary = 1;
>  		break;
> +	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
> +		if (!(valid & ALUA_LBD_SUP))
> +			goto not_supported;
> +		*primary = 1;
> +		break;
>  	case ALUA_ACCESS_STATE_OFFLINE:
>  		/*
>  		 * OFFLINE state is defined as a secondary target port
> diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
> index 1a152cd..47950cd 100644
> --- a/drivers/target/target_core_alua.h
> +++ b/drivers/target/target_core_alua.h
> @@ -13,12 +13,13 @@
>  /*
>   * ASYMMETRIC ACCESS STATE field
>   *
> - * from spc4r17 section 6.27 Table 245
> + * from spc4r36j section 6.37 Table 307
>   */
>  #define ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED	0x0
>  #define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED	0x1
>  #define ALUA_ACCESS_STATE_STANDBY		0x2
>  #define ALUA_ACCESS_STATE_UNAVAILABLE		0x3
> +#define ALUA_ACCESS_STATE_LBA_DEPENDENT		0x4
>  #define ALUA_ACCESS_STATE_OFFLINE		0xe
>  #define ALUA_ACCESS_STATE_TRANSITION		0xf
>  
> @@ -88,6 +89,7 @@ extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
>  
>  extern sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *);
>  extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *);
> +extern sense_reason_t target_emulate_report_referrals(struct se_cmd *);
>  extern int core_alua_check_nonop_delay(struct se_cmd *);
>  extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
>  				struct se_device *, struct se_port *,
> diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
> index 53e9e00..172a54e 100644
> --- a/drivers/target/target_core_configfs.c
> +++ b/drivers/target/target_core_configfs.c
> @@ -2054,7 +2054,13 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
>  			" transition while TPGS_IMPLICIT_ALUA is disabled\n");
>  		return -EINVAL;
>  	}
> -
> +	if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA &&
> +	    new_state == ALUA_ACCESS_STATE_LBA_DEPENDENT) {
> +		/* LBA DEPENDENT is only allowed with implicit ALUA */
> +		pr_err("Unable to process implicit configfs ALUA"
> +		       " transition while TPGS_EXPLICIT_ALUA is enabled\n");
> +		return -EINVAL;
> +	}
>  	ret = core_alua_do_port_transition(tg_pt_gp, dev,
>  					NULL, NULL, new_state, 0);
>  	return (!ret) ? count : -EINVAL;
> @@ -2163,7 +2169,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_supported_states(
>  		return -EINVAL;
>  	}
>  	new_states = (int)tmp;
> -	valid_states = ALUA_T_SUP | ALUA_O_SUP | ALUA_LBD_SUP | \
> +	valid_states = ALUA_T_SUP | ALUA_O_SUP | \
>  	    ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;
>  
> 
> @@ -2172,6 +2178,8 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_supported_states(
>  				new_states);
>  		return -EINVAL;
>  	}
> +	/* LBD_SUP is set once lba_map has been populated */
> +	new_states |= tg_pt_gp->tg_pt_gp_alua_supported_states & ALUA_LBD_SUP;
>  
>  	tg_pt_gp->tg_pt_gp_alua_supported_states = new_states;
>  	return count;
> diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
> index d17e10d..f71cc33 100644
> --- a/drivers/target/target_core_device.c
> +++ b/drivers/target/target_core_device.c
> @@ -1440,6 +1440,8 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
>  	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
>  	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
>  	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
> +	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
> +	spin_lock_init(&dev->t10_alua.lba_map_lock);
>  
>  	dev->t10_wwn.t10_dev = dev;
>  	dev->t10_alua.t10_dev = dev;
> diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
> index 4714c6f..d25683da 100644
> --- a/drivers/target/target_core_sbc.c
> +++ b/drivers/target/target_core_sbc.c
> @@ -33,7 +33,7 @@
>  
>  #include "target_core_internal.h"
>  #include "target_core_ua.h"
> -
> +#include "target_core_alua.h"
>  
>  static sense_reason_t
>  sbc_emulate_readcapacity(struct se_cmd *cmd)
> @@ -716,6 +716,9 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
>  		case SAI_READ_CAPACITY_16:
>  			cmd->execute_cmd = sbc_emulate_readcapacity_16;
>  			break;
> +		case SAI_REPORT_REFERRALS:
> +			cmd->execute_cmd = target_emulate_report_referrals;
> +			break;
>  		default:
>  			pr_err("Unsupported SA: 0x%02x\n",
>  				cmd->t_task_cdb[1] & 0x1f);
> diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
> index e39d442..282b5bb 100644
> --- a/drivers/target/target_core_spc.c
> +++ b/drivers/target/target_core_spc.c
> @@ -476,6 +476,11 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
>  	/* If WriteCache emulation is enabled, set V_SUP */
>  	if (spc_check_dev_wce(dev))
>  		buf[6] = 0x01;
> +	/* If an LBA map is present set R_SUP */
> +	spin_lock(&cmd->se_dev->t10_alua.lba_map_lock);
> +	if (!list_empty(&dev->t10_alua.lba_map_list))
> +		buf[8] = 0x10;
> +	spin_unlock(&cmd->se_dev->t10_alua.lba_map_lock);
>  	return 0;
>  }

Is there ever a case where R_SUP should be reported, but lba_map_list is
empty..?

How about a se_device attribute called 'emulate_referrals' to determine
when to report R_SUP..?  Otherwise, perhaps using the se_lun -> se_port
-> sep_alua_tg_pt_gp_mem -> tg_pt_gp provided bit for
tg_pt_gp_alua_supported_states instead..?
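
With the attribute approach that would be something like the
below (sketch only; 'emulate_referrals' does not exist yet):

	/* hypothetical dev_attrib flag, set via configfs */
	if (dev->dev_attrib.emulate_referrals)
		buf[8] = 0x10;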

>  
> @@ -627,6 +632,20 @@ spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
>  	return 0;
>  }
>  
> +/* Referrals VPD page */
> +static sense_reason_t
> +spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf)
> +{
> +	struct se_device *dev = cmd->se_dev;
> +
> +	buf[0] = dev->transport->get_device_type(dev);
> +	buf[3] = 0x0c;
> +	put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]);
> +	put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[12]);
> +

Typo..  Offset for byte 12 should be the lba_map_segment_multiplier..
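
That is, presumably:

	put_unaligned_be32(dev->t10_alua.lba_map_segment_multiplier,
			   &buf[12]);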

> +	return 0;
> +}
> +
>  static sense_reason_t
>  spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
>  
> @@ -641,6 +660,7 @@ static struct {
>  	{ .page = 0xb0, .emulate = spc_emulate_evpd_b0 },
>  	{ .page = 0xb1, .emulate = spc_emulate_evpd_b1 },
>  	{ .page = 0xb2, .emulate = spc_emulate_evpd_b2 },
> +	{ .page = 0xb3, .emulate = spc_emulate_evpd_b3 },
>  };
>  
>  /* supported vital product data pages */
> diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
> index 66d42ed..0a4edfe 100644
> --- a/include/scsi/scsi.h
> +++ b/include/scsi/scsi.h
> @@ -155,6 +155,7 @@ enum scsi_timeouts {
>  /* values for service action in */
>  #define	SAI_READ_CAPACITY_16  0x10
>  #define SAI_GET_LBA_STATUS    0x12
> +#define SAI_REPORT_REFERRALS  0x13
>  /* values for VARIABLE_LENGTH_CMD service action codes
>   * see spc4r17 Section D.3.5, table D.7 and D.8 */
>  #define VLC_SA_RECEIVE_CREDENTIAL 0x1800
> diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
> index b02bb61..34181ad 100644
> --- a/include/target/target_core_base.h
> +++ b/include/target/target_core_base.h
> @@ -246,10 +246,28 @@ typedef enum {
>  
>  struct se_cmd;
>  
> +struct t10_alua_lba_map_member {
> +	struct list_head lba_map_mem_list;
> +	int lba_map_mem_alua_state;
> +	int lba_map_mem_alua_pg_id;
> +};
> +
> +struct t10_alua_lba_map {
> +	u64 lba_map_first_lba;
> +	u64 lba_map_last_lba;
> +	struct list_head lba_map_list;
> +	struct list_head lba_map_mem_list;
> +};
> +
>  struct t10_alua {
>  	/* ALUA Target Port Group ID */
>  	u16	alua_tg_pt_gps_counter;
>  	u32	alua_tg_pt_gps_count;
> +	/* Referrals support */
> +	spinlock_t lba_map_lock;
> +	u32     lba_map_segment_size;
> +	u32     lba_map_segment_multiplier;
> +	struct list_head lba_map_list;
>  	spinlock_t tg_pt_gps_lock;
>  	struct se_device *t10_dev;
>  	/* Used for default ALUA Target Port Group */


* Re: [PATCH 2/2] target_core_alua: Referrals configfs integration
  2013-10-16  7:25 ` [PATCH 2/2] target_core_alua: Referrals configfs integration Hannes Reinecke
@ 2013-10-17  0:36   ` Nicholas A. Bellinger
  2013-10-17  7:42     ` Hannes Reinecke
  0 siblings, 1 reply; 7+ messages in thread
From: Nicholas A. Bellinger @ 2013-10-17  0:36 UTC (permalink / raw)
  To: Hannes Reinecke; +Cc: Nic Bellinger, Doug Gilber, target-devel, linux-scsi

On Wed, 2013-10-16 at 09:25 +0200, Hannes Reinecke wrote:
> Referrals need an LBA map, which must be kept
> consistent across all target port groups. So
> instead of tying the map to individual target port groups
> I've implemented a single device attribute containing the
> entire map.
> 
> Signed-off-by: Hannes Reinecke <hare@suse.de>
> ---
>  drivers/target/target_core_alua.c      | 101 +++++++++++++++++++
>  drivers/target/target_core_alua.h      |   8 ++
>  drivers/target/target_core_configfs.c  | 171 +++++++++++++++++++++++++++++++++
>  drivers/target/target_core_device.c    |   1 +
>  drivers/target/target_core_transport.c |  28 +++++-
>  5 files changed, 308 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
> index 8f66146..9dd01ff 100644
> --- a/drivers/target/target_core_alua.c
> +++ b/drivers/target/target_core_alua.c
> @@ -1340,6 +1340,107 @@ static int core_alua_set_tg_pt_secondary_state(
>  	return 0;
>  }
>  
> +struct t10_alua_lba_map *
> +core_alua_allocate_lba_map(struct list_head *list,
> +			   u64 first_lba, u64 last_lba)
> +{
> +	struct t10_alua_lba_map *lba_map;
> +
> +	lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL);
> +	if (!lba_map) {
> +		pr_err("Unable to allocate struct t10_alua_lba_map\n");
> +		return ERR_PTR(-ENOMEM);
> +	}
> +	INIT_LIST_HEAD(&lba_map->lba_map_mem_list);
> +	lba_map->lba_map_first_lba = first_lba;
> +	lba_map->lba_map_last_lba = last_lba;
> +
> +	list_add_tail(&lba_map->lba_map_list, list);
> +	return lba_map;
> +}

This list_add_tail needs to be protected, no..?

> +
> +int
> +core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map,
> +			       int pg_id, int state)
> +{
> +	struct t10_alua_lba_map_member *lba_map_mem;
> +
> +	list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list,
> +			    lba_map_mem_list) {
> +		if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) {
> +			pr_err("Duplicate pg_id %d in lba_map\n", pg_id);
> +			return -EINVAL;
> +		}
> +	}
> +
> +	lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL);
> +	if (!lba_map_mem) {
> +		pr_err("Unable to allocate struct t10_alua_lba_map_mem\n");
> +		return -ENOMEM;
> +	}
> +	lba_map_mem->lba_map_mem_alua_state = state;
> +	lba_map_mem->lba_map_mem_alua_pg_id = pg_id;
> +
> +	list_add_tail(&lba_map_mem->lba_map_mem_list,
> +		      &lba_map->lba_map_mem_list);
> +	return 0;
> +}

Ditto here..

> +
> +void
> +core_alua_free_lba_map(struct list_head *lba_list)
> +{
> +	struct t10_alua_lba_map *lba_map, *lba_map_tmp;
> +	struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp;
> +
> +	list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list,
> +				 lba_map_list) {
> +		list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp,
> +					 &lba_map->lba_map_mem_list,
> +					 lba_map_mem_list) {
> +			list_del(&lba_map_mem->lba_map_mem_list);
> +			kmem_cache_free(t10_alua_lba_map_mem_cache,
> +					lba_map_mem);
> +		}
> +		list_del(&lba_map->lba_map_list);
> +		kmem_cache_free(t10_alua_lba_map_cache, lba_map);
> +	}
> +}

And here..

> +
> +void
> +core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list,
> +		      int segment_size, int segment_mult)
> +{
> +	struct list_head old_lba_map_list;
> +	struct t10_alua_tg_pt_gp *tg_pt_gp;
> +	int activate = 0, supported;
> +
> +	INIT_LIST_HEAD(&old_lba_map_list);
> +	spin_lock(&dev->t10_alua.lba_map_lock);
> +	dev->t10_alua.lba_map_segment_size = segment_size;
> +	dev->t10_alua.lba_map_segment_multiplier = segment_mult;
> +	list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list);
> +	if (lba_map_list) {
> +		list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list);
> +		activate = 1;
> +	}
> +	spin_unlock(&dev->t10_alua.lba_map_lock);
> +	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
> +	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
> +			    tg_pt_gp_list) {
> +
> +		if (!tg_pt_gp->tg_pt_gp_valid_id)
> +			continue;
> +		supported = tg_pt_gp->tg_pt_gp_alua_supported_states;
> +		if (activate)
> +			supported |= ALUA_LBD_SUP;
> +		else
> +			supported &= ~ALUA_LBD_SUP;
> +		tg_pt_gp->tg_pt_gp_alua_supported_states = supported;
> +	}
> +	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
> +	core_alua_free_lba_map(&old_lba_map_list);
> +}
> +
>  struct t10_alua_lu_gp *
>  core_alua_allocate_lu_gp(const char *name, int def_group)
>  {
> diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
> index 47950cd..0a7d65e 100644
> --- a/drivers/target/target_core_alua.h
> +++ b/drivers/target/target_core_alua.h
> @@ -86,6 +86,8 @@ extern struct kmem_cache *t10_alua_lu_gp_cache;
>  extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
>  extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
>  extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
> +extern struct kmem_cache *t10_alua_lba_map_cache;
> +extern struct kmem_cache *t10_alua_lba_map_mem_cache;
>  
>  extern sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *);
>  extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *);
> @@ -95,6 +97,12 @@ extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
>  				struct se_device *, struct se_port *,
>  				struct se_node_acl *, int, int);
>  extern char *core_alua_dump_status(int);
> +extern struct t10_alua_lba_map *core_alua_allocate_lba_map(
> +				struct list_head *, u64, u64);
> +extern int core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *, int, int);
> +extern void core_alua_free_lba_map(struct list_head *);
> +extern void core_alua_set_lba_map(struct se_device *, struct list_head *,
> +				int, int);
>  extern struct t10_alua_lu_gp *core_alua_allocate_lu_gp(const char *, int);
>  extern int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *, u16);
>  extern void core_alua_free_lu_gp(struct t10_alua_lu_gp *);
> diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
> index 172a54e..613cafb 100644
> --- a/drivers/target/target_core_configfs.c
> +++ b/drivers/target/target_core_configfs.c
> @@ -1741,6 +1741,176 @@ static struct target_core_configfs_attribute target_core_attr_dev_alua_lu_gp = {
>  	.store	= target_core_store_alua_lu_gp,
>  };
>  
> +static ssize_t target_core_show_dev_lba_map(void *p, char *page)
> +{
> +	struct se_device *dev = p;
> +	struct t10_alua_lba_map *map;
> +	struct t10_alua_lba_map_member *mem;
> +	char *b = page;
> +	int bl = 0;
> +	char state;
> +
> +	spin_lock(&dev->t10_alua.lba_map_lock);
> +	if (!list_empty(&dev->t10_alua.lba_map_list))
> +	    bl += sprintf(b + bl, "%u %u\n",
> +			  dev->t10_alua.lba_map_segment_size,
> +			  dev->t10_alua.lba_map_segment_multiplier);
> +	list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) {
> +		bl += sprintf(b + bl, "%llu %llu",
> +			      map->lba_map_first_lba, map->lba_map_last_lba);
> +		list_for_each_entry(mem, &map->lba_map_mem_list,
> +				    lba_map_mem_list) {
> +			switch (mem->lba_map_mem_alua_state) {
> +			case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
> +				state = 'O';
> +				break;
> +			case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
> +				state = 'A';
> +				break;
> +			case ALUA_ACCESS_STATE_STANDBY:
> +				state = 'S';
> +				break;
> +			case ALUA_ACCESS_STATE_UNAVAILABLE:
> +				state = 'U';
> +				break;
> +			default:
> +				state = '.';
> +				break;
> +			}
> +			bl += sprintf(b + bl, " %d:%c",
> +				      mem->lba_map_mem_alua_pg_id, state);
> +		}
> +		bl += sprintf(b + bl, "\n");
> +	}
> +	spin_unlock(&dev->t10_alua.lba_map_lock);
> +	return bl;
> +}

Unfortunately, due to the existing limitations of configfs/sysfs
attribute output, writes to *page need to be limited to PAGE_SIZE.
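
E.g. by switching the sprintf() calls over to scnprintf() against
the remaining space, along these lines (sketch):

	/* bound each chunk to the space left in the page */
	bl += scnprintf(b + bl, PAGE_SIZE - bl, "%u %u\n",
			dev->t10_alua.lba_map_segment_size,
			dev->t10_alua.lba_map_segment_multiplier);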

> +
> +static ssize_t target_core_store_dev_lba_map(
> +	void *p,
> +	const char *page,
> +	size_t count)
> +{
> +	struct se_device *dev = p;
> +	struct t10_alua_lba_map *lba_map = NULL;
> +	struct list_head lba_list;
> +	char *map_entries, *ptr;
> +	char state;
> +	int pg_num = -1, pg;
> +	int ret = 0, num = 0, pg_id, alua_state;
> +	unsigned long start_lba = -1, end_lba = -1;
> +	unsigned long segment_size = -1, segment_mult = -1;
> +
> +	map_entries = kstrdup(page, GFP_KERNEL);
> +	if (!map_entries)
> +		return -ENOMEM;
> +
> +	INIT_LIST_HEAD(&lba_list);
> +	while ((ptr = strsep(&map_entries, "\n")) != NULL) {
> +		if (!*ptr)
> +			continue;
> +
> +		if (num == 0) {
> +			if (sscanf(ptr, "%lu %lu\n",
> +				   &segment_size, &segment_mult) != 2) {
> +				pr_err("Invalid line %d\n", num);
> +				ret = -EINVAL;
> +				break;
> +			}
> +			num++;
> +			continue;
> +		}
> +		if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) {
> +			pr_err("Invalid line %d\n", num);
> +			ret = -EINVAL;
> +			break;
> +		}
> +		ptr = strchr(ptr, ' ');
> +		if (!ptr) {
> +			pr_err("Invalid line %d, missing end lba\n", num);
> +			ret = -EINVAL;
> +			break;
> +		}
> +		ptr++;
> +		ptr = strchr(ptr, ' ');
> +		if (!ptr) {
> +			pr_err("Invalid line %d, missing state definitions\n",
> +			       num);
> +			ret = -EINVAL;
> +			break;
> +		}
> +		ptr++;
> +		lba_map = core_alua_allocate_lba_map(&lba_list,
> +						     start_lba, end_lba);
> +		if (IS_ERR(lba_map)) {
> +			ret = PTR_ERR(lba_map);
> +			break;
> +		}
> +		pg = 0;
> +		while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) {
> +			switch (state) {
> +			case 'O':
> +				alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
> +				break;
> +			case 'A':
> +				alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED;
> +				break;
> +			case 'S':
> +				alua_state = ALUA_ACCESS_STATE_STANDBY;
> +				break;
> +			case 'U':
> +				alua_state = ALUA_ACCESS_STATE_UNAVAILABLE;
> +				break;
> +			default:
> +				pr_err("Invalid ALUA state '%c'\n", state);
> +				ret = -EINVAL;
> +				goto out;
> +			}
> +
> +			ret = core_alua_allocate_lba_map_mem(lba_map,
> +							     pg_id, alua_state);
> +			if (ret) {
> +				pr_err("Invalid target descriptor %d:%c "
> +				       "at line %d\n",
> +				       pg_id, state, num);
> +				break;
> +			}
> +			pg++;
> +			ptr = strchr(ptr, ' ');
> +			if (ptr)
> +				ptr++;
> +			else
> +				break;
> +		}
> +		if (pg_num == -1)
> +		    pg_num = pg;
> +		else if (pg != pg_num) {
> +			pr_err("Only %d from %d port groups definitions "
> +			       "at line %d\n", pg, pg_num, num);
> +			ret = -EINVAL;
> +			break;
> +		}

Btw, checkpatch complains about conditionals that don't have matching
brackets on both code blocks, e.g.:

   if foo
   else {
       bar
   }
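
So the hunk above would want braces on both branches, i.e.:

	if (pg_num == -1) {
		pg_num = pg;
	} else if (pg != pg_num) {
		pr_err("Only %d of %d port group definitions "
		       "at line %d\n", pg, pg_num, num);
		ret = -EINVAL;
		break;
	}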

> +		num++;
> +	}
> +out:
> +	if (ret) {
> +		core_alua_free_lba_map(&lba_list);
> +		count = ret;
> +	} else
> +		core_alua_set_lba_map(dev, &lba_list,
> +				      segment_size, segment_mult);
> +	kfree(map_entries);
> +	return count;
> +}
> +
> +static struct target_core_configfs_attribute target_core_attr_dev_lba_map = {
> +	.attr	= { .ca_owner = THIS_MODULE,
> +		    .ca_name = "lba_map",
> +		    .ca_mode = S_IRUGO | S_IWUSR },
> +	.show	= target_core_show_dev_lba_map,
> +	.store	= target_core_store_dev_lba_map,
> +};
> +
>  static struct configfs_attribute *lio_core_dev_attrs[] = {
>  	&target_core_attr_dev_info.attr,
>  	&target_core_attr_dev_control.attr,
> @@ -1748,6 +1918,7 @@ static struct configfs_attribute *lio_core_dev_attrs[] = {
>  	&target_core_attr_dev_udev_path.attr,
>  	&target_core_attr_dev_enable.attr,
>  	&target_core_attr_dev_alua_lu_gp.attr,
> +	&target_core_attr_dev_lba_map.attr,
>  	NULL,
>  };
>  
> diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
> index f71cc33..6db76af 100644
> --- a/drivers/target/target_core_device.c
> +++ b/drivers/target/target_core_device.c
> @@ -1578,6 +1578,7 @@ void target_free_device(struct se_device *dev)
>  	}
>  
>  	core_alua_free_lu_gp_mem(dev);
> +	core_alua_set_lba_map(dev, NULL, 0, 0);
>  	core_scsi3_free_all_registrations(dev);
>  	se_release_vpd_for_dev(dev);
>  
> diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
> index 98bb7c4..e34d4b4 100644
> --- a/drivers/target/target_core_transport.c
> +++ b/drivers/target/target_core_transport.c
> @@ -63,6 +63,8 @@ struct kmem_cache *t10_alua_lu_gp_cache;
>  struct kmem_cache *t10_alua_lu_gp_mem_cache;
>  struct kmem_cache *t10_alua_tg_pt_gp_cache;
>  struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
> +struct kmem_cache *t10_alua_lba_map_cache;
> +struct kmem_cache *t10_alua_lba_map_mem_cache;
>  
>  static void transport_complete_task_attr(struct se_cmd *cmd);
>  static void transport_handle_queue_full(struct se_cmd *cmd,
> @@ -129,14 +131,36 @@ int init_se_kmem_caches(void)
>  				"mem_t failed\n");
>  		goto out_free_tg_pt_gp_cache;
>  	}
> +	t10_alua_lba_map_cache = kmem_cache_create(
> +			"t10_alua_lba_map_cache",
> +			sizeof(struct t10_alua_lba_map),
> +			__alignof__(struct t10_alua_lba_map), 0, NULL);
> +	if (!t10_alua_lba_map_cache) {
> +		pr_err("kmem_cache_create() for t10_alua_lba_map_"
> +				"cache failed\n");
> +		goto out_free_tg_pt_gp_mem_cache;
> +	}
> +	t10_alua_lba_map_mem_cache = kmem_cache_create(
> +			"t10_alua_lba_map_mem_cache",
> +			sizeof(struct t10_alua_lba_map_member),
> +			__alignof__(struct t10_alua_lba_map_member), 0, NULL);
> +	if (!t10_alua_lba_map_mem_cache) {
> +		pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
> +				"cache failed\n");
> +		goto out_free_lba_map_cache;
> +	}
>  
>  	target_completion_wq = alloc_workqueue("target_completion",
>  					       WQ_MEM_RECLAIM, 0);
>  	if (!target_completion_wq)
> -		goto out_free_tg_pt_gp_mem_cache;
> +		goto out_free_lba_map_mem_cache;
>  
>  	return 0;
>  
> +out_free_lba_map_mem_cache:
> +	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
> +out_free_lba_map_cache:
> +	kmem_cache_destroy(t10_alua_lba_map_cache);
>  out_free_tg_pt_gp_mem_cache:
>  	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
>  out_free_tg_pt_gp_cache:
> @@ -165,6 +189,8 @@ void release_se_kmem_caches(void)
>  	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
>  	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
>  	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
> +	kmem_cache_destroy(t10_alua_lba_map_cache);
> +	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
>  }
>  
>  /* This code ensures unique mib indexes are handed out. */


* Re: [PATCH 1/2] target_core_alua: Referrals infrastructure
  2013-10-16 22:28   ` Nicholas A. Bellinger
@ 2013-10-17  7:38     ` Hannes Reinecke
  0 siblings, 0 replies; 7+ messages in thread
From: Hannes Reinecke @ 2013-10-17  7:38 UTC (permalink / raw)
  To: Nicholas A. Bellinger
  Cc: Nic Bellinger, Doug Gilber, target-devel, linux-scsi

On 10/17/2013 12:28 AM, Nicholas A. Bellinger wrote:
> On Wed, 2013-10-16 at 09:25 +0200, Hannes Reinecke wrote:
>> Add infrastructure for referrals.
>>
>> Signed-off-by: Hannes Reinecke <hare@suse.de>
>> ---
>>  drivers/target/target_core_alua.c     | 151 ++++++++++++++++++++++++++++++++++
>>  drivers/target/target_core_alua.h     |   4 +-
>>  drivers/target/target_core_configfs.c |  12 ++-
>>  drivers/target/target_core_device.c   |   2 +
>>  drivers/target/target_core_sbc.c      |   5 +-
>>  drivers/target/target_core_spc.c      |  20 +++++
>>  include/scsi/scsi.h                   |   1 +
>>  include/target/target_core_base.h     |  18 ++++
>>  8 files changed, 209 insertions(+), 4 deletions(-)
>>
>> diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
>> index 166bee6..8f66146 100644
>> --- a/drivers/target/target_core_alua.c
>> +++ b/drivers/target/target_core_alua.c
>> @@ -56,6 +56,75 @@ static LIST_HEAD(lu_gps_list);
>>  struct t10_alua_lu_gp *default_lu_gp;
>>  
>>  /*
>> + * REPORT REFERRALS
>> + *
>> + * See sbc3r35 section 5.23
>> + */
>> +sense_reason_t
>> +target_emulate_report_referrals(struct se_cmd *cmd)
>> +{
>> +	struct se_device *dev = cmd->se_dev;
>> +	struct t10_alua_lba_map *map;
>> +	struct t10_alua_lba_map_member *map_mem;
>> +	unsigned char *buf;
>> +	u32 rd_len = 0, off;
>> +
>> +	if (cmd->data_length < 4) {
>> +		pr_warn("REPORT REFERRALS allocation length %u too"
>> +			" small\n", cmd->data_length);
>> +		return TCM_INVALID_CDB_FIELD;
>> +	}
>> +
>> +	buf = transport_kmap_data_sg(cmd);
>> +	if (!buf)
>> +		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
>> +
>> +	off = 4;
>> +	spin_lock(&dev->t10_alua.lba_map_lock);
>> +	if (list_empty(&dev->t10_alua.lba_map_list)) {
>> +		spin_unlock(&dev->t10_alua.lba_map_lock);
>> +		transport_kunmap_data_sg(cmd);
>> +
>> +		return TCM_UNSUPPORTED_SCSI_OPCODE;
>> +	}
>> +
>> +	list_for_each_entry(map, &dev->t10_alua.lba_map_list,
>> +			    lba_map_list) {
>> +		int desc_num = off + 3;
>> +		int pg_num;
>> +
>> +		off += 4;
>> +		put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
>> +		off += 8;
>> +		put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
>> +		off += 8;
>> +		rd_len += 20;
>> +		pg_num = 0;
>> +		list_for_each_entry(map_mem, &map->lba_map_mem_list,
>> +				    lba_map_mem_list) {
>> +			buf[off++] = map_mem->lba_map_mem_alua_state & 0x0f;
>> +			off++;
>> +			buf[off++] = (map_mem->lba_map_mem_alua_pg_id >> 8) & 0xff;
>> +			buf[off++] = (map_mem->lba_map_mem_alua_pg_id & 0xff);
>> +			rd_len += 4;
>> +			pg_num++;
>> +		}
>> +		buf[desc_num] = pg_num;
>> +	}
>> +	spin_unlock(&dev->t10_alua.lba_map_lock);
>> +
> 
> For both of these list walks, there needs to be a check against offset
> vs. ->data_length to know when the available payload length has been
> exhausted..
> 
Right. Will be fixing it up.
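
Something along these lines, I guess (just a sketch, not the final
code; how rd_len should be accounted for on truncation still needs
checking against sbc3r35):

	/* outer walk: stop before a 20-byte descriptor would
	 * overrun the mapped payload */
	if (off + 20 > cmd->data_length)
		break;

	/* inner walk: same check for each 4-byte port group entry */
	if (off + 4 > cmd->data_length)
		break;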

[ .. ]
>> diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
>> index e39d442..282b5bb 100644
>> --- a/drivers/target/target_core_spc.c
>> +++ b/drivers/target/target_core_spc.c
>> @@ -476,6 +476,11 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
>>  	/* If WriteCache emulation is enabled, set V_SUP */
>>  	if (spc_check_dev_wce(dev))
>>  		buf[6] = 0x01;
>> +	/* If an LBA map is present set R_SUP */
>> +	spin_lock(&cmd->se_dev->t10_alua.lba_map_lock);
>> +	if (!list_empty(&dev->t10_alua.lba_map_list))
>> +		buf[8] = 0x10;
>> +	spin_unlock(&cmd->se_dev->t10_alua.lba_map_lock);
>>  	return 0;
>>  }
> 
> Is there ever a case where R_SUP should be reported, but lba_map_list is
> empty..?
> 
Not that I can see. If R_SUP is set it means that 'REPORT REFERRALS'
is supported, and 'REPORT REFERRALS' without a map is pretty much
pointless.

> How about a se_device attribute called 'emulate_referrals' to determine
> when to report R_SUP..?  Otherwise, perhaps using the se_lun -> se_port
> -> sep_alua_tg_pt_gp_mem -> tg_pt_gp provided bit for
> tg_pt_gp_alua_supported_states instead..?
> 
I was thinking about the very same thing, but then figured it was
easier to equate R_SUP with !list_empty(lba_map_list) instead of
having a separate flag, or crawling through indirections just to
find the very same information ...
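
For comparison, the tg_pt_gp based variant would look roughly like
this (the pointer chain is taken from your mail, so treat it as an
assumption rather than tested code):

	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem =
		cmd->se_lun->lun_sep->sep_alua_tg_pt_gp_mem;

	if (tg_pt_gp_mem && tg_pt_gp_mem->tg_pt_gp &&
	    (tg_pt_gp_mem->tg_pt_gp->tg_pt_gp_alua_supported_states &
	     ALUA_LBD_SUP))
		buf[8] = 0x10;	/* R_SUP */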

>>  
>> @@ -627,6 +632,20 @@ spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
>>  	return 0;
>>  }
>>  
>> +/* Referrals VPD page */
>> +static sense_reason_t
>> +spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf)
>> +{
>> +	struct se_device *dev = cmd->se_dev;
>> +
>> +	buf[0] = dev->transport->get_device_type(dev);
>> +	buf[3] = 0x0c;
>> +	put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]);
>> +	put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[12]);
>> +
> 
> Typo..  Offset for byte 12 should be the lba_map_segment_multiplier..
> 
Oops ...

Will be fixing up the patch.
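
I.e. presumably just:

	put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]);
	put_unaligned_be32(dev->t10_alua.lba_map_segment_multiplier,
			   &buf[12]);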

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		      zSeries & Storage
hare@suse.de			      +49 911 74053 688
SUSE LINUX Products GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: J. Hawn, J. Guild, F. Imendörffer, HRB 16746 (AG Nürnberg)


* Re: [PATCH 2/2] target_core_alua: Referrals configfs integration
  2013-10-17  0:36   ` Nicholas A. Bellinger
@ 2013-10-17  7:42     ` Hannes Reinecke
  0 siblings, 0 replies; 7+ messages in thread
From: Hannes Reinecke @ 2013-10-17  7:42 UTC (permalink / raw)
  To: Nicholas A. Bellinger
  Cc: Nic Bellinger, Doug Gilber, target-devel, linux-scsi

On 10/17/2013 02:36 AM, Nicholas A. Bellinger wrote:
> On Wed, 2013-10-16 at 09:25 +0200, Hannes Reinecke wrote:
>> Referrals need an LBA map, which needs to be kept
>> consistent across all target port groups. So
>> instead of tying the map to the target port groups
>> I've implemented a single attribute containing the
>> entire map.
>>
>> Signed-off-by: Hannes Reinecke <hare@suse.de>
>> ---
>>  drivers/target/target_core_alua.c      | 101 +++++++++++++++++++
>>  drivers/target/target_core_alua.h      |   8 ++
>>  drivers/target/target_core_configfs.c  | 171 +++++++++++++++++++++++++++++++++
>>  drivers/target/target_core_device.c    |   1 +
>>  drivers/target/target_core_transport.c |  28 +++++-
>>  5 files changed, 308 insertions(+), 1 deletion(-)
>>
>> diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
>> index 8f66146..9dd01ff 100644
>> --- a/drivers/target/target_core_alua.c
>> +++ b/drivers/target/target_core_alua.c
>> @@ -1340,6 +1340,107 @@ static int core_alua_set_tg_pt_secondary_state(
>>  	return 0;
>>  }
>>  
>> +struct t10_alua_lba_map *
>> +core_alua_allocate_lba_map(struct list_head *list,
>> +			   u64 first_lba, u64 last_lba)
>> +{
>> +	struct t10_alua_lba_map *lba_map;
>> +
>> +	lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL);
>> +	if (!lba_map) {
>> +		pr_err("Unable to allocate struct t10_alua_lba_map\n");
>> +		return ERR_PTR(-ENOMEM);
>> +	}
>> +	INIT_LIST_HEAD(&lba_map->lba_map_mem_list);
>> +	lba_map->lba_map_first_lba = first_lba;
>> +	lba_map->lba_map_last_lba = last_lba;
>> +
>> +	list_add_tail(&lba_map->lba_map_list, list);
>> +	return lba_map;
>> +}
> 
> This list_add_tail needs to be protected, no..?
> 
No. The current usage is that we first construct the mapping in
memory, and then switch maps in set_lba_map.
This way we only need to protect the switch itself, not the map
construction.
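
The calling pattern is roughly this (illustrative values, error
handling omitted):

	struct t10_alua_lba_map *lba_map;
	LIST_HEAD(lba_list);

	/* build the whole map on a local list; no locking needed yet */
	lba_map = core_alua_allocate_lba_map(&lba_list, 0, 1023);
	core_alua_allocate_lba_map_mem(lba_map, 0,
			ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
	core_alua_allocate_lba_map_mem(lba_map, 1,
			ALUA_ACCESS_STATE_STANDBY);

	/* only this call publishes the new map, under lba_map_lock */
	core_alua_set_lba_map(dev, &lba_list, 512, 1024);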

>> +
>> +int
>> +core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map,
>> +			       int pg_id, int state)
>> +{
>> +	struct t10_alua_lba_map_member *lba_map_mem;
>> +
>> +	list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list,
>> +			    lba_map_mem_list) {
>> +		if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) {
>> +			pr_err("Duplicate pg_id %d in lba_map\n", pg_id);
>> +			return -EINVAL;
>> +		}
>> +	}
>> +
>> +	lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL);
>> +	if (!lba_map_mem) {
>> +		pr_err("Unable to allocate struct t10_alua_lba_map_mem\n");
>> +		return -ENOMEM;
>> +	}
>> +	lba_map_mem->lba_map_mem_alua_state = state;
>> +	lba_map_mem->lba_map_mem_alua_pg_id = pg_id;
>> +
>> +	list_add_tail(&lba_map_mem->lba_map_mem_list,
>> +		      &lba_map->lba_map_mem_list);
>> +	return 0;
>> +}
> 
> Ditto here..
> 
See above.

>> +
>> +void
>> +core_alua_free_lba_map(struct list_head *lba_list)
>> +{
>> +	struct t10_alua_lba_map *lba_map, *lba_map_tmp;
>> +	struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp;
>> +
>> +	list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list,
>> +				 lba_map_list) {
>> +		list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp,
>> +					 &lba_map->lba_map_mem_list,
>> +					 lba_map_mem_list) {
>> +			list_del(&lba_map_mem->lba_map_mem_list);
>> +			kmem_cache_free(t10_alua_lba_map_mem_cache,
>> +					lba_map_mem);
>> +		}
>> +		list_del(&lba_map->lba_map_list);
>> +		kmem_cache_free(t10_alua_lba_map_cache, lba_map);
>> +	}
>> +}
> 
> And here..
> 
>> +
>> +void
>> +core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list,
>> +		      int segment_size, int segment_mult)
>> +{
>> +	struct list_head old_lba_map_list;
>> +	struct t10_alua_tg_pt_gp *tg_pt_gp;
>> +	int activate = 0, supported;
>> +
>> +	INIT_LIST_HEAD(&old_lba_map_list);
>> +	spin_lock(&dev->t10_alua.lba_map_lock);
>> +	dev->t10_alua.lba_map_segment_size = segment_size;
>> +	dev->t10_alua.lba_map_segment_multiplier = segment_mult;
>> +	list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list);
>> +	if (lba_map_list) {
>> +		list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list);
>> +		activate = 1;
>> +	}
>> +	spin_unlock(&dev->t10_alua.lba_map_lock);
>> +	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
>> +	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
>> +			    tg_pt_gp_list) {
>> +
>> +		if (!tg_pt_gp->tg_pt_gp_valid_id)
>> +			continue;
>> +		supported = tg_pt_gp->tg_pt_gp_alua_supported_states;
>> +		if (activate)
>> +			supported |= ALUA_LBD_SUP;
>> +		else
>> +			supported &= ~ALUA_LBD_SUP;
>> +		tg_pt_gp->tg_pt_gp_alua_supported_states = supported;
>> +	}
>> +	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
>> +	core_alua_free_lba_map(&old_lba_map_list);
>> +}
>> +
>>  struct t10_alua_lu_gp *
>>  core_alua_allocate_lu_gp(const char *name, int def_group)
>>  {

This is what I meant; I'm protecting the map switching, not the map
construction.

>> diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
>> index 172a54e..613cafb 100644
>> --- a/drivers/target/target_core_configfs.c
>> +++ b/drivers/target/target_core_configfs.c
>> @@ -1741,6 +1741,176 @@ static struct target_core_configfs_attribute target_core_attr_dev_alua_lu_gp = {
>>  	.store	= target_core_store_alua_lu_gp,
>>  };
>>  
>> +static ssize_t target_core_show_dev_lba_map(void *p, char *page)
>> +{
>> +	struct se_device *dev = p;
>> +	struct t10_alua_lba_map *map;
>> +	struct t10_alua_lba_map_member *mem;
>> +	char *b = page;
>> +	int bl = 0;
>> +	char state;
>> +
>> +	spin_lock(&dev->t10_alua.lba_map_lock);
>> +	if (!list_empty(&dev->t10_alua.lba_map_list))
>> +	    bl += sprintf(b + bl, "%u %u\n",
>> +			  dev->t10_alua.lba_map_segment_size,
>> +			  dev->t10_alua.lba_map_segment_multiplier);
>> +	list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) {
>> +		bl += sprintf(b + bl, "%llu %llu",
>> +			      map->lba_map_first_lba, map->lba_map_last_lba);
>> +		list_for_each_entry(mem, &map->lba_map_mem_list,
>> +				    lba_map_mem_list) {
>> +			switch (mem->lba_map_mem_alua_state) {
>> +			case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
>> +				state = 'O';
>> +				break;
>> +			case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
>> +				state = 'A';
>> +				break;
>> +			case ALUA_ACCESS_STATE_STANDBY:
>> +				state = 'S';
>> +				break;
>> +			case ALUA_ACCESS_STATE_UNAVAILABLE:
>> +				state = 'U';
>> +				break;
>> +			default:
>> +				state = '.';
>> +				break;
>> +			}
>> +			bl += sprintf(b + bl, " %d:%c",
>> +				      mem->lba_map_mem_alua_pg_id, state);
>> +		}
>> +		bl += sprintf(b + bl, "\n");
>> +	}
>> +	spin_unlock(&dev->t10_alua.lba_map_lock);
>> +	return bl;
>> +}
> 
> Unfortunately due to the existing limitations of configfs/sysfs
> attribute output, the writing to *page needs to be limited to PAGE_SIZE.
> 
Okay, I'll be inserting the respective checks.
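
Probably by switching to scnprintf() and capping at PAGE_SIZE, along
the lines of (sketch only):

	bl += scnprintf(b + bl, PAGE_SIZE - bl, "%llu %llu",
			map->lba_map_first_lba, map->lba_map_last_lba);

scnprintf() returns the number of characters actually written, so bl
can never grow past PAGE_SIZE; an oversized map would simply be
truncated on read.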

>> +
>> +static ssize_t target_core_store_dev_lba_map(
>> +	void *p,
>> +	const char *page,
>> +	size_t count)
>> +{
>> +	struct se_device *dev = p;
>> +	struct t10_alua_lba_map *lba_map = NULL;
>> +	struct list_head lba_list;
>> +	char *map_entries, *ptr;
>> +	char state;
>> +	int pg_num = -1, pg;
>> +	int ret = 0, num = 0, pg_id, alua_state;
>> +	unsigned long start_lba = -1, end_lba = -1;
>> +	unsigned long segment_size = -1, segment_mult = -1;
>> +
>> +	map_entries = kstrdup(page, GFP_KERNEL);
>> +	if (!map_entries)
>> +		return -ENOMEM;
>> +
>> +	INIT_LIST_HEAD(&lba_list);
>> +	while ((ptr = strsep(&map_entries, "\n")) != NULL) {
>> +		if (!*ptr)
>> +			continue;
>> +
>> +		if (num == 0) {
>> +			if (sscanf(ptr, "%lu %lu\n",
>> +				   &segment_size, &segment_mult) != 2) {
>> +				pr_err("Invalid line %d\n", num);
>> +				ret = -EINVAL;
>> +				break;
>> +			}
>> +			num++;
>> +			continue;
>> +		}
>> +		if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) {
>> +			pr_err("Invalid line %d\n", num);
>> +			ret = -EINVAL;
>> +			break;
>> +		}
>> +		ptr = strchr(ptr, ' ');
>> +		if (!ptr) {
>> +			pr_err("Invalid line %d, missing end lba\n", num);
>> +			ret = -EINVAL;
>> +			break;
>> +		}
>> +		ptr++;
>> +		ptr = strchr(ptr, ' ');
>> +		if (!ptr) {
>> +			pr_err("Invalid line %d, missing state definitions\n",
>> +			       num);
>> +			ret = -EINVAL;
>> +			break;
>> +		}
>> +		ptr++;
>> +		lba_map = core_alua_allocate_lba_map(&lba_list,
>> +						     start_lba, end_lba);
>> +		if (IS_ERR(lba_map)) {
>> +			ret = PTR_ERR(lba_map);

[ .. ]

>> +		num++;
>> +	}
>> +out:
>> +	if (ret) {
>> +		core_alua_free_lba_map(&lba_list);
>> +		count = ret;
>> +	} else
>> +		core_alua_set_lba_map(dev, &lba_list,
>> +				      segment_size, segment_mult);
>> +	kfree(map_entries);
>> +	return count;
>> +}
>> +
>> +static struct target_core_configfs_attribute target_core_attr_dev_lba_map = {
>> +	.attr	= { .ca_owner = THIS_MODULE,
>> +		    .ca_name = "lba_map",
>> +		    .ca_mode = S_IRUGO | S_IWUSR },
>> +	.show	= target_core_show_dev_lba_map,
>> +	.store	= target_core_store_dev_lba_map,
>> +};
>> +
>>  static struct configfs_attribute *lio_core_dev_attrs[] = {
>>  	&target_core_attr_dev_info.attr,
>>  	&target_core_attr_dev_control.attr,
>> @@ -1748,6 +1918,7 @@ static struct configfs_attribute *lio_core_dev_attrs[] = {
>>  	&target_core_attr_dev_udev_path.attr,
>>  	&target_core_attr_dev_enable.attr,
>>  	&target_core_attr_dev_alua_lu_gp.attr,
>> +	&target_core_attr_dev_lba_map.attr,
>>  	NULL,
>>  };
>>  
>> diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
>> index f71cc33..6db76af 100644
>> --- a/drivers/target/target_core_device.c
>> +++ b/drivers/target/target_core_device.c
>> @@ -1578,6 +1578,7 @@ void target_free_device(struct se_device *dev)
>>  	}
>>  
>>  	core_alua_free_lu_gp_mem(dev);
>> +	core_alua_set_lba_map(dev, NULL, 0, 0);
>>  	core_scsi3_free_all_registrations(dev);
>>  	se_release_vpd_for_dev(dev);
>>  
>> diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
>> index 98bb7c4..e34d4b4 100644
>> --- a/drivers/target/target_core_transport.c
>> +++ b/drivers/target/target_core_transport.c
>> @@ -63,6 +63,8 @@ struct kmem_cache *t10_alua_lu_gp_cache;
>>  struct kmem_cache *t10_alua_lu_gp_mem_cache;
>>  struct kmem_cache *t10_alua_tg_pt_gp_cache;
>>  struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
>> +struct kmem_cache *t10_alua_lba_map_cache;
>> +struct kmem_cache *t10_alua_lba_map_mem_cache;
>>  
>>  static void transport_complete_task_attr(struct se_cmd *cmd);
>>  static void transport_handle_queue_full(struct se_cmd *cmd,
>> @@ -129,14 +131,36 @@ int init_se_kmem_caches(void)
>>  				"mem_t failed\n");
>>  		goto out_free_tg_pt_gp_cache;
>>  	}
>> +	t10_alua_lba_map_cache = kmem_cache_create(
>> +			"t10_alua_lba_map_cache",
>> +			sizeof(struct t10_alua_lba_map),
>> +			__alignof__(struct t10_alua_lba_map), 0, NULL);
>> +	if (!t10_alua_lba_map_cache) {
>> +		pr_err("kmem_cache_create() for t10_alua_lba_map_"
>> +				"cache failed\n");
>> +		goto out_free_tg_pt_gp_mem_cache;
>> +	}
>> +	t10_alua_lba_map_mem_cache = kmem_cache_create(
>> +			"t10_alua_lba_map_mem_cache",
>> +			sizeof(struct t10_alua_lba_map_member),
>> +			__alignof__(struct t10_alua_lba_map_member), 0, NULL);
>> +	if (!t10_alua_lba_map_mem_cache) {
>> +		pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
>> +				"cache failed\n");
>> +		goto out_free_lba_map_cache;
>> +	}
>>  
>>  	target_completion_wq = alloc_workqueue("target_completion",
>>  					       WQ_MEM_RECLAIM, 0);
>>  	if (!target_completion_wq)
>> -		goto out_free_tg_pt_gp_mem_cache;
>> +		goto out_free_lba_map_mem_cache;
>>  
>>  	return 0;
>>  
>> +out_free_lba_map_mem_cache:
>> +	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
>> +out_free_lba_map_cache:
>> +	kmem_cache_destroy(t10_alua_lba_map_cache);
>>  out_free_tg_pt_gp_mem_cache:
>>  	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
>>  out_free_tg_pt_gp_cache:
>> @@ -165,6 +189,8 @@ void release_se_kmem_caches(void)
>>  	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
>>  	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
>>  	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
>> +	kmem_cache_destroy(t10_alua_lba_map_cache);
>> +	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
>>  }
>>  
>>  /* This code ensures unique mib indexes are handed out. */
> 
> 

>> +			break;
>> +		}
>> +		pg = 0;
>> +		while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) {
>> +			switch (state) {
>> +			case 'O':
>> +				alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
>> +				break;
>> +			case 'A':
>> +				alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED;
>> +				break;
>> +			case 'S':
>> +				alua_state = ALUA_ACCESS_STATE_STANDBY;
>> +				break;
>> +			case 'U':
>> +				alua_state = ALUA_ACCESS_STATE_UNAVAILABLE;
>> +				break;
>> +			default:
>> +				pr_err("Invalid ALUA state '%c'\n", state);
>> +				ret = -EINVAL;
>> +				goto out;
>> +			}
>> +
>> +			ret = core_alua_allocate_lba_map_mem(lba_map,
>> +							     pg_id, alua_state);
>> +			if (ret) {
>> +				pr_err("Invalid target descriptor %d:%c "
>> +				       "at line %d\n",
>> +				       pg_id, state, num);
>> +				break;
>> +			}
>> +			pg++;
>> +			ptr = strchr(ptr, ' ');
>> +			if (ptr)
>> +				ptr++;
>> +			else
>> +				break;
>> +		}
>> +		if (pg_num == -1)
>> +		    pg_num = pg;
>> +		else if (pg != pg_num) {
>> +			pr_err("Only %d from %d port groups definitions "
>> +			       "at line %d\n", pg, pg_num, num);
>> +			ret = -EINVAL;
>> +			break;
>> +		}
> 
> Btw, checkpatch complains about conditionals that don't have matching
> brackets on both code blocks, e.g.:
> 
>    if foo
>    else {
>        bar
>    }
> 
Ah. Of course.
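
So the fixed-up conditional would presumably become:

	if (pg_num == -1) {
		pg_num = pg;
	} else if (pg != pg_num) {
		pr_err("Only %d of %d port group definitions "
		       "at line %d\n", pg, pg_num, num);
		ret = -EINVAL;
		break;
	}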

Cheers,

Hannes
-- 
Dr. Hannes Reinecke		      zSeries & Storage
hare@suse.de			      +49 911 74053 688
SUSE LINUX Products GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: J. Hawn, J. Guild, F. Imendörffer, HRB 16746 (AG Nürnberg)
